Multicall taking 300 seconds to return result - python-3.x

I am using the Python multicall library (https://github.com/banteg/multicall.py) to fetch ERC20 balances for multiple wallet addresses at once, combined with multiprocessing.
When the process first starts, Multicall returns results in under a second, but after it has been running for hours the responses slow down to more than a minute, and sometimes up to 300 seconds.
Can anyone explain the reason behind this growing latency?
Below is the code sample:
from multicall import Call, Multicall
from web3 import Web3
from web3.middleware import geth_poa_middleware

block_number = 11374651
GET_BALANCE_FN = "balanceOf(address)(uint256)"

def call_back_obj(success, value):
    """
    Callback to process results from multicall for a function.
    If the call fails, returns False (if changed to a string it throws an error).
    """
    if success is True and type(value) == bytes:
        return value.decode("utf-8")
    elif success is True:
        return value
    else:
        return False

def get_instance():
    web3_instance = Web3(
        Web3.HTTPProvider(
            node_provider,
            request_kwargs={"timeout": 10},
        )
    )
    web3_instance.middleware_onion.inject(geth_poa_middleware, layer=0)
    return web3_instance

w3 = get_instance()

def token_balance_handler(addresses, block_number=None):
    calls = []
    for address_map in addresses:
        contract_addr = address_map.get("tokenAddress")
        wallet_addr = address_map.get("walletAddress")
        calls.append(
            Call(
                contract_addr,
                [GET_BALANCE_FN, (wallet_addr)],
                [[f"{wallet_addr}-{contract_addr}", call_back_obj]],
            )
        )
    return Multicall(
        calls, _w3=w3, block_id=block_number, require_success=False
    )

print(token_balance_handler(addresses, block_number)())
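
To narrow down whether the slowdown comes from the RPC node or from the long-running worker processes, it can help to time each aggregated call and log it. This is a minimal sketch, reusing token_balance_handler and addresses from the snippet above; the logger setup is just for illustration and not part of the original code:

import time
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("multicall-timing")

def timed_balances(addresses, block_number=None):
    """Run the aggregated balanceOf call and log how long the round trip took."""
    multi = token_balance_handler(addresses, block_number)
    start = time.monotonic()
    result = multi()  # single aggregated call to the multicall contract
    elapsed = time.monotonic() - start
    logger.info("fetched %d balances in %.2fs", len(result), elapsed)
    return result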

Best way to run "self monitoring" - asyncio/thread?

Python 3.10.6
ManagerTask() is responsible for executing Task() methods.
Note that Task's methods are actually Celery tasks (async).
I'd like to add the option to track the execution of those tasks inside the ManagerTask class.
I managed to get it working, but as this is my first asyncio code I'm not sure I'm doing it right (I'm aware of Flower).
Second, at the moment a single Main() orchestrates the execution of Task(); in the real world I need to expand it to support executing multiple sessions of Main() in parallel.
import asyncio
from typing import Dict, List

class ManagerTask:
    def __init__(self, id: int) -> None:
        self.id = id
        self.tasks: List["Task"] = []
        self.executed_tasks: List["Task"] = []
        self.state = State.SCHEDULED

    def load_tasks(self, configs: List[Dict]):
        # code for loading tasks
        ...

    async def run(self):
        """Execute all tasks and start monitoring results."""
        execute_task = asyncio.create_task(self.task_exec())
        monitor_progress = asyncio.create_task(self.monitor_progress())
        await execute_task
        await monitor_progress

    async def monitor_progress(self):
        """If one task failed, mark Main.state as failed; if all succeed, mark Main.state as success."""
        failure_flag = False
        count_success = 0
        i = 0
        while True:
            state = self.executed_tasks[i].task_run.state
            if state == 'SUCCESS':
                count_success += 1
                i += 1
            if state == 'FAILURE':
                failure_flag = True
                i += 1
            print(f'Rule_ID:{self.executed_tasks[i].rule_id} \
                \nCelery_UUID:{self.executed_tasks[i].task_celery_id} \
                \nstatus - {self.executed_tasks[i].task_run.state}')
            # all tasks processed (either failed or succeeded)
            if i == len(self.executed_tasks) - 1:
                if failure_flag:
                    self.state = State.FAILED
                elif count_success == len(self.executed_tasks):
                    self.state = State.FINISHED
                break
            # otherwise wait
            await asyncio.sleep(3)

    async def task_exec(self):
        for task in self.starting_tasks:
            task.run()
Client code (executing the app):
...code
cust = MainTask(id=1)
cust.load_tasks(configs=rules, db_connections=db_connections)
asyncio.run(cust.run())
print("MainTask State:" + cust.state)
Example of output:
Rule_ID:4
Celery_UUID:fecde27c-b58a-43cd-9498-3478404c248b
status - FAILURE
....
Rule_ID:6
Celery_UUID:85df9bba-3d75-4b00-a533-a81cd3f6afb3
status - SUCCESS
MainTask State:Failed
1. Is this the proper way to use asyncio?
2. For running multiple MainTask instances, how should I do it? Threads or asyncio?
Since this program executes all tasks through Celery, I think I should also run it with asyncio, but I'm not sure.
Second, I would be thankful for guidance on whether this is the right approach:
async def exec_id(id):
    cust = MainTask(id=id)
    cust.load_tasks(configs=rules, ...)
    await cust.run()

async def main():
    ids = [111, 222, 333]
    for id in ids:
        await exec_id(id)

asyncio.run(main())
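
For question 2, if the goal is to run the MainTask sessions concurrently rather than one after another, an asyncio-only option is asyncio.gather. This is a minimal sketch under the same assumptions as the snippet above (MainTask, rules, and load_tasks come from the question; error handling is omitted):

import asyncio

async def exec_id(id):
    cust = MainTask(id=id)          # MainTask and rules come from the question's code
    cust.load_tasks(configs=rules)
    await cust.run()

async def main():
    ids = [111, 222, 333]
    # schedule all sessions at once and wait for them to finish concurrently
    await asyncio.gather(*(exec_id(i) for i in ids))

asyncio.run(main())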

Using ctrader-fix to download historical data from cTrader

I am using the Python package ctrader-fix (https://pypi.org/project/ctrader-fix/) to download historical price data from cTrader's API (https://help.ctrader.com/fix/).
The code does not make clear to me where exactly I declare the symbol I request historical data for (e.g. 'NatGas') through its SymbolID code number (for 'NatGas' the SymbolID is 10055), nor where I specify the timeframe I am interested in (e.g. 'H' for hourly data) or the number of records I want to retrieve.
[Screenshot: section of cTrader where the FIX SymbolID number of 'NatGas' is shown.]
The code that is provided is the following (I have filled in the values except the username):
from twisted.internet import reactor
from ctrader_fix import *

config = {
    'Host': '',
    'Port': 5201,
    'SSL': False,
    'Username': '****************',
    'Password': '3672075',
    'BeginString': 'FIX.4.4',
    'SenderCompID': 'demo.pepperstoneuk.3672025',
    'SenderSubID': 'QUOTE',
    'TargetCompID': 'cServer',
    'TargetSubID': 'QUOTE',
    'HeartBeat': '30'
}

client = Client(config["Host"], config["Port"], ssl=config["SSL"])

def send(request):
    deferred = client.send(request)
    # the FIX SOH (0x01) field delimiter is replaced with '|' for readability
    deferred.addCallback(lambda _: print("\nSent: ", request.getMessage(client.getMessageSequenceNumber()).replace("\u0001", "|")))

def onMessageReceived(client, responseMessage):  # Callback for receiving all messages
    print("\nReceived: ", responseMessage.getMessage().replace("\u0001", "|"))
    # We get the message type field value
    messageType = responseMessage.getFieldValue(35)
    # We send a security list request after we receive the logon message response
    if messageType == "A":
        securityListRequest = SecurityListRequest(config)
        securityListRequest.SecurityReqID = "A"
        securityListRequest.SecurityListRequestType = 0
        send(securityListRequest)
    # After receiving the security list we send a market order request using the first symbol of the security list
    elif messageType == "y":
        # We use getFieldValue to get all symbol IDs; it returns a list in this case
        # because the symbol ID field is repetitive
        symboldIds = responseMessage.getFieldValue(55)
        if config["TargetSubID"] == "TRADE":
            newOrderSingle = NewOrderSingle(config)
            newOrderSingle.ClOrdID = "B"
            newOrderSingle.Symbol = symboldIds[1]
            newOrderSingle.Side = 1
            newOrderSingle.OrderQty = 1000
            newOrderSingle.OrdType = 1
            newOrderSingle.Designation = "From Jupyter"
            send(newOrderSingle)
        else:
            marketDataRequest = MarketDataRequest(config)
            marketDataRequest.MDReqID = "a"
            marketDataRequest.SubscriptionRequestType = 1
            marketDataRequest.MarketDepth = 1
            marketDataRequest.NoMDEntryTypes = 1
            marketDataRequest.MDEntryType = 0
            marketDataRequest.NoRelatedSym = 1
            marketDataRequest.Symbol = symboldIds[1]
            send(marketDataRequest)
    # After receiving the new order request response we stop the reactor
    # and we will be disconnected from the API
    elif messageType == "8" or messageType == "j":
        print("We are done, stopping the reactor")
        reactor.stop()

def disconnected(client, reason):  # Callback for client disconnection
    print("\nDisconnected, reason: ", reason)

def connected(client):  # Callback for client connection
    print("Connected")
    logonRequest = LogonRequest(config)
    send(logonRequest)

# Setting client callbacks
client.setConnectedCallback(connected)
client.setDisconnectedCallback(disconnected)
client.setMessageReceivedCallback(onMessageReceived)
# Starting the client service
client.startService()
# Run the Twisted reactor (imported above)
reactor.run()
Can you explain the code to me and provide instructions on how to get, for example, hourly data for NatGas (1,000 observations)?
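
Regarding where the symbol is declared: in the sample above it is only set indirectly, by taking an entry from the returned security list (marketDataRequest.Symbol = symboldIds[1]). As a rough, unverified sketch, you could instead set the Symbol field directly to the SymbolID quoted in the question. Note that this only addresses the symbol part; it does not cover the timeframe or record count, and a MarketDataRequest subscribes to quotes rather than returning a fixed number of historical bars:

# Hypothetical variation of the snippet above: request quotes for NatGas directly
# by its FIX SymbolID (10055, per the question) instead of symboldIds[1].
marketDataRequest = MarketDataRequest(config)
marketDataRequest.MDReqID = "a"
marketDataRequest.SubscriptionRequestType = 1
marketDataRequest.MarketDepth = 1
marketDataRequest.NoMDEntryTypes = 1
marketDataRequest.MDEntryType = 0
marketDataRequest.NoRelatedSym = 1
marketDataRequest.Symbol = "10055"  # assumption: the Symbol tag accepts the numeric SymbolID
send(marketDataRequest)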

Neo4j: print(record['recommendation']) works, but temp = record['recommendation'] raises an error

from neo4j import GraphDatabase, basic_auth

driver = GraphDatabase.driver(
    "neo4j://34.201.9.108:7687",
    auth=basic_auth("neo4j", "chart-certifications-bottom"))

cypher_query = '''
MATCH (m:Movie {title:$movie})<-[:RATED]-(u:User)-[:RATED]->(rec:Movie)
RETURN distinct rec.title AS recommendation LIMIT 20
'''

with driver.session(database="neo4j") as session:
    results = session.read_transaction(
        lambda tx: tx.run(cypher_query,
                          movie="Crimson Tide").data())
    for record in results:
        print(record['recommendation'])  # <----------------------- OK

driver.close()
When I try to store the value in a variable before displaying it, changing print(record['recommendation']) to temp = record['recommendation'], I get an error:
@app.get("/neo4j")
def graph_db():
    driver = GraphDatabase.driver(
        "neo4j://34.201.9.108:7687",
        auth=basic_auth("neo4j", "chart-certifications-bottom"))
    cypher_query = '''
    MATCH (n:Person) RETURN n LIMIT 25
    '''
    with driver.session(database="neo4j") as session:
        results = session.read_transaction(
            lambda tx: tx.run(cypher_query,
                              movie="Crimson Tide").data())
        data = []
        for record in results:
            temp = record['recommendation']  # <----------------------- error
            data.append(temp)
        result = data
    driver.close()
    return {"Result ": result}
When you replaced your query with
MATCH (n:Person) RETURN n LIMIT 25
you are returning 25 Person nodes, and the column recommendation does not exist, so
temp = record['recommendation']
fails. Make sure that the column names in the result set your query returns match the keys you are accessing.
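
For illustration, a minimal sketch of the two ways to make the key and the query agree, reusing the driver from the code above (the Person property name "name" in the second option is an assumption, not taken from the question):

with driver.session(database="neo4j") as session:
    # Option 1: the query returns nodes under the alias "n", so access record['n'];
    # with .data() each node comes back as a dict of its properties.
    people = session.read_transaction(
        lambda tx: tx.run("MATCH (n:Person) RETURN n LIMIT 25").data())
    nodes = [record['n'] for record in people]

    # Option 2: alias the value you actually want so record['recommendation'] exists.
    recs = session.read_transaction(
        lambda tx: tx.run(
            "MATCH (n:Person) RETURN n.name AS recommendation LIMIT 25").data())
    recommendations = [record['recommendation'] for record in recs]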

for_log generator returns no messages

I am trying to get this for_log coroutine to work. What I'm trying to do is delete all messages from a specific month, in this case August. I searched around the Internet and the documentation, and with the help of another question I posted here I came up with this:
@Client.command(pass_context=True)
async def clear(ctx, number: int, month, year):
    def around_month(month, year):
        begin = datetime.strptime(f'1 {month} {year}', '%d %b %Y')
        if begin.month == 12:
            end = datetime(begin.year+1, 1, 1)
        else:
            end = datetime(begin.year, begin.month, 1)
        return begin, end

    if ctx.message.author.id not in AdminIDs:
        await Client.send_message(ctx.message.channel, 'You do not have permission to use this command')
        return
    counter = 0
    begin, end = around_month(month, year)
    tmsg = await Client.send_message(ctx.message.channel, 'Progress: 0/' + str(number))
    async for x in Client.logs_from(ctx.message.channel, limit=number, after=begin, before=end):
        counter += 1
        print(counter)
        await Client.edit_message(tmsg, 'Progress:' + counter + '/' + str(number))
        await Client.delete_messages(x)
        await asyncio.sleep(1.5)
    await Client.send_message(ctx.message.channel, 'Operation completed! ' + 'Cleared: ' + str(counter) + ' items')
and then use !fclear 100 AUG 2018
This looks fine at first glance, but for some reason it does not delete any messages. I inserted a counter to see if the for loop actually runs, and it gets nothing; it's still zero. Interestingly, I tried to print the counter from inside the for loop, and nothing was printed to the console. The only reason I can think of is that it never enters the for loop at all, which may be because it doesn't find any messages? I'm not sure. There are no errors in the console or anywhere else.
It gets nothing
This is a correct deduction. Working back from this, you'll find that around_month returns a tuple of the same date.
begin, end = around_month(month, year)
begin == end # True
Taken from the Discord docs:
The before, after, and around keys are mutually exclusive, only one may be passed at a time.
Given your need to delete messages from a specific month, you will need to fetch messages in that month until you exhaust the list of messages created in that month.
Also, you need to invoke Client.logs_from with snowflake times. There is a handy utility function for converting a datetime object to a time snowflake.
from discord.utils import time_snowflake
from datetime import datetime, timedelta
# ...

def before_time(messages, before):
    return [
        message for message in messages
        if message.id < before
    ]

after_dt = datetime.strptime(f'1 {month} {year}', '%d %b %Y')
before_dt = (after_dt + timedelta(days=31)).replace(day=1)
after = time_snowflake(after_dt)
before = time_snowflake(before_dt)

messages = await Client.logs_from(
    ctx.message.channel, limit=number, after=after
)
marked_for_deletion = before_time(messages, before)
while marked_for_deletion:
    await Client.delete_messages(marked_for_deletion)
    messages = await Client.logs_from(
        ctx.message.channel, limit=number, after=after
    )
    marked_for_deletion = before_time(messages, before)
    await asyncio.sleep(1.5)

why would python3 recursive function return null

I have this function that calls itself again when it hits a rate limit. It should eventually succeed and return the working data. It works normally at first, the rate-limit handling works as expected, but finally, when the data goes back to normal, I get:
TypeError: 'NoneType' object is not subscriptable
def grabPks(pageNum):
    # cloudflare blocks bots... use the scraper library to get around this or build your own logic
    # to store and use a manually generated cloudflare session cookie... I don't care 😎
    req = scraper.get("sumurl.com/"+str(pageNum)).content
    if(req == b'Rate Limit Exceeded'):
        print("adjust the rate limiting because they're blocking us :(")
        manPenalty = napLength * 3
        print("manually sleeping for {} seconds".format(manPenalty))
        time.sleep(manPenalty)
        print("okay let's try again... NOW SERVING {}".format(pageNum))
        grabPks(pageNum)
    else:
        tree = html.fromstring(req)
        pk = tree.xpath("/path/small/text()")
        resCmpress = tree.xpath("path/a//text()")
        resXtend = tree.xpath("[path/td[2]/small/a//text()")
        balance = tree.xpath("path/font//text()")
        return pk, resCmpress, resXtend, balance
I've tried to move the return to outside of the else scope but then it throws:
UnboundLocalError: local variable 'pk' referenced before assignment
Your top-level grabPks doesn't return anything if it is rate limited.
Think about this:
You call grabPks().
You're rate limited, so you go into the if statement and call grabPks() again.
This time it succeeds, so the inner grabPks() returns the value to the call above it.
The outer call now falls out of the if statement, reaches the end of the function, and returns nothing (None).
Try return grabPks(pageNum) inside your if block instead.
Well okay... I needed to return grabPks to make it play nice:
def grabPks(pageNum):
    # cloudflare blocks bots... use the scraper library to get around this or build your own logic
    # to store and use a manually generated cloudflare session cookie... I don't care 😎
    req = scraper.get("sumurl.com/"+str(pageNum)).content
    if(req == b'Rate Limit Exceeded'):
        print("adjust the rate limiting because they're blocking us :(")
        manPenalty = napLength * 3
        print("manually sleeping for {} seconds".format(manPenalty))
        time.sleep(manPenalty)
        print("okay let's try again... NOW SERVING {}".format(pageNum))
        return grabPks(pageNum)
    else:
        tree = html.fromstring(req)
        pk = tree.xpath("/path/small/text()")
        resCmpress = tree.xpath("path/a//text()")
        resXtend = tree.xpath("[path/td[2]/small/a//text()")
        balance = tree.xpath("path/font//text()")
        return pk, resCmpress, resXtend, balance
