Exception error in EReader thread running ibapi - python-3.x

I am running the Python ib-api to receive real-time market data from Interactive Brokers. It delivers the data I expect, but it ends with "unhandled exception in EReader thread".
from ibapi.client import EClient
from ibapi.wrapper import EWrapper
from ibapi.contract import Contract as IBcontract
from threading import Thread
import queue
import pandas as pd
from ibapi.ticktype import TickTypeEnum

DEFAULT_PRICE_DATA_ID = 1001

FINISHED = object()
STARTED = object()
TIME_OUT = object()
class finishableQueue(object):
    def __init__(self, queue_to_finish):
        self._queue = queue_to_finish
        self.status = STARTED

    def get(self, timeout):
        contents_of_queue = []
        finished = False
        while not finished:
            try:
                current_element = self._queue.get(timeout=timeout)
                if current_element is FINISHED:
                    finished = True
                    self.status = FINISHED
                else:
                    contents_of_queue.append(current_element)
            except queue.Empty:
                finished = True
                self.status = TIME_OUT
        return contents_of_queue

    def timed_out(self):
        return self.status is TIME_OUT
class TestWrapper(EWrapper):
    def __init__(self):
        self._my_price_data_dict = {}

    def get_error(self, timeout=5):
        if self.is_error():
            try:
                return self._my_errors.get(timeout=timeout)
            except queue.Empty:
                return None
        return None

    def is_error(self):
        an_error_if = not self._my_errors.empty()
        return an_error_if

    def init_error(self):
        error_queue = queue.Queue()
        self._my_errors = error_queue

    def error(self, id, errorCode, errorString):
        ## Overridden method
        errormsg = "IB error id %d errorcode %d string %s" % (id, errorCode, errorString)
        self._my_errors.put(errormsg)

    def init_ibprices(self, tickerid):
        ibprice_data_queue = self._my_price_data_dict[tickerid] = queue.Queue()
        return ibprice_data_queue

    def tickPrice(self, reqId, tickType, price, attrib):
        tickdata = (TickTypeEnum.to_str(tickType), price)
        price_data_dict = self._my_price_data_dict
        if reqId not in price_data_dict.keys():
            self.init_ibprices(reqId)
        price_data_dict[reqId].put(tickdata)
class TestClient(EClient):
    def __init__(self, wrapper):
        EClient.__init__(self, wrapper)

    def error(self, reqId, errorCode, errorString):
        print("Error: ", reqId, " ", errorCode, " ", errorString)

    def getIBrealtimedata(self, ibcontract, tickerid=DEFAULT_PRICE_DATA_ID):
        ib_data_queue = finishableQueue(self.init_ibprices(tickerid))
        self.reqMktData(
            tickerid,
            ibcontract,
            "",
            False,
            False,
            []
        )
        MAX_WAIT_SECONDS = 5
        print("Getting data from the server... could take %d seconds to complete " % MAX_WAIT_SECONDS)
        price_data = ib_data_queue.get(timeout=MAX_WAIT_SECONDS)
        while self.wrapper.is_error():
            print(self.get_error())
        if ib_data_queue.timed_out():
            print("Exceeded maximum wait for wrapper to confirm finished - seems to be normal behaviour")
        self.cancelMktData(tickerid)
        return price_data


class TestApp(TestWrapper, TestClient):
    def __init__(self, ipaddress, portid, clientid):
        TestWrapper.__init__(self)
        TestClient.__init__(self, wrapper=self)
        self.connect(ipaddress, portid, clientid)
        thread = Thread(target=self.run)
        thread.start()
        setattr(self, "_thread", thread)
        self.init_error()


def main(slist):
    app = TestApp("127.0.0.1", 7497, 1)
    for i in slist:
        ibcontract = IBcontract()
        ibcontract.secType = "STK"
        ibcontract.symbol = i
        ibcontract.exchange = "SEHK"
        Lastprice = app.getIBrealtimedata(ibcontract)
        df = pd.DataFrame(Lastprice)
        print(ibcontract.symbol, df.head())
    app.disconnect()


if __name__ == "__main__":
    seclist = [700, 2318, 5, 12]
    main(seclist)
Here are the error messages:
unhandled exception in EReader thread
Traceback (most recent call last):
  File "D:\Anaconda3\envs\myweb\lib\site-packages\ibapi\reader.py", line 34, in run
    data = self.conn.recvMsg()
  File "D:\Anaconda3\envs\myweb\lib\site-packages\ibapi\connection.py", line 99, in recvMsg
    buf = self._recvAllMsg()
  File "D:\Anaconda3\envs\myweb\lib\site-packages\ibapi\connection.py", line 119, in _recvAllMsg
    buf = self.socket.recv(4096)
OSError: [WinError 10038] An operation was attempted on something that is not a socket

A separate thread is started to read incoming messages from the socket:
thread = Thread(target = self.run)
thread.start()
But this thread is never stopped, and it is still running when you call disconnect(). As a result, it tries to access the socket object, which is by then None, triggering the error. Try stopping the thread before disconnecting by setting done = True.
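For example, the end of the question's main() could be reworked like this (a sketch; done is the flag EClient.run() checks in its message loop, and _thread is the attribute set in TestApp.__init__):

app.done = True       # signal the run() loop to exit
app._thread.join()    # wait for the message-reading thread to finish
app.disconnect()      # then drop the connection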
As a side note: since this error happens at the very end of the program, at disconnection, it shouldn't interfere with receiving the expected data.

A workaround to avoid the warning: shut down the write side of the socket before disconnecting, so the server closes the connection and the reader thread's recv() returns cleanly while the socket object still exists.
Implement this in your EClient / EWrapper subclass.
Create a socket shutdown function:
import socket, time
[...]

def _socketShutdown(self):
    self.conn.lock.acquire()
    try:
        if self.conn.socket is not None:
            self.conn.socket.shutdown(socket.SHUT_WR)
    finally:
        self.conn.lock.release()
Use it before closing the connection:
self._socketShutdown()
time.sleep(1)
self.disconnect()

Related

Shutdown during recv on python socket

During the execution of this code, it blocks on the join.
I have a TCP server running on ("127.0.0.1", 1777) for the test.
I tried using the socket directly with recv, but the result is the same.
Any idea why the shutdown on READ doesn't interrupt the read?
import socket
from threading import Thread
from time import sleep


class Parser(Thread):
    rbufsize = 4096
    wbufsize = 4096
    encoding = "utf-8"
    new_line = "\n"

    def __init__(self):
        super().__init__()
        self._socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
        self._wfile = None
        self._rfile = None

    def run(self):
        self._socket.connect(("127.0.0.1", 1777))
        self._rfile = self._socket.makefile('rb', self.rbufsize, encoding=self.encoding, newline=self.new_line)
        self._wfile = self._socket.makefile('wb', self.wbufsize, encoding=self.encoding, newline=self.new_line)
        while True:
            data = self._rfile.readline()
            if not data:
                break
            self._handle_data(data)
        self._cleanup()

    def _cleanup(self):
        """
        Close everything down
        """
        if not self._wfile.closed:
            try:
                self._wfile.flush()
            except socket.error:
                # A final socket error may have occurred here, such as
                # the local error ECONNABORTED.
                pass
        self._socket.shutdown(socket.SHUT_RDWR)
        self._wfile.close()
        self._rfile.close()
        self._socket.close()

    def stop(self):
        self._socket.shutdown(socket.SHUT_RD)


if __name__ == "__main__":
    p = Parser()
    p.start()
    sleep(5)
    p.stop()
    print("start join")
    p.join()

Tello programming

I'm trying to connect to my Tello drone with Spyder over a socket, but the drone doesn't send an answer back. It prints that the Tello drone refused to enter command mode.
import socket
import threading
import time
import traceback


class Tello:
    # NOTE: the question as posted is missing the __init__ line; the
    # parameters below are inferred from the attribute assignments.
    def __init__(self, local_ip, local_port, tello_ip, tello_port, command_timeout, imperial=False):
        self.abort_flag = False
        self.command_timeout = command_timeout
        self.imperial = imperial
        self.response = None
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.tello_address = (tello_ip, tello_port)
        self.socket.bind((local_ip, local_port))
        self.receive_thread = threading.Thread(target=self._receive_thread)
        self.receive_thread.daemon = True
        self.receive_thread.start()
        if self.send_command('command') != 'OK':
            raise RuntimeError('Tello rejected attempt to enter command mode')

    def __del__(self):
        self.socket.close()

    def _receive_thread(self):
        while True:
            try:
                self.response, ip = self.socket.recvfrom(256)
            except Exception:
                break

    def send_command(self, command):
        self.abort_flag = False
        timer = threading.Timer(self.command_timeout, self.set_abort_flag)
        self.socket.sendto(command.encode('utf-8'), self.tello_address)
        timer.start()
        while self.response is None:
            if self.abort_flag is True:
                raise RuntimeError('No response to command')
        timer.cancel()
        response = self.response.decode('utf-8')
        self.response = None
        return response

    def set_abort_flag(self):
        """Sets self.abort_flag to True.

        Used by the timer in Tello.send_command() to indicate that a response
        timeout has occurred.
        """
        self.abort_flag = True
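For reference, with the signature reconstructed above, instantiation would look something like this (a hypothetical sketch; the Tello normally listens on UDP 192.168.10.1:8889, and the local port is arbitrary):

# Hypothetical values; adjust to your network setup.
drone = Tello('', 9000, '192.168.10.1', 8889, 0.3)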

How to handle exception with imap_unordered in python multiprocessing

I am using pool.imap_unordered to apply a function to a number of txt files saved locally.
Is it possible to capture an exception and pass over the file?
If my code runs into an exception, it blocks the entire loop.
pool = Pool(processes=15)
results = {}
files = glob.glob('{}/10K_files/*.txt'.format(path_input))
for key, output in tqdm(pool.imap_unordered(process_file, files), total=len(files)):
    results[key] = output
I've tried something like this:
pool = Pool(processes=15)
results = {}
files = glob.glob('{}/10K_files/*.txt'.format(path_input))
try:
    for key, output in tqdm(pool.imap_unordered(process_file, files), total=len(files)):
        results[key] = output
except:
    print("error")

but then I want the loop to resume from where it left off.
Thanks!
You could catch the exception in process_file and return it, then test whether the return value is an exception. Here is an example:
import os
import traceback
import multiprocessing as mp


def main():
    work_items = [i for i in range(20)]
    pool = mp.Pool()
    for result in pool.imap_unordered(process_file_exc, work_items):
        if isinstance(result, Exception):
            print("Got exception: {}".format(result))
        else:
            print("Got OK result: {}".format(result))


def process_file_exc(work_item):
    try:
        return process_file(work_item)
    except Exception as ex:
        return Exception("Err on item {}".format(work_item)
                         + os.linesep + traceback.format_exc())


def process_file(work_item):
    if work_item == 9:
        # this will raise ZeroDivisionError exception
        return work_item / 0
    return "{} * 2 == {}".format(work_item, work_item * 2)


if __name__ == '__main__':
    main()
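Applied to the loop from the question, the same pattern might look like this (a sketch reusing the question's pool, files, and tqdm setup, with process_file_exc wrapping the question's process_file as above):

results = {}
for result in tqdm(pool.imap_unordered(process_file_exc, files), total=len(files)):
    if isinstance(result, Exception):
        print(result)          # log the failure and keep going
        continue
    key, output = result       # process_file returns (key, output) per the question
    results[key] = output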

RabbitMQ Pika connection reset , (-1, ConnectionResetError(104, 'Connection reset by peer'))

I searched through Stack Overflow and am posting this question because no solution worked for me, and my question might be different from the others.
I am writing a script which gets an article from a RabbitMQ queue and processes it to count words and extract keywords, then dumps them into a db. The script works fine, but after some time of execution I get this exception:
(-1, "ConnectionResetError(104, 'Connection reset by peer')")
I have no idea why I am getting this. I have tried a lot of the solutions available on Stack Overflow; none worked for me. I have written my script in two different ways; both work fine, but after some time the same exception occurs.
Here is my first version:
def app_main():
    global channel, results, speedvars
    Logger.log_message('Starting app main')

    # Edit 4
    def pika_connect():
        connection = pika.BlockingConnection(pika.ConnectionParameters(
            host=Config.AMQ_DAEMONS['base']['amq-host']))
        channel = connection.channel()
        print("In pika connect")
        Logger.log_message('Setting up input queue consumer')
        channel.queue_declare(Config.AMQ_DAEMONS['consumer']['input'], durable=True)
        channel.basic_consume(on_message, queue=Config.AMQ_DAEMONS['consumer']['input'], no_ack=True)
        Logger.log_message('Starting loop')
        channel.start_consuming()
    #########

    speedvars = SpeedVars()
    speedtracker = SpeedTracker(speedvars)
    speedtracker.start()
    sender = ResultsSender(results, speedvars)
    sender.start()

    # Edit 5: starting 10 threads to listen to pika
    for th in range(qthreads):
        Logger.log_message('Starting thread: ' + str(th))
        try:
            t = Thread(target=pika_connect, args=())
            t.start()
        except Exception as e:
            Logger.error_message("Exception in starting threads " + str(e))


try:
    app_main()
except Exception as e:
    Logger.error_message("Exception in APP MAIN " + str(e))
Here is my second version:
def app_main():
    global channel, results, speedvars
    Logger.log_message('Starting app main')

    speedvars = SpeedVars()
    speedtracker = SpeedTracker(speedvars)
    speedtracker.start()
    sender = ResultsSender(results, speedvars)
    sender.start()

    connection = pika.BlockingConnection(pika.ConnectionParameters(
        host=Config.AMQ_DAEMONS['base']['amq-host']))
    channel = connection.channel()
    print("In app main")
    Logger.log_message('Setting up input queue consumer')
    channel.queue_declare(Config.AMQ_DAEMONS['consumer']['input'], durable=True)
    channel.basic_consume(on_message, queue=Config.AMQ_DAEMONS['consumer']['input'], no_ack=True)
    Logger.log_message('Starting loop')
    try:
        channel.start_consuming()
    except Exception as e:
        Logger.error_message("Exception in start_consuming in main " + str(e))
        raise e


try:
    app_main()
except Exception as e:
    Logger.error_message("Exception in APP MAIN " + str(e))
In my first version I used threading because I want to speed up the processing of articles.
This is my callback function:
def on_message(ch, method, properties, message):
    Logger.log_message("Starting parsing new msg ")
    handle_message(message)
EDIT: Full Code
import os
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
from Modules import Logger
import pika
import Config
import json
import pickle
import Pipeline
import sys
import time
import datetime
import threading
import queue
import functools
from pid.decorator import pidfile
Logger.log_init(Config.AMQ_DAEMONS['consumer']['log-ident'])
#qthreads = Config.AMQ_DAEMONS['consumer']['threads']
results = queue.Queue()
channel = None
speedvars = None
SPD_RECEIVED = 'received'
SPD_DISCARDED = 'discarded'
SPD_SENT = 'sent'
class SpeedVars(object):
    vars = {}
    lock = None

    def __init__(self):
        self.lock = threading.Lock()

    def inc(self, var):
        self.lock.acquire()
        try:
            if var in self.vars:
                self.vars[var] += 1
            else:
                self.vars[var] = 1
        finally:
            self.lock.release()

    def dec(self, var):
        self.lock.acquire()
        try:
            if var in self.vars:
                self.vars[var] -= 1
            else:
                Logger.error_message('Cannot decrement ' + var + ', not tracked')
        finally:
            self.lock.release()

    def get(self, var):
        out = None
        self.lock.acquire()
        try:
            if var in self.vars:
                out = self.vars[var]
            else:
                Logger.error_message('Cannot get ' + var + ', not tracked')
        finally:
            self.lock.release()
        return out

    def get_all(self):
        out = None
        self.lock.acquire()
        try:
            out = self.vars.copy()
        finally:
            self.lock.release()
        return out
class SpeedTracker(threading.Thread):
    speedvars = None
    start_ts = None
    last_vars = {}

    def __init__(self, speedvars):
        super(SpeedTracker, self).__init__()
        self.start_ts = time.time()
        self.speedvars = speedvars
        Logger.log_message('Setting up speed tracker')

    def run(self):
        while True:
            time.sleep(Config.AMQ_DAEMONS['consumer']['speed-tracking-interval'])
            prev = self.last_vars
            cur = self.speedvars.get_all()
            now = time.time()
            if len(prev) > 0:
                q = {}
                for key in cur:
                    qty = cur[key] - prev[key]
                    avg = qty / Config.AMQ_DAEMONS['consumer']['speed-tracking-interval']
                    overall_avg = cur[key] / (now - self.start_ts)
                    Logger.log_message('Speed-tracking (' + key + '): total ' + str(cur[key])
                                       + ', delta ' + str(qty) + ', speed ' + '%0.2f' % avg + '/sec, '
                                       + ', overall speed ' + '%0.2f' % overall_avg + '/sec')
                pending = cur[SPD_RECEIVED] - cur[SPD_DISCARDED] - cur[SPD_SENT]
                pending_avg = pending / (now - self.start_ts)
                Logger.log_message('Speed-tracking (pending): total ' + str(pending)
                                   + ', overall speed ' + '%0.2f' % pending_avg + '/sec')
            self.last_vars = cur
class ResultsSender(threading.Thread):
    channel = None
    results = None
    speedvars = None

    def __init__(self, results, speedvars):
        super(ResultsSender, self).__init__()
        connection = pika.BlockingConnection(pika.ConnectionParameters(
            host=Config.AMQ_DAEMONS['base']['amq-host']))
        self.channel = connection.channel()
        Logger.log_message('Setting up output exchange')
        self.channel.exchange_declare(exchange=Config.AMQ_DAEMONS['consumer']['output'], exchange_type='direct')
        self.results = results
        self.speedvars = speedvars

    def run(self):
        while True:
            item = self.results.get()
            self.channel.basic_publish(
                exchange=Config.AMQ_DAEMONS['consumer']['output'],
                routing_key='',
                body=item)
            self.speedvars.inc(SPD_SENT)
def parse_message(message):
    try:
        bodytxt = message.decode('UTF-8')
        body = json.loads(bodytxt)
        return body
    except Exception as e:
        Logger.error_message("Cannot parse message - " + str(e))
        raise e


def get_body_elements(body):
    try:
        artid = str(body.get('article_id'))
        article_dt = datetime.datetime.fromtimestamp(body.get('pubTime'))
        date = article_dt.strftime(Config.DATE_FORMAT)
        article = "\n".join([body.get('title', ''), body.get('subheading', ''), body.get('content', '')])
        return (artid, date, article)
    except Exception as e:
        Logger.error_message("Cannot retrieve article attributes " + str(e))
        raise e


def process_article(id, date, text):
    global results, speedvars
    try:
        Logger.log_message('Processing article ' + id)
        keywords = Pipeline.extract_keywords(text)
        send_data = {"id": id, "date": date, "keywords": keywords}
        results.put(pickle.dumps(send_data))
        # print('Queue Size:', results.qsize())
    except Exception as e:
        Logger.error_message("Problem processing article " + str(e))
        raise e
def ack_message(ch, delivery_tag):
    """Note that `channel` must be the same pika channel instance via which
    the message being ACKed was retrieved (AMQP protocol constraint).
    """
    if channel.is_open:
        channel.basic_ack(delivery_tag)
    else:
        Logger.error_message("Channel is already closed, so we can't ACK this message")
        # Channel is already closed, so we can't ACK this message;
        # log and/or do something that makes sense for your app in this case.
        #pass


def handle_message(connection, ch, delivery_tag, message):
    global speedvars
    start = time.time()
    thread_id = threading.get_ident()
    try:
        speedvars.inc(SPD_RECEIVED)
        body = parse_message(message)
        (id, date, text) = get_body_elements(body)
        words = len(text.split())
        if words <= Config.AMQ_DAEMONS['consumer']['word-count-limit']:
            process_article(id, date, text)
        else:
            Logger.log_message('Ignoring article, over word count limit')
            speedvars.inc(SPD_DISCARDED)
    except Exception as e:
        Logger.error_message("Could not process message - " + str(e))
    cb = functools.partial(ack_message, ch, delivery_tag)
    connection.add_callback_threadsafe(cb)
    Logger.log_message("Thread id: " + str(thread_id) + " Delivery tag: " + str(delivery_tag))
    Logger.log_message("Total time taken to handle message: " + str(time.time() - start))


# CALLBACK
## def on_message(ch, method, properties, message):
##     global executor
##     executor.submit(handle_message, message)
def on_message(ch, method, header_frame, message, args):
    (connection, threads) = args
    delivery_tag = method.delivery_tag
    t = threading.Thread(target=handle_message, args=(connection, ch, delivery_tag, message))
    t.start()
    threads.append(t)
####################################################
#pidfile(piddir=Config.AMQ_DAEMONS['base']['pid-dir'], pidname=Config.AMQ_DAEMONS['consumer']['pid-file'])
def app_main():
    global channel, results, speedvars

    speedvars = SpeedVars()
    speedtracker = SpeedTracker(speedvars)
    speedtracker.start()
    sender = ResultsSender(results, speedvars)
    sender.start()

    # Pika Connection
    connection = pika.BlockingConnection(pika.ConnectionParameters(
        host=Config.AMQ_DAEMONS['base']['amq-host']))
    channel = connection.channel()

    Logger.log_message('Setting up input queue consumer')
    channel.queue_declare(Config.AMQ_DAEMONS['consumer']['input'], durable=True)
    #channel.basic_consume(on_message, queue=Config.AMQ_DAEMONS['consumer']['input'], no_ack=True)
    channel.basic_qos(prefetch_count=1)
    threads = []
    on_message_callback = functools.partial(on_message, args=(connection, threads))
    channel.basic_consume(on_message_callback, Config.AMQ_DAEMONS['consumer']['input'])

    Logger.log_message('Starting loop')
    ## channel.start_consuming()
    try:
        channel.start_consuming()
    except KeyboardInterrupt:
        channel.stop_consuming()

    # Wait for all to complete
    for thread in threads:
        thread.join()

    connection.close()


app_main()
Pika is not taking a lot of time to handle a message, yet I am still facing the connection reset issue:
Total time taken to handle message: 0.0005991458892822266
Your handle_message method is blocking heartbeats, because all of your code, including the Pika I/O loop, is running on the same thread. Check out this example of how to run your work (handle_message) on a separate thread from Pika's I/O loop and then acknowledge messages correctly.
NOTE: the RabbitMQ team monitors the rabbitmq-users mailing list and only sometimes answers questions on StackOverflow.
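In essence, the pattern looks like this (a minimal sketch, assuming pika 1.x and a hypothetical local queue named 'work'):

import functools
import threading
import pika

def do_work(connection, channel, delivery_tag, body):
    # Long-running work happens here, off the connection's I/O-loop thread,
    # so heartbeats keep flowing while we process.
    # ...
    # basic_ack must run on the I/O-loop thread, so schedule it there:
    cb = functools.partial(channel.basic_ack, delivery_tag)
    connection.add_callback_threadsafe(cb)

def on_message(channel, method, properties, body, connection):
    t = threading.Thread(target=do_work,
                         args=(connection, channel, method.delivery_tag, body))
    t.start()

connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='work', durable=True)
channel.basic_qos(prefetch_count=1)
channel.basic_consume(queue='work',
                      on_message_callback=functools.partial(on_message, connection=connection))
channel.start_consuming()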
I was getting the same issue. Increasing the heartbeat and connection-timeout durations didn't work for me. I finally figured out that if you have already created a channel and you are not publishing anything on it for several minutes (20 minutes in my case), you get this error.
The solutions which worked for me:
Create the channel immediately before publishing any message, OR
use try-except and, if you get an exception, create another channel and republish, i.e.:
try:
    channel.basic_publish(exchange='', routing_key='abcd', body=data)
except Exception as e1:
    connection = pika.BlockingConnection(pika.ConnectionParameters(host='1.128.0.3', credentials=credentials))
    channel = connection.channel()
    channel.basic_publish(exchange='', routing_key='abcd', body=data)
This will at least keep things running and prevent losing any data. I'm not an expert in this, but I hope this helps someone!
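That retry could also be factored into a small helper (a sketch; the host, credentials, and 'abcd' routing key are carried over from the snippet above):

def publish_with_retry(channel, data):
    # Republish on a fresh connection/channel if the old channel has gone stale.
    try:
        channel.basic_publish(exchange='', routing_key='abcd', body=data)
    except pika.exceptions.AMQPError:
        connection = pika.BlockingConnection(
            pika.ConnectionParameters(host='1.128.0.3', credentials=credentials))
        channel = connection.channel()
        channel.basic_publish(exchange='', routing_key='abcd', body=data)
    return channel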
I also faced the same issue and resolved it by increasing the heartbeat and connection-timeout durations.
Many thanks to @LukeBakken, who actually identified the root cause.
Here is how you can configure the timeouts:
import pika


def main():
    # NOTE: These parameters work with all Pika connection types
    params = pika.ConnectionParameters(heartbeat=600, blocked_connection_timeout=300)
    conn = pika.BlockingConnection(params)
    chan = conn.channel()
    chan.basic_publish('', 'my-alphabet-queue', "abc")

    # If publish causes the connection to become blocked, then this conn.close()
    # would hang until the connection is unblocked, if ever. However, the
    # blocked_connection_timeout connection parameter would interrupt the wait,
    # resulting in ConnectionClosed exception from BlockingConnection (or the
    # on_connection_closed callback call in an asynchronous adapter)
    conn.close()


if __name__ == '__main__':
    main()
Reference: https://pika.readthedocs.io/en/stable/examples/heartbeat_and_blocked_timeouts.html

Pass asynchronous websocket.send() to stdout/stderr wrapper class

I have a class that unbuffers stdout and stderr, like so:
class Unbuffered:
    def __init__(self, stream):
        self.stream = stream

    def write(self, data):
        data = data.strip()
        if data.startswith("INFO: "):
            data = data[6:]
        if '[' in data:
            progress = re.compile(r"\[(\d+)/(\d+)\]")
            data = progress.match(data)
            total = data.group(2)
            current = data.group(1)
            data = '{0}/{1}'.format(current, total)
        if data.startswith("ERROR: "):
            data = data[7:]
        self.stream.write(data + '\n')
        self.stream.flush()

    def __getattr__(self, attr):
        return getattr(self.stream, attr)
The output comes from a function that runs in a ProcessPoolExecutor when an inbound websocket message arrives.
I want the output printed to the console as well as sent to my websocket client. I tried making Unbuffered asynchronous and passing the websocket instance to it, but no luck.
UPDATE: The essentials of run() and my websocket handler() look something like this:
def run(url, path):
    logging.addLevelName(25, "INFO")
    fmt = logging.Formatter('%(levelname)s: %(message)s')
    #----
    output.progress_stream = Unbuffered(sys.stderr)
    stream = Unbuffered(sys.stdout)
    #----
    level = logging.INFO
    hdlr = logging.StreamHandler(stream)
    hdlr.setFormatter(fmt)
    log.addHandler(hdlr)
    log.setLevel(level)
    get_media(url, opt)


async def handler(websocket, path):
    while True:
        inbound = json.loads(await websocket.recv())
        if inbound is None:
            break
        url = inbound['url']
        if 'path' in inbound:
            path = inbound['path'].rstrip(os.path.sep) + os.path.sep
        else:
            path = os.path.expanduser("~") + os.path.sep
        # blah more code
        while inbound != None:
            await asyncio.sleep(.001)
            await loop.run_in_executor(None, run, url, path)
run(), handler() and Unbuffered are separate from each other.
Rewriting get_media() to use asyncio instead of running it in a different thread would be best. Otherwise, there are several options for communicating between a regular thread and coroutines, for example using a socketpair:
import asyncio
import socket
import threading
import time
import random

# threads stuff
def producer(n, writer):
    for i in range(10):
        # print("sending", i)
        writer.send("message #{}.{}\n".format(n, i).encode())
        time.sleep(random.uniform(0.1, 1))

def go(writer):
    threads = [threading.Thread(target=producer, args=(i + 1, writer,))
               for i in range(5)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    writer.send("bye\n".encode())

# asyncio coroutines
async def clock():
    for i in range(11):
        print("The time is", i)
        await asyncio.sleep(1)

async def main(reader):
    buffer = ""
    while True:
        buffer += (await loop.sock_recv(reader, 10000)).decode()
        # print(len(buffer))
        while "\n" in buffer:
            msg, _nl, buffer = buffer.partition("\n")
            print("Got", msg)
            if msg == "bye":
                return

reader, writer = socket.socketpair()
reader.setblocking(False)
threading.Thread(target=go, args=(writer,)).start()
# time.sleep(1.5)  # socket is buffering

loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.wait([clock(), main(reader)]))
loop.close()
You can also try this 3rd-party thread+asyncio compatible queue: janus
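A minimal sketch of the janus approach (assuming pip install janus; the plain thread uses the blocking sync_q side, while the coroutine awaits the async_q side):

import asyncio
import threading
import janus

def producer(sync_q):
    # Runs in a plain thread; blocking puts are fine here.
    for i in range(5):
        sync_q.put("message #{}".format(i))
    sync_q.put(None)    # sentinel to stop the consumer

async def consumer(async_q):
    while True:
        item = await async_q.get()
        async_q.task_done()
        if item is None:
            break
        print("Got", item)

async def main():
    queue = janus.Queue()   # must be created while the event loop is running
    threading.Thread(target=producer, args=(queue.sync_q,)).start()
    await consumer(queue.async_q)
    queue.close()
    await queue.wait_closed()

asyncio.run(main())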
