MQRC_ENVIRONMENT_ERROR on commit - python-3.x

I'm experiencing an issue while trying to use pymqi to send messages to IBM MQ.
The project I'm working on:
- Reads from DB2
- Prepares data for a WorkerProcess
- The WorkerProcess makes some decisions based on that data and sends a message to MQ
When the WorkerProcess tries to commit the message to MQ, I get an error:
Stacktrace:
worker_process.py:
    self.mq.commit()
mq_service.py:
    self.qmgr.commit()
pymqi/__init__.py, line 1689, in commit:
    raise MQMIError(rv[0], rv[1])
pymqi.MQMIError: MQI Error. Comp: 2, Reason 2012: FAILED: MQRC_ENVIRONMENT_ERROR
The code and stack trace were typed by hand and may contain typos.
The code below is pseudocode of what I'm doing.
Any help and/or advice is greatly appreciated.
main.py:
c_processors = []
for i in range(num_of_processors):
    p = WorkerProcess()
    p.start()
    c_processors.append(p)
for p in c_processors:
    p.join()
worker_process.py:
class WorkerProcess(Process):
    def __init__(self):
        Process.__init__(self)
        self.mq = MQService()

    def run(self):
        self.mq.send_message('test')
        self.mq.commit()
        self.mq.close_connection()
mq_service.py:
class MQService():
    def __init__(self):
        self.connect()
        self.pmd = pymqi.MD()
        self.pmd.Format = pymqi.CMQC.MQFMT_STRING
        self.pmo = pymqi.PMO(Options=pymqi.CMQC.MQPMO_FAIL_IF_QUIESCING)
        self.pmo.Options |= pymqi.CMQC.MQPMO_SYNC_POINT

    def connect(self):
        self._connect_to_qmgr(manager, channel, host, port, user, password)  # these arguments are retrieved from config
        self._q = pymqi.Queue(self.qmgr, queue_name)

    def _connect_to_qmgr(self, manager, channel, host, port, user, password):
        self.qmgr = pymqi.QueueManager(None)
        _cd = pymqi.CD()
        _cd.ChannelName = channel.encode('utf-8')
        _cd.ConnectionName = f"{host} ({port})".encode('utf-8')
        _cd.ChannelType = pymqi.CMQC.MQCHT_CLNTCONN
        _cd.TransportType = pymqi.CMQC.MQXPT_TCP
        _connect_options = pymqi.CMQC.MQCNO_HANDLE_SHARE_BLOCK
        self.qmgr.connect_with_options(manager, cd=_cd, opts=_connect_options, user=user, password=password)

    def send_message(self, message):
        self._q.put(message, self.pmd, self.pmo)

    def commit(self):
        self.qmgr.commit()

    def rollback(self):
        self.qmgr.backout()

    def close_connection(self):
        self.qmgr.disconnect()
EDIT:
Additional information:
I'm running IBM MQ client version 9.1.0.1.
There are no errors in the AMQERR0*.LOG files.
LD_LIBRARY_PATH is set.
This error appeared while refactoring the code.
Below is the code that was working (before refactoring).
Some arguments in the function signatures are replaced with args* for the sake of brevity and readability.
main.py:
def connect_to_mq():
    return MQService(args*)  # these arguments are read from the config file

def process_chunk(args*):
    _mq = connect_to_mq()
    _mq.send_message('test')
    _mq.commit()
    _mq.close_connection()

c_processors = []
for i in range(num_of_processors):
    p = Process(target=process_chunk, args=(args*))
    p.start()
    c_processors.append(p)
for p in c_processors:
    p.join()
mq_service.py:
class MQService():
    def __init__(self, args*):
        self.pmd = pymqi.MD()
        self.pmd.Format = pymqi.CMQC.MQFMT_STRING
        self.pmo = pymqi.PMO(Options=pymqi.CMQC.MQPMO_FAIL_IF_QUIESCING)
        self.pmo.Options |= pymqi.CMQC.MQPMO_SYNC_POINT
        self._connect_to_qmgr(args*)
        self.connect_to_queue(args*)

    def _connect_to_qmgr(self, manager, channel, host, port, user, password):
        self.qmgr = pymqi.connect(manager,
                                  channel,
                                  "%s (%s)" % (host, port),
                                  user=user,
                                  password=password)

    def connect_to_queue(self, q_name):
        self._q = pymqi.Queue(self.qmgr, q_name)

    def send_message(self, message):
        self._q.put(message, self.pmd, self.pmo)

    def commit(self):
        self.qmgr.commit()

    def rollback(self):
        self.qmgr.backout()

    def close_connection(self):
        self.qmgr.disconnect()
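One structural difference stands out between the two versions: the working code connects to MQ inside process_chunk(), which runs in the child process, whereas the refactored code connects in WorkerProcess.__init__(), which executes in the parent process before start() forks the worker. MQ client connection handles are generally not usable across a fork, which would be consistent with reason code 2012 (MQRC_ENVIRONMENT_ERROR). A minimal sketch of deferring the connection to run(), untested and assuming the MQService pseudocode above:

class WorkerProcess(Process):
    def __init__(self):
        Process.__init__(self)
        self.mq = None  # do not connect here: __init__ runs in the parent process

    def run(self):
        # run() executes in the child process, so the MQ connection is
        # established after the fork, as in the pre-refactoring code
        self.mq = MQService()
        self.mq.send_message('test')
        self.mq.commit()
        self.mq.close_connection()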

Related

Shutdown during recv on python socket

During the execution of this code, it blocks on the join.
I have a TCP server running on ("127.0.0.1", 1777) for the test.
I tried using the socket directly with recv, but the result is the same.
Any idea why the shutdown on READ doesn't interrupt the read?
import socket
from threading import Thread
from time import sleep

class Parser(Thread):
    rbufsize = 4096
    wbufsize = 4096
    encoding = "utf-8"
    new_line = "\n"

    def __init__(self):
        super().__init__()
        self._socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
        self._wfile = None
        self._rfile = None

    def run(self):
        self._socket.connect(("127.0.0.1", 1777))
        self._rfile = self._socket.makefile('rb', self.rbufsize, encoding=self.encoding, newline=self.new_line)
        self._wfile = self._socket.makefile('wb', self.wbufsize, encoding=self.encoding, newline=self.new_line)
        while True:
            data = self._rfile.readline()
            if not data:
                break
            self._handle_data(data)
        self._cleanup()

    def _cleanup(self):
        """
        Close everything down.
        """
        if not self._wfile.closed:
            try:
                self._wfile.flush()
            except socket.error:
                # A final socket error may have occurred here, such as
                # the local error ECONNABORTED.
                pass
        self._socket.shutdown(socket.SHUT_RDWR)
        self._wfile.close()
        self._rfile.close()
        self._socket.close()

    def stop(self):
        self._socket.shutdown(socket.SHUT_RD)

if __name__ == "__main__":
    p = Parser()
    p.start()
    sleep(5)
    p.stop()
    print("start join")
    p.join()

Running a function on a thread gives an error

I am trying to run a function "generate_model", which takes three arguments, on a thread.
def thread_for_generate_model(Thread):
    def __init__(self, name, job_id, boolean_string, Batch_size):
        self.name = name
        self.job_id = job_id
        self.boolean_string = boolean_string
        self.Batch_size = Batch_size

    def run(self):
        LOGGER.info("vector model create started for job_id: %s on thread %s", self.job_id, self.name)
        generate_model(self.job_id, self.boolean_string, self.Batch_size)
        LOGGER.info("vector model created for job_id: %s", self.job_id)

def main():
    ....
    thread_for_generate_model("Thread_for_vectormodel", job_id, generate_search_string(job_id, keywords), 5000).start()
    # I am trying to run this function on a thread
    # generate_model(job_id, generate_search_string(job_id,keywords), 5000)
    ....
I got an error:
TypeError: thread_for_generate_model() takes 1 positional argument but 4 were given
Following the solution in the link, I modified run() by adding an additional parameter:
def run(self, event=None)
but I still get the same error. How do I rectify it?
The code below should do what you are trying to do - I have just added a few dummy functions etc. to get the code to not throw syntax errors or undefined functions/variables. This is roughly the structure you can follow.
As pointed out in the comments - use def something to define a method, and class Something to define a class.
from threading import Thread
import logging
import time

LOGGER = logging.getLogger()
logging.basicConfig()

class thread_for_generate_model(Thread):
    def __init__(self, name, job_id, boolean_string, Batch_size):
        Thread.__init__(self)
        self.name = name
        self.job_id = job_id
        self.boolean_string = boolean_string
        self.Batch_size = Batch_size

    def run(self):
        LOGGER.info("vector model create started for job_id: %s on thread %s", self.job_id, self.name)
        generate_model(self.job_id, self.boolean_string, self.Batch_size)
        LOGGER.info("vector model created for job_id: %s", self.job_id)

def generate_search_string(job_id, keywords):
    return False

def generate_model(job_id, string, batch_size):
    while True:
        time.sleep(1)

def main():
    job_id = 0
    keywords = ['a', 'b']
    thread_for_generate_model("Thread_for_vectormodel", job_id, generate_search_string(job_id, keywords), 5000).start()
    # I am trying to run this function on a thread
    # generate_model(job_id, generate_search_string(job_id, keywords), 5000)

if __name__ == '__main__':
    main()

RabbitMQ Pika connection reset, (-1, ConnectionResetError(104, 'Connection reset by peer'))

I searched through Stack Overflow and am posting this question because no solution worked for me, and my question might be different from the others.
I am writing a script which gets an article from a RabbitMQ queue, processes the article to count words and extract keywords from it, and dumps it in a DB. My script works fine, but after some time of execution I get this exception:
(-1, "ConnectionResetError(104, 'Connection reset by peer')")
I have no idea why I am getting this. I have tried a lot of the solutions available on Stack Overflow; none worked for me. I have written my script two different ways; both work fine, but after some time the same exception occurs.
Here is my first code:
def app_main():
    global channel, results, speedvars
    Logger.log_message('Starting app main')

    # Edit 4
    def pika_connect():
        connection = pika.BlockingConnection(pika.ConnectionParameters(
            host=Config.AMQ_DAEMONS['base']['amq-host']))
        channel = connection.channel()
        print("In pika connect")
        Logger.log_message('Setting up input queue consumer')
        channel.queue_declare(Config.AMQ_DAEMONS['consumer']['input'], durable=True)
        channel.basic_consume(on_message, queue=Config.AMQ_DAEMONS['consumer']['input'], no_ack=True)
        Logger.log_message('Starting loop')
        channel.start_consuming()
    #########

    speedvars = SpeedVars()
    speedtracker = SpeedTracker(speedvars)
    speedtracker.start()
    sender = ResultsSender(results, speedvars)
    sender.start()

    # Edit 5: starting 10 threads to listen to pika
    for th in range(qthreads):
        Logger.log_message('Starting thread: ' + str(th))
        try:
            t = Thread(target=pika_connect, args=())
            t.start()
        except Exception as e:
            Logger.error_message("Exception in starting threads " + str(e))

try:
    app_main()
except Exception as e:
    Logger.error_message("Exception in APP MAIN " + str(e))
Here is my second code:
def app_main():
    global channel, results, speedvars
    Logger.log_message('Starting app main')

    speedvars = SpeedVars()
    speedtracker = SpeedTracker(speedvars)
    speedtracker.start()
    sender = ResultsSender(results, speedvars)
    sender.start()

    connection = pika.BlockingConnection(pika.ConnectionParameters(
        host=Config.AMQ_DAEMONS['base']['amq-host']))
    channel = connection.channel()
    print("In app main")
    Logger.log_message('Setting up input queue consumer')
    channel.queue_declare(Config.AMQ_DAEMONS['consumer']['input'], durable=True)
    channel.basic_consume(on_message, queue=Config.AMQ_DAEMONS['consumer']['input'], no_ack=True)
    Logger.log_message('Starting loop')

    try:
        channel.start_consuming()
    except Exception as e:
        Logger.error_message("Exception in start_consuming in main " + str(e))
        raise e

try:
    app_main()
except Exception as e:
    Logger.error_message("Exception in APP MAIN " + str(e))
In my first code I used threading because I want to speed up the processing of articles.
This is my callback function:
def on_message(ch, method, properties, message):
    Logger.log_message("Starting parsing new msg ")
    handle_message(message)
EDIT: Full Code
import os
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)

from Modules import Logger
import pika
import Config
import json
import pickle
import Pipeline
import sys
import time
import datetime
import threading
import queue
import functools

from pid.decorator import pidfile

Logger.log_init(Config.AMQ_DAEMONS['consumer']['log-ident'])

#qthreads = Config.AMQ_DAEMONS['consumer']['threads']
results = queue.Queue()
channel = None
speedvars = None

SPD_RECEIVED = 'received'
SPD_DISCARDED = 'discarded'
SPD_SENT = 'sent'

class SpeedVars(object):
    vars = {}
    lock = None

    def __init__(self):
        self.lock = threading.Lock()

    def inc(self, var):
        self.lock.acquire()
        try:
            if var in self.vars:
                self.vars[var] += 1
            else:
                self.vars[var] = 1
        finally:
            self.lock.release()

    def dec(self, var):
        self.lock.acquire()
        try:
            if var in self.vars:
                self.vars[var] -= 1
            else:
                Logger.error_message('Cannot decrement ' + var + ', not tracked')
        finally:
            self.lock.release()

    def get(self, var):
        out = None
        self.lock.acquire()
        try:
            if var in self.vars:
                out = self.vars[var]
            else:
                Logger.error_message('Cannot get ' + var + ', not tracked')
        finally:
            self.lock.release()
        return out

    def get_all(self):
        out = None
        self.lock.acquire()
        try:
            out = self.vars.copy()
        finally:
            self.lock.release()
        return out

class SpeedTracker(threading.Thread):
    speedvars = None
    start_ts = None
    last_vars = {}

    def __init__(self, speedvars):
        super(SpeedTracker, self).__init__()
        self.start_ts = time.time()
        self.speedvars = speedvars
        Logger.log_message('Setting up speed tracker')

    def run(self):
        while True:
            time.sleep(Config.AMQ_DAEMONS['consumer']['speed-tracking-interval'])
            prev = self.last_vars
            cur = self.speedvars.get_all()
            now = time.time()
            if len(prev) > 0:
                for key in cur:
                    qty = cur[key] - prev[key]
                    avg = qty / Config.AMQ_DAEMONS['consumer']['speed-tracking-interval']
                    overall_avg = cur[key] / (now - self.start_ts)
                    Logger.log_message('Speed-tracking (' + key + '): total ' + str(cur[key])
                                       + ', delta ' + str(qty) + ', speed ' + '%0.2f' % avg + '/sec'
                                       + ', overall speed ' + '%0.2f' % overall_avg + '/sec')
                pending = cur[SPD_RECEIVED] - cur[SPD_DISCARDED] - cur[SPD_SENT]
                pending_avg = pending / (now - self.start_ts)
                Logger.log_message('Speed-tracking (pending): total ' + str(pending)
                                   + ', overall speed ' + '%0.2f' % pending_avg + '/sec')
            self.last_vars = cur

class ResultsSender(threading.Thread):
    channel = None
    results = None
    speedvars = None

    def __init__(self, results, speedvars):
        super(ResultsSender, self).__init__()
        connection = pika.BlockingConnection(pika.ConnectionParameters(
            host=Config.AMQ_DAEMONS['base']['amq-host']))
        self.channel = connection.channel()
        Logger.log_message('Setting up output exchange')
        self.channel.exchange_declare(exchange=Config.AMQ_DAEMONS['consumer']['output'], exchange_type='direct')
        self.results = results
        self.speedvars = speedvars

    def run(self):
        while True:
            item = self.results.get()
            self.channel.basic_publish(
                exchange=Config.AMQ_DAEMONS['consumer']['output'],
                routing_key='',
                body=item)
            self.speedvars.inc(SPD_SENT)

def parse_message(message):
    try:
        bodytxt = message.decode('UTF-8')
        body = json.loads(bodytxt)
        return body
    except Exception as e:
        Logger.error_message("Cannot parse message - " + str(e))
        raise e

def get_body_elements(body):
    try:
        artid = str(body.get('article_id'))
        article_dt = datetime.datetime.fromtimestamp(body.get('pubTime'))
        date = article_dt.strftime(Config.DATE_FORMAT)
        article = "\n".join([body.get('title', ''), body.get('subheading', ''), body.get('content', '')])
        return (artid, date, article)
    except Exception as e:
        Logger.error_message("Cannot retrieve article attributes " + str(e))
        raise e

def process_article(id, date, text):
    global results, speedvars
    try:
        Logger.log_message('Processing article ' + id)
        keywords = Pipeline.extract_keywords(text)
        send_data = {"id": id, "date": date, "keywords": keywords}
        results.put(pickle.dumps(send_data))
        # print('Queue Size:', results.qsize())
    except Exception as e:
        Logger.error_message("Problem processing article " + str(e))
        raise e

def ack_message(ch, delivery_tag):
    """Note that `channel` must be the same pika channel instance via which
    the message being ACKed was retrieved (AMQP protocol constraint).
    """
    if channel.is_open:
        channel.basic_ack(delivery_tag)
    else:
        # Channel is already closed, so we can't ACK this message;
        # log and/or do something that makes sense for your app in this case.
        Logger.error_message("Channel is already closed, so we can't ACK this message")

def handle_message(connection, ch, delivery_tag, message):
    global speedvars
    start = time.time()
    thread_id = threading.get_ident()
    try:
        speedvars.inc(SPD_RECEIVED)
        body = parse_message(message)
        (id, date, text) = get_body_elements(body)
        words = len(text.split())
        if words <= Config.AMQ_DAEMONS['consumer']['word-count-limit']:
            process_article(id, date, text)
        else:
            Logger.log_message('Ignoring article, over word count limit')
            speedvars.inc(SPD_DISCARDED)
    except Exception as e:
        Logger.error_message("Could not process message - " + str(e))

    cb = functools.partial(ack_message, ch, delivery_tag)
    connection.add_callback_threadsafe(cb)
    Logger.log_message("Thread id: " + str(thread_id) + " Delivery tag: " + str(delivery_tag))
    Logger.log_message("Total time taken to handle message : " + str(time.time() - start))

# CALLBACK
## def on_message(ch, method, properties, message):
##     global executor
##     executor.submit(handle_message, message)

def on_message(ch, method, header_frame, message, args):
    (connection, threads) = args
    delivery_tag = method.delivery_tag
    t = threading.Thread(target=handle_message, args=(connection, ch, delivery_tag, message))
    t.start()
    threads.append(t)

####################################################

#pidfile(piddir=Config.AMQ_DAEMONS['base']['pid-dir'], pidname=Config.AMQ_DAEMONS['consumer']['pid-file'])
def app_main():
    global channel, results, speedvars

    speedvars = SpeedVars()
    speedtracker = SpeedTracker(speedvars)
    speedtracker.start()
    sender = ResultsSender(results, speedvars)
    sender.start()

    # Pika connection
    connection = pika.BlockingConnection(pika.ConnectionParameters(
        host=Config.AMQ_DAEMONS['base']['amq-host']))
    channel = connection.channel()

    Logger.log_message('Setting up input queue consumer')
    channel.queue_declare(Config.AMQ_DAEMONS['consumer']['input'], durable=True)
    #channel.basic_consume(on_message, queue=Config.AMQ_DAEMONS['consumer']['input'], no_ack=True)

    channel.basic_qos(prefetch_count=1)
    threads = []
    on_message_callback = functools.partial(on_message, args=(connection, threads))
    channel.basic_consume(on_message_callback, Config.AMQ_DAEMONS['consumer']['input'])

    Logger.log_message('Starting loop')
    ## channel.start_consuming()
    try:
        channel.start_consuming()
    except KeyboardInterrupt:
        channel.stop_consuming()

    # Wait for all to complete
    for thread in threads:
        thread.join()

    connection.close()

app_main()
Pika is not taking a lot of time to process a message, yet I am still facing the connection reset issue:
Total time taken to handle message : 0.0005991458892822266
Your handle_message method is blocking heartbeats because all of your code, including the Pika I/O loop, is running on the same thread. Check out this example of how to run your work (handle_message) on a separate thread from Pika's I/O loop and then acknowledge messages correctly.
NOTE: the RabbitMQ team monitors the rabbitmq-users mailing list and only sometimes answers questions on StackOverflow.
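For reference, a minimal sketch of that pattern (assuming pika 1.x; the queue name and host are placeholders): the slow work runs on a worker thread, and the acknowledgement is handed back to the connection's I/O loop via add_callback_threadsafe(), because channels must not be used from other threads directly.

import functools
import threading
import time

import pika

def do_work(connection, channel, delivery_tag, body):
    # Simulate slow processing on the worker thread, so heartbeats
    # can still be serviced by the connection's I/O loop
    time.sleep(5)
    # Channels are not thread-safe: hand the ack back to the I/O loop
    cb = functools.partial(channel.basic_ack, delivery_tag)
    connection.add_callback_threadsafe(cb)

def on_message(channel, method, properties, body, connection):
    t = threading.Thread(target=do_work,
                         args=(connection, channel, method.delivery_tag, body))
    t.start()

connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='task_queue', durable=True)
channel.basic_qos(prefetch_count=1)
channel.basic_consume(queue='task_queue',
                      on_message_callback=functools.partial(on_message, connection=connection))
channel.start_consuming()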
I was getting the same issue. Increasing the duration of the heartbeat and connection timeout configuration didn't work for me. I finally figured out that if you have already created a channel and you are not publishing anything on it for several minutes (20 minutes in my case), you get this error.
The solutions which worked for me:
Create the channel immediately, just before publishing any message, OR
Use try-except, and if you get an exception, create another channel and republish. i.e.:
try:
    channel.basic_publish(exchange='', routing_key='abcd', body=data)
except Exception as e1:
    connection = pika.BlockingConnection(pika.ConnectionParameters(host='1.128.0.3', credentials=credentials))
    channel = connection.channel()
    channel.basic_publish(exchange='', routing_key='abcd', body=data)
This will at least keep things running and prevent losing any data. I'm not an expert in this, but hope this helps someone!
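A sketch of the first option, assuming the connection itself is still open:

channel = connection.channel()  # open a fresh channel right before publishing
channel.basic_publish(exchange='', routing_key='abcd', body=data)
channel.close()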
I also faced the same issue and resolved it by increasing the duration of the heartbeat and connection timeout configuration.
Many thanks to @LukeBakken, who actually identified the root cause.
Here is how you can configure the timeouts:
import pika

def main():
    # NOTE: These parameters work with all Pika connection types
    params = pika.ConnectionParameters(heartbeat=600, blocked_connection_timeout=300)
    conn = pika.BlockingConnection(params)
    chan = conn.channel()
    chan.basic_publish('', 'my-alphabet-queue', "abc")

    # If publish causes the connection to become blocked, then this conn.close()
    # would hang until the connection is unblocked, if ever. However, the
    # blocked_connection_timeout connection parameter would interrupt the wait,
    # resulting in ConnectionClosed exception from BlockingConnection (or the
    # on_connection_closed callback call in an asynchronous adapter)
    conn.close()

if __name__ == '__main__':
    main()
Reference: https://pika.readthedocs.io/en/stable/examples/heartbeat_and_blocked_timeouts.html

Flask SocketIO Eventlet Second Simultaneous Read Error

I am trying to use Flask-SocketIO to connect to 3 separate RabbitMQ queues:
- 1 that listens for a config update message
- and 2 that are defined in a database
On server start, I connect to the database, get the configs there, and start the consumers. Then, in a web frontend, if one of the configuration settings is changed, the changes are written to the database and a config update message is added to the first RabbitMQ queue. Ideally, this would trigger a shutdown of the Pika consumer that I currently have running, a joining of that thread, and a relaunch of another thread with the new configuration information.
Everything that I just laid out is working, but on the first attempt to shut down the consumer, I always get the error:
There was an error stopping the consumer: Second simultaneous read on fileno x detected. Unless you really know what you're doing, make sure that only one greenthread can read any particular socket. Consider using a pools.Pool. If you do know what you're doing and want to disable this error, call eventlet.debug.hub_prevent_multiple_readers(False)...
The consumers are eventually closed and restarted; however, I would like to understand why this is occurring and how I could change my code to stop it. The place where the error always happens is in the handoff between these two functions; the first is in my Consumer class and the second is in my Queue class:
def run(self):
    while True:
        self.go = True
        self.message_queue = Queue(self.configs, self.go, self.mongo_config)
        self.message_queue.start()
        # here I wait for an event which I set when the config is updated
        self.event.wait()
        self.go = False
        setattr(self.message_queue, 'go', False)
        new_config = self.refresh_configs()
        setattr(self, 'configs', new_config)
        # when this is called, it should close the existing connection and join the thread
        self.message_queue.refresh_connection()
        self.message_queue.join()

def refresh_connection(self):
    while True:
        if not self.go:
            break
        self.rmq_connection = rabbitmq_consumer(...)
        self.rmq_connection.start_consuming()
    self._lock.acquire()
    try:
        # right here is where the second read error occurs
        self.rmq_connection.stop_consuming()
        self.rmq_connection.close()
    except Exception as e:
        print('There was an error stopping the consumer: {0}'.format(e))
    self._lock.release()
Below is a much more complete example of the code, in case it helps shed some light on the issue.
thread = None
thread_lock = Lock()
event = Event()

class Queue(Thread):
    def __init__(self, configs, go, outbound):
        Thread.__init__(self)
        self._lock = eventlet.semaphore.Semaphore(1)
        self.go = go
        self.configs = configs
        self.outbound = outbound
        ...
        self.rmq_connection = None

    def on_rmq_message(self, ...):
        ...
        self._lock.acquire()
        socketio.emit('eventEmit', {'data': results}, namespace='/')
        result = rabbitmq_producer(...)
        self._lock.release()

    def refresh_connection(self):
        while True:
            if not self.go:
                break
            self.rmq_connection = rabbitmq_consumer(...)
            self.rmq_connection.start_consuming()
        self._lock.acquire()
        try:
            self.rmq_connection.stop_consuming()
            self.rmq_connection.close()
        except Exception as e:
            print('There was an error stopping the consumer: {0}'.format(e))
        self._lock.release()

    def run(self):
        self.refresh_connection()

class Consumer(Thread):
    def __init__(self, configs, event, channel, mongo_config):
        Thread.__init__(self)
        self.configs = configs
        self.mongo_config = mongo_config
        self.event = event
        self.message_queue = None
        self.go = None
        ...

    def refresh_configs(self):
        r = None
        mconnection = connect_mongodb(...)
        results = retrieve(...)
        for result in results.data:
            if result.get('channel') == self.channel:
                r = result
        return r

    def run(self):
        while True:
            self.go = True
            self.message_queue = Queue(self.configs, self.go, self.mongo_config)
            self.message_queue.start()
            self.event.wait()
            self.go = False
            setattr(self.message_queue, 'go', False)
            new_config = self.refresh_configs()
            setattr(self, 'configs', new_config)
            self.message_queue.refresh_connection()
            self.message_queue.join()

class Producer(Thread):
    def __init__(self, configs, event):
        Thread.__init__(self)
        self._lock = eventlet.semaphore.Semaphore(1)
        self.configs = configs
        self.event = event
        ...
        self.channel = self.configs.get('channel', None)
        self.config_consumer = None

    def on_config_message(self, ...):
        ...
        self._lock.acquire()
        socketio.emit('configEmit', {'data': results}, namespace='/')
        self._lock.release()
        self.event.set()
        self.event.clear()

    def run(self):
        self.config_consumer = rabbitmq_consumer(...)
        self.config_consumer.start_consuming()

def init_connections():
    with app.test_request_context('/'):
        mconnection = connect_mongodb(...)
        results = retrieve(mconnection.engine, mongo_collection, mongo_db, cursor=False)
        ...
        t1 = Producer(configs, event)
        for result in results.data:
            config = {
                ...
            }
            t2 = Consumer(result, event, result['channel'], config)
            t2.start()
        t1.start()
        t1.join()
        t2.join()

@socketio.on('connect')
def handle_connection():
    global thread
    socketio.emit('connectEmit', {'data': 'Connected!'}, namespace='/')
    with thread_lock:
        if thread is None:
            thread = socketio.start_background_task(target=init_connections)
Thank you for any help you can offer.

Pass asynchronous websocket.send() to stdout/stderr wrapper class

I have a class that unbuffers stdout and stderr, like so:
class Unbuffered:
    def __init__(self, stream):
        self.stream = stream

    def write(self, data):
        data = data.strip()
        if data.startswith("INFO: "):
            data = data[6:]
        if '[' in data:
            progress = re.compile(r"\[(\d+)/(\d+)\]")
            data = progress.match(data)
            total = data.group(2)
            current = data.group(1)
            data = '{0}/{1}'.format(current, total)
        if data.startswith("ERROR: "):
            data = data[7:]
        self.stream.write(data + '\n')
        self.stream.flush()

    def __getattr__(self, attr):
        return getattr(self.stream, attr)
The output comes from a function run in a ProcessPoolExecutor when inbound data from the websocket arrives.
I want the output printed to the console as well as sent to my websocket client. I tried making Unbuffered async and passing the websocket instance to it, but no luck.
UPDATE: The essentials of run() and my websocket handler() look something like this:
def run(url, path):
    logging.addLevelName(25, "INFO")
    fmt = logging.Formatter('%(levelname)s: %(message)s')
    #----
    output.progress_stream = Unbuffered(sys.stderr)
    stream = Unbuffered(sys.stdout)
    #----
    level = logging.INFO
    hdlr = logging.StreamHandler(stream)
    hdlr.setFormatter(fmt)
    log.addHandler(hdlr)
    log.setLevel(level)
    get_media(url, opt)

async def handler(websocket, path):
    while True:
        inbound = json.loads(await websocket.recv())
        if inbound is None:
            break
        url = inbound['url']
        if 'path' in inbound:
            path = inbound['path'].rstrip(os.path.sep) + os.path.sep
        else:
            path = os.path.expanduser("~") + os.path.sep
        # blah more code
        while inbound != None:
            await asyncio.sleep(.001)
            await loop.run_in_executor(None, run, url, path)
run(), handler() and Unbuffered are separate from each other.
Rewriting get_media() to use asyncio instead of running it in a different thread would be best. Otherwise, there are some options to communicate between a regular thread and coroutines, for example, using a socketpair:
import asyncio
import socket
import threading
import time
import random

# threads stuff
def producer(n, writer):
    for i in range(10):
        # print("sending", i)
        writer.send("message #{}.{}\n".format(n, i).encode())
        time.sleep(random.uniform(0.1, 1))

def go(writer):
    threads = [threading.Thread(target=producer, args=(i + 1, writer,))
               for i in range(5)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    writer.send("bye\n".encode())

# asyncio coroutines
async def clock():
    for i in range(11):
        print("The time is", i)
        await asyncio.sleep(1)

async def main(reader):
    buffer = ""
    while True:
        buffer += (await loop.sock_recv(reader, 10000)).decode()
        # print(len(buffer))
        while "\n" in buffer:
            msg, _nl, buffer = buffer.partition("\n")
            print("Got", msg)
            if msg == "bye":
                return

reader, writer = socket.socketpair()
reader.setblocking(False)
threading.Thread(target=go, args=(writer,)).start()
# time.sleep(1.5) # socket is buffering

loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.wait([clock(), main(reader)]))
loop.close()
You can also try this 3rd-party thread+asyncio compatible queue: janus
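As a rough sketch of the janus approach (assuming janus is installed via pip; the queue bridges a plain producer thread and an asyncio consumer):

import asyncio
import threading

import janus  # pip install janus

def producer(sync_q):
    # Runs in a regular thread and uses the blocking, thread-safe side
    for i in range(5):
        sync_q.put("message #{}".format(i))
    sync_q.put(None)  # sentinel: tell the consumer we're done

async def consumer(async_q):
    # Runs in the event loop and uses the awaitable side of the same queue
    while True:
        item = await async_q.get()
        async_q.task_done()
        if item is None:
            break
        print("Got", item)

async def main():
    q = janus.Queue()  # must be created inside a running event loop
    t = threading.Thread(target=producer, args=(q.sync_q,))
    t.start()
    await consumer(q.async_q)
    t.join()
    q.close()
    await q.wait_closed()

asyncio.run(main())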
