Flask SocketIO Eventlet Second Simultaneous Read Error - multithreading

I am trying to use Flask-SocketIO to connect to three separate RabbitMQ queues:
- one that listens for a config update message
- two that are defined in a database
On server start, I connect to the database, get the configs stored there, and start the consumers. Then, if one of the configuration settings is changed in the web frontend, the change is written to the database and a config update message is added to the first RabbitMQ queue. Ideally, this would trigger a shutdown of the Pika consumer I currently have running, a join of that thread, and a relaunch of another thread with the new configuration information.
Everything I just laid out is working, but on the first attempt to shut down the consumer, I always get the error:
There was an error stopping the consumer: Second simultaneous read on fileno x detected.
Unless you really know what you're doing, make sure that only one greenthread can read any particular socket.
Consider using a pools.Pool. If you do know what you're doing and want to disable this error, call eventlet.debug.hub_prevent_multiple_readers(False)...
The consumers are eventually closed and restarted; however, I would like to understand why this occurs and how I could change my code to stop it. The error always happens in the handoff between these two functions; the first is in my Consumer class and the second is in my Queue class:
def run(self):
    while True:
        self.go = True
        self.message_queue = Queue(self.configs, self.go, self.mongo_config)
        self.message_queue.start()
        # here I wait for an event which I set when the config is updated
        self.event.wait()
        self.go = False
        setattr(self.message_queue, 'go', False)
        new_config = self.refresh_configs()
        setattr(self, 'configs', new_config)
        # when this is called, it should close the existing connection and join the thread
        self.message_queue.refresh_connection()
        self.message_queue.join()
def refresh_connection(self):
    while True:
        if not self.go:
            break
        self.rmq_connection = rabbitmq_consumer(...)
        self.rmq_connection.start_consuming()
    self._lock.acquire()
    try:
        # right here is where the second read error occurs
        self.rmq_connection.stop_consuming()
        self.rmq_connection.close()
    except Exception as e:
        print('There was an error stopping the consumer: {0}'.format(e))
    self._lock.release()
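From what I can tell, the error means a second greenthread is reading the same socket that start_consuming() is already blocked on. A pattern I have seen suggested for this (a minimal sketch, assuming a plain pika.BlockingConnection rather than my rabbitmq_consumer wrapper) is to never call stop_consuming() from the other thread directly, but to hand it to the connection's own I/O loop:

def request_stop(connection, channel):
    # add_callback_threadsafe() schedules the callback on the
    # connection's own I/O loop, so only the consuming thread ever
    # touches the socket.
    connection.add_callback_threadsafe(channel.stop_consuming)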
Below is a much more complete example of the code, in case it helps shed some light on the issue.
thread = None
thread_lock = Lock()
event = Event()

class Queue(Thread):
    def __init__(self, configs, go, outbound):
        Thread.__init__(self)
        self._lock = eventlet.semaphore.Semaphore(1)
        self.go = go
        self.configs = configs
        self.outbound = outbound
        ...
        self.rmq_connection = None

    def on_rmq_message(self, ...):
        ...
        self._lock.acquire()
        socketio.emit('eventEmit', {'data': results}, namespace='/')
        result = rabbitmq_producer(...)
        self._lock.release()

    def refresh_connection(self):
        while True:
            if not self.go:
                break
            self.rmq_connection = rabbitmq_consumer(...)
            self.rmq_connection.start_consuming()
        self._lock.acquire()
        try:
            self.rmq_connection.stop_consuming()
            self.rmq_connection.close()
        except Exception as e:
            print('There was an error stopping the consumer: {0}'.format(e))
        self._lock.release()

    def run(self):
        self.refresh_connection()

class Consumer(Thread):
    def __init__(self, configs, event, channel, mongo_config):
        Thread.__init__(self)
        self.configs = configs
        self.mongo_config = mongo_config
        self.event = event
        self.message_queue = None
        self.go = None
        ...

    def refresh_configs(self):
        r = None
        mconnection = connect_mongodb(...)
        results = retrieve(...)
        for result in results.data:
            if result.get('channel') == self.channel:
                r = result
        return r

    def run(self):
        while True:
            self.go = True
            self.message_queue = Queue(self.configs, self.go, self.mongo_config)
            self.message_queue.start()
            self.event.wait()
            self.go = False
            setattr(self.message_queue, 'go', False)
            new_config = self.refresh_configs()
            setattr(self, 'configs', new_config)
            self.message_queue.refresh_connection()
            self.message_queue.join()

class Producer(Thread):
    def __init__(self, configs, event):
        Thread.__init__(self)
        self._lock = eventlet.semaphore.Semaphore(1)
        self.configs = configs
        self.event = event
        ...
        self.channel = self.configs.get('channel', None)
        self.config_consumer = None

    def on_config_message(self, ...):
        ...
        self._lock.acquire()
        socketio.emit('configEmit', {'data': results}, namespace='/')
        self._lock.release()
        self.event.set()
        self.event.clear()

    def run(self):
        self.config_consumer = rabbitmq_consumer(...)
        self.config_consumer.start_consuming()

def init_connections():
    with app.test_request_context('/'):
        mconnection = connect_mongodb(...)
        results = retrieve(mconnection.engine, mongo_collection, mongo_db, cursor=False)
        ...
        t1 = Producer(configs, event)
        for result in results.data:
            config = {
                ...
            }
            t2 = Consumer(result, event, result['channel'], config)
            t2.start()
        t1.start()
        t1.join()
        t2.join()

@socketio.on('connect')
def handle_connection():
    global thread
    socketio.emit('connectEmit', {'data': 'Connected!'}, namespace='/')
    with thread_lock:
        if thread is None:
            thread = socketio.start_background_task(target=init_connections)
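For completeness, the error message itself names an escape hatch. A minimal illustration of that workaround (it silences eventlet's check rather than fixing the second reader, so I would rather understand the root cause):

import eventlet.debug

# Disables eventlet's multiple-readers guard process-wide, as the
# error message suggests; the underlying double read remains.
eventlet.debug.hub_prevent_multiple_readers(False)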
Thank you for any help you can offer.

Related

MQRC_ENVIRONMENT_ERROR on commit

I'm experiencing an issue while trying to use pymqi to send messages to IBM MQ.
The project I'm working on:
- reads from DB2
- prepares data for a WorkerProcess
- the WorkerProcess makes some decisions based on that data and sends a message to MQ
When the WorkerProcess tries to commit the message to MQ, I get an error:
Stacktrace:
worker_process.py:
    self.mq.commit()
mq_service.py:
    self.qmgr.commit()
pymqi/__init__.py, line 1689, in commit
    raise MQMIError(rv[0], rv[1])
pymqi.MQMIError: MQI Error. Comp: 2, Reason 2012: FAILED: MQRC_ENVIRONMENT_ERROR
The code and stack trace have been typed by hand and may contain typos.
The code below is pseudocode of what I'm doing.
Any help and/or advice is greatly appreciated.
main.py:

c_processors = []
for i in range(num_of_processors):
    p = WorkerProcess()
    p.start()
    c_processors.append(p)
for p in c_processors:
    p.join()

worker_process.py:

class WorkerProcess(Process):
    def __init__(self):
        Process.__init__(self)
        self.mq = MQService()

    def run(self):
        self.mq.send_message('test')
        self.mq.commit()
        self.mq.close_connection()

mq_service.py:

class MQService():
    def __init__(self):
        self.connect()
        self.pmd = pymqi.MD()
        self.pmd.Format = pymqi.CMQC.MQFMT_STRING
        self.pmo = pymqi.PMO(Options=pymqi.CMQC.MQPMO_FAIL_IF_QUIESCING)
        self.pmo.Options |= pymqi.CMQC.MQGMO_SYNCPOINT

    def connect(self):
        self._connect_to_qmgr(manager, channel, host, port, user, password)  # these arguments are retrieved from config
        self._q = pymqi.Queue(self.qmgr, queue_name)

    def _connect_to_qmgr(self, manager, channel, host, port, user, password):
        self.qmgr = pymqi.QueueManager(None)
        _cd = pymqi.CD()
        _cd.ChannelName = channel.encode('utf-8')
        _cd.ConnectionName = f"{host} ({port})".encode('utf-8')
        _cd.ChannelType = pymqi.CMQC.MQCHT_CLNTCONN
        _cd.TransportType = pymqi.CMQC.MQXPT_TCP
        _connect_options = pymqi.CMQC.MQCNO_HANDLE_SHARE_BLOCK
        self.qmgr.connect_with_options(manager, cd=_cd, opts=_connect_options, user=user, password=password)

    def send_message(self, message):
        self._q.put(message, self.pmd, self.pmo)

    def commit(self):
        self.qmgr.commit()

    def rollback(self):
        self.qmgr.backout()

    def close_connection(self):
        self.qmgr.disconnect()
EDIT:
Additional information:
- I'm running IBM MQ client version 9.1.0.1.
- There are no errors in the AMQERR0*.LOG files.
- LD_LIBRARY_PATH is set.
- This error appeared while refactoring the code.
Below is the code that was working (before refactoring).
Some arguments in function signatures are replaced with (args*) for the sake of brevity and readability.
main.py:

def connect_to_mq():
    return MQService(args*)  # these arguments are read from the config file

def process_chunk(args*):
    _mq = connect_to_mq()
    _mq.send_message('test')
    _mq.commit()
    _mq.close_connection()

c_processors = []
for i in range(num_of_processors):
    p = Process(target=process_chunk, args=(args*))
    p.start()
    c_processors.append(p)
for p in c_processors:
    p.join()

mq_service.py:

class MQService():
    def __init__(self, args*):
        self.pmd = pymqi.MD()
        self.pmd.Format = pymqi.CMQC.MQFMT_STRING
        self.pmo = pymqi.PMO(Options=pymqi.CMQC.MQPMO_FAIL_IF_QUIESCING)
        self.pmo.Options |= pymqi.CMQC.MQGMO_SYNCPOINT
        self._connect_to_qmgr(args*)
        self.connect_to_queue(args*)

    def _connect_to_qmgr(self, manager, channel, host, port, user, password):
        self.qmgr = pymqi.connect(manager,
                                  channel,
                                  "%s (%s)" % (host, port),
                                  user=user,
                                  password=password)

    def connect_to_queue(self, q_name):
        self._q = pymqi.Queue(self.qmgr, q_name)

    def send_message(self, message):
        self._q.put(message, self.pmd, self.pmo)

    def commit(self):
        self.qmgr.commit()

    def rollback(self):
        self.qmgr.backout()

    def close_connection(self):
        self.qmgr.disconnect()
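One difference that stands out between the failing and working versions: in the working code, the MQService connection is created inside process_chunk, which runs in the child process, whereas in the refactored code MQService() connects in WorkerProcess.__init__, which still runs in the parent before the fork. A minimal sketch of the refactored class with the connection moved after the fork (an untested rearrangement, kept here as an illustration only):

from multiprocessing import Process

class WorkerProcess(Process):
    def __init__(self):
        Process.__init__(self)
        # No MQ connection here: __init__ runs in the parent process.

    def run(self):
        # run() executes in the child, so the MQ connection handle is
        # created after the fork, matching the pre-refactor behaviour.
        self.mq = MQService()
        self.mq.send_message('test')
        self.mq.commit()
        self.mq.close_connection()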

asyncio, multiprocessing and websockets not returning a result

I am trying to get websockets, asyncio, and multiprocessing to work together. I have been stuck on this for two days and would appreciate some help.
I have searched for websockets, asyncio, and multiprocessing on Stack Overflow as well as in general internet searches. I have found threading examples, which I can make work.
import asyncio
import websockets
import threading

class Connection():
    def __init__(self):
        self.loop = asyncio.new_event_loop()
        sock_thread = threading.Thread(target=self.new_loop)
        sock_thread.start()
        self.x = 0

    async def connect_to_socket(self):
        self.websocket = await websockets.connect('ws://demos.kaazing.com/echo')
        await self.websocket.send("hello")
        response = await self.websocket.recv()
        print(response)

    async def listen_to_socket(self):
        while True:
            await asyncio.sleep(0)
            print('Listening for a message...')
            while self.x < 5:
                message = await self.websocket.recv()
                print("< {}".format(message))
                print('\n\n')
                print(self.x)
                self.x += 1
            self.task.cancel()
            self.loop.close()

    def stop(self):
        print('canceling task\n\n')
        self.x = 0
        self.task.cancel()

    def new_loop(self):
        self.task = self.loop.create_task(self.connect_to_socket())
        self.loop.run_forever()

    def make_task(self):
        self.task = self.loop.create_task(self.listen_to_socket())

if __name__ == '__main__':
    conn = Connection()
This works with no issues. I have seen examples where multiprocessing opens a process inside an event loop, but that is not what I want. I want to open a new process and run an event loop inside that new process. Inside the event loop, I want to run my sockets. I want to free my main process from listening to sockets and use a child process to listen to them while I do computationally expensive work on my main process.
When I try the following code, I get nothing.
import asyncio
import websockets
import multiprocessing

class Connection(multiprocessing.Process):
    def __init__(self, tasks, results):
        super().__init__()
        self.tasks = tasks
        self.results = results
        self.loop = asyncio.new_event_loop()
        print('create event loop')
        self.x = 0
        self.task = self.loop.create_task(self.test())
        print('done with connecting')

    # connect to socket and get response
    async def test(self):
        self.ws = await websockets.connect('ws://demos.kaazing.com/echo')
        await self.websocket.send("hello")
        response = await self.websocket.recv()
        print(response)

    # listen to socket long term after connection
    async def listen_to_socket(self):
        while True:
            await asyncio.sleep(0)
            print('Listening for a message...')
            while self.x < 5:
                await self.websocket.send("hello")
                message = await self.websocket.recv()
                print("< {}".format(message))
                print('\n\n')
                print(self.x)
                self.x += 1
                self.results.put(message)
            self.task.cancel()
            self.loop.close()

    # stop task
    def stop(self):
        print('canceling task\n\n')
        self.x = 0
        self.task.cancel()

    # listen to socket long term
    # I have not called this as I can't even get a response from test()
    def make_task(self):
        self.task = self.loop.create_task(self.listen_to_socket())

if __name__ == '__main__':
    tasks = multiprocessing.JoinableQueue()
    results = multiprocessing.Queue()
    process = Connection(tasks, results)
    if tasks.empty():
        print('empty')
    else:
        print(tasks.get())
I expect to connect to the socket and receive a response. However, I get nothing: no error messages, no printout from the connection, just an empty queue, and that's all. How do I get the return values from my websocket?
I am still new enough to this that I am not sure what I am doing wrong. Any advice would help me out.
Thank you
For anyone interested, I got this to work. It is very much a work in progress and I am still adding to it, and since this is for me and relatively simple, I didn't comment it.
I started with the code from this answer and built on it.
Python3 Websockets / Multithreading issue
import asyncio
import websockets
import sys
import time
import multiprocessing

class connect():
    def __init__(self, results, tasks):
        self.x = 0
        self.results = results
        self.tasks = tasks
        self.loop = asyncio.new_event_loop()

    async def commander_start(self):
        while not self.tasks.empty():
            self.uri = self.tasks.get()
            self.tasks.task_done()
            self.ws = await websockets.connect(self.uri)
            while True:
                await asyncio.sleep(0.1)
                print('Listening for a message...')
                while self.x < 5:
                    await self.ws.send("hello")
                    message = await self.ws.recv()
                    message = message + str(self.x)
                    print("< {}".format(message))
                    print('\n\n')
                    print(self.x)
                    self.x += 1
                    self.results.put(message)
                self.ws.close()
                self.x = 0
                print('ws closed')
                self.task.cancel()
                await asyncio.sleep(1)
                self.loop.close()

    def run_commander(self):
        self.task = self.loop.create_task(self.commander_start())
        self.loop.run_forever()

    def main(self):
        self.commander = multiprocessing.Process(target=self.run_commander)
        self.commander.start()
        time.sleep(3)
        self.commander.kill()
        print('is alive:', self.commander, self.commander.is_alive())

if __name__ == "__main__":
    size_q = 10
    tasks = multiprocessing.JoinableQueue(maxsize=size_q)
    results = multiprocessing.Queue(maxsize=size_q)
    conn = connect(results, tasks)
    tasks.put('ws://demos.kaazing.com/echo')
    conn.main()
    print('tasks2 put')
    tasks.put('wss://echo.websocket.org')
    conn.main()
    if not results.empty():
        for x in range(size_q):
            print(results.get())
There is a bunch I am going to change and improve; I just wanted the base system to work so I could build from there, so anyone who uses this will need to modify it to suit their needs. For instance, I spawn a new process and kill it instead of running a continuous process and giving it work to do, and I am still trying to figure out the specifics of the joinable queue and how to use it to add jobs after the process and event loop have been created.
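The core of what finally worked, distilled into a standalone sketch (hypothetical names; the point is that the event loop is created and run entirely inside the child process, and that start() is actually called on the process):

import asyncio
import multiprocessing

def child(results):
    # The loop is created inside the child process; a loop created in
    # the parent's __init__ never runs in the child.
    loop = asyncio.new_event_loop()

    async def work():
        results.put('hello from the child loop')

    loop.run_until_complete(work())
    loop.close()

if __name__ == '__main__':
    results = multiprocessing.Queue()
    p = multiprocessing.Process(target=child, args=(results,))
    p.start()   # the earlier non-working version never called start()
    p.join()
    print(results.get())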

RabbitMQ Pika connection reset , (-1, ConnectionResetError(104, 'Connection reset by peer'))

I searched through Stack Overflow and am posting this question because no solution worked for me, and my question might be different from the others.
I am writing a script which gets an article from a RabbitMQ queue and processes the article to count words and extract keywords from it and dump them in a DB. My script works fine, but after some time of execution I get this exception:
(-1, "ConnectionResetError(104, 'Connection reset by peer')")
I have no idea why I am getting this. I have tried a lot of the solutions available on Stack Overflow; none worked for me. I have written my script and tried it in two different ways; both work fine, but after some time the same exception occurs.
here is my first code:
def app_main():
    global channel, results, speedvars
    Logger.log_message('Starting app main')

    # Edit 4
    def pika_connect():
        connection = pika.BlockingConnection(pika.ConnectionParameters(
            host=Config.AMQ_DAEMONS['base']['amq-host']))
        channel = connection.channel()
        print("In pika connect")
        Logger.log_message('Setting up input queue consumer')
        channel.queue_declare(Config.AMQ_DAEMONS['consumer']['input'], durable=True)
        channel.basic_consume(on_message, queue=Config.AMQ_DAEMONS['consumer']['input'], no_ack=True)
        Logger.log_message('Starting loop')
        channel.start_consuming()
    #########

    speedvars = SpeedVars()
    speedtracker = SpeedTracker(speedvars)
    speedtracker.start()
    sender = ResultsSender(results, speedvars)
    sender.start()

    # Edit 5: starting 10 threads to listen to pika
    for th in range(qthreads):
        Logger.log_message('Starting thread: ' + str(th))
        try:
            t = Thread(target=pika_connect, args=())
            t.start()
        except Exception as e:
            Logger.error_message("Exception in starting threads " + str(e))

try:
    app_main()
except Exception as e:
    Logger.error_message("Exception in APP MAIN " + str(e))
here is my second code:
def app_main():
    global channel, results, speedvars
    Logger.log_message('Starting app main')

    speedvars = SpeedVars()
    speedtracker = SpeedTracker(speedvars)
    speedtracker.start()
    sender = ResultsSender(results, speedvars)
    sender.start()

    connection = pika.BlockingConnection(pika.ConnectionParameters(
        host=Config.AMQ_DAEMONS['base']['amq-host']))
    channel = connection.channel()
    print("In app main")
    Logger.log_message('Setting up input queue consumer')
    channel.queue_declare(Config.AMQ_DAEMONS['consumer']['input'], durable=True)
    channel.basic_consume(on_message, queue=Config.AMQ_DAEMONS['consumer']['input'], no_ack=True)
    Logger.log_message('Starting loop')
    try:
        channel.start_consuming()
    except Exception as e:
        Logger.error_message("Exception in start_consuming in main " + str(e))
        raise e

try:
    app_main()
except Exception as e:
    Logger.error_message("Exception in APP MAIN " + str(e))
In my first version I used threading because I want to speed up the processing of the articles.
This is my callback function:
def on_message(ch, method, properties, message):
    Logger.log_message("Starting parsing new msg ")
    handle_message(message)
EDIT: Full Code
import os
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)

from Modules import Logger
import pika
import Config
import json
import pickle
import Pipeline
import sys
import time
import datetime
import threading
import queue
import functools

from pid.decorator import pidfile

Logger.log_init(Config.AMQ_DAEMONS['consumer']['log-ident'])
#qthreads = Config.AMQ_DAEMONS['consumer']['threads']
results = queue.Queue()
channel = None
speedvars = None

SPD_RECEIVED = 'received'
SPD_DISCARDED = 'discarded'
SPD_SENT = 'sent'

class SpeedVars(object):
    vars = {}
    lock = None

    def __init__(self):
        self.lock = threading.Lock()

    def inc(self, var):
        self.lock.acquire()
        try:
            if var in self.vars:
                self.vars[var] += 1
            else:
                self.vars[var] = 1
        finally:
            self.lock.release()

    def dec(self, var):
        self.lock.acquire()
        try:
            if var in self.vars:
                self.vars[var] -= 1
            else:
                Logger.error_message('Cannot decrement ' + var + ', not tracked')
        finally:
            self.lock.release()

    def get(self, var):
        out = None
        self.lock.acquire()
        try:
            if var in self.vars:
                out = self.vars[var]
            else:
                Logger.error_message('Cannot get ' + var + ', not tracked')
        finally:
            self.lock.release()
        return out

    def get_all(self):
        out = None
        self.lock.acquire()
        try:
            out = self.vars.copy()
        finally:
            self.lock.release()
        return out
class SpeedTracker(threading.Thread):
    speedvars = None
    start_ts = None
    last_vars = {}

    def __init__(self, speedvars):
        super(SpeedTracker, self).__init__()
        self.start_ts = time.time()
        self.speedvars = speedvars
        Logger.log_message('Setting up speed tracker')

    def run(self):
        while True:
            time.sleep(Config.AMQ_DAEMONS['consumer']['speed-tracking-interval'])
            prev = self.last_vars
            cur = self.speedvars.get_all()
            now = time.time()
            if len(prev) > 0:
                q = {}
                for key in cur:
                    qty = cur[key] - prev[key]
                    avg = qty / Config.AMQ_DAEMONS['consumer']['speed-tracking-interval']
                    overall_avg = cur[key] / (now - self.start_ts)
                    Logger.log_message('Speed-tracking (' + key + '): total ' + str(cur[key])
                                       + ', delta ' + str(qty) + ', speed ' + '%0.2f' % avg + '/sec'
                                       + ', overall speed ' + '%0.2f' % overall_avg + '/sec')
                pending = cur[SPD_RECEIVED] - cur[SPD_DISCARDED] - cur[SPD_SENT]
                pending_avg = pending / (now - self.start_ts)
                Logger.log_message('Speed-tracking (pending): total ' + str(pending)
                                   + ', overall speed ' + '%0.2f' % pending_avg + '/sec')
            self.last_vars = cur

class ResultsSender(threading.Thread):
    channel = None
    results = None
    speedvars = None

    def __init__(self, results, speedvars):
        super(ResultsSender, self).__init__()
        connection = pika.BlockingConnection(pika.ConnectionParameters(
            host=Config.AMQ_DAEMONS['base']['amq-host']))
        self.channel = connection.channel()
        Logger.log_message('Setting up output exchange')
        self.channel.exchange_declare(exchange=Config.AMQ_DAEMONS['consumer']['output'], exchange_type='direct')
        self.results = results
        self.speedvars = speedvars

    def run(self):
        while True:
            item = self.results.get()
            self.channel.basic_publish(
                exchange=Config.AMQ_DAEMONS['consumer']['output'],
                routing_key='',
                body=item)
            self.speedvars.inc(SPD_SENT)
def parse_message(message):
    try:
        bodytxt = message.decode('UTF-8')
        body = json.loads(bodytxt)
        return body
    except Exception as e:
        Logger.error_message("Cannot parse message - " + str(e))
        raise e

def get_body_elements(body):
    try:
        artid = str(body.get('article_id'))
        article_dt = datetime.datetime.fromtimestamp(body.get('pubTime'))
        date = article_dt.strftime(Config.DATE_FORMAT)
        article = "\n".join([body.get('title', ''), body.get('subheading', ''), body.get('content', '')])
        return (artid, date, article)
    except Exception as e:
        Logger.error_message("Cannot retrieve article attributes " + str(e))
        raise e

def process_article(id, date, text):
    global results, speedvars
    try:
        Logger.log_message('Processing article ' + id)
        keywords = Pipeline.extract_keywords(text)
        send_data = {"id": id, "date": date, "keywords": keywords}
        results.put(pickle.dumps(send_data))
        # print('Queue Size:', results.qsize())
    except Exception as e:
        Logger.error_message("Problem processing article " + str(e))
        raise e

def ack_message(ch, delivery_tag):
    """Note that `channel` must be the same pika channel instance via which
    the message being ACKed was retrieved (AMQP protocol constraint).
    """
    if channel.is_open:
        channel.basic_ack(delivery_tag)
    else:
        # Channel is already closed, so we can't ACK this message;
        # log and/or do something that makes sense for your app in this case.
        Logger.error_message("Channel is already closed, so we can't ACK this message")

def handle_message(connection, ch, delivery_tag, message):
    global speedvars
    start = time.time()
    thread_id = threading.get_ident()
    try:
        speedvars.inc(SPD_RECEIVED)
        body = parse_message(message)
        (id, date, text) = get_body_elements(body)
        words = len(text.split())
        if words <= Config.AMQ_DAEMONS['consumer']['word-count-limit']:
            process_article(id, date, text)
        else:
            Logger.log_message('Ignoring article, over word count limit')
            speedvars.inc(SPD_DISCARDED)
    except Exception as e:
        Logger.error_message("Could not process message - " + str(e))
    cb = functools.partial(ack_message, ch, delivery_tag)
    connection.add_callback_threadsafe(cb)
    Logger.log_message("Thread id: " + str(thread_id) + " Delivery tag: " + str(delivery_tag))
    Logger.log_message("Total time taken to handle message: " + str(time.time() - start))
# CALLBACK
## def on_message(ch, method, properties, message):
##     global executor
##     executor.submit(handle_message, message)

def on_message(ch, method, header_frame, message, args):
    (connection, threads) = args
    delivery_tag = method.delivery_tag
    t = threading.Thread(target=handle_message, args=(connection, ch, delivery_tag, message))
    t.start()
    threads.append(t)

####################################################
# @pidfile(piddir=Config.AMQ_DAEMONS['base']['pid-dir'], pidname=Config.AMQ_DAEMONS['consumer']['pid-file'])
def app_main():
    global channel, results, speedvars

    speedvars = SpeedVars()
    speedtracker = SpeedTracker(speedvars)
    speedtracker.start()
    sender = ResultsSender(results, speedvars)
    sender.start()

    # Pika connection
    connection = pika.BlockingConnection(pika.ConnectionParameters(
        host=Config.AMQ_DAEMONS['base']['amq-host']))
    channel = connection.channel()

    Logger.log_message('Setting up input queue consumer')
    channel.queue_declare(Config.AMQ_DAEMONS['consumer']['input'], durable=True)
    #channel.basic_consume(on_message, queue=Config.AMQ_DAEMONS['consumer']['input'], no_ack=True)
    channel.basic_qos(prefetch_count=1)
    threads = []
    on_message_callback = functools.partial(on_message, args=(connection, threads))
    channel.basic_consume(on_message_callback, Config.AMQ_DAEMONS['consumer']['input'])

    Logger.log_message('Starting loop')
    ## channel.start_consuming()
    try:
        channel.start_consuming()
    except KeyboardInterrupt:
        channel.stop_consuming()

    # Wait for all to complete
    for thread in threads:
        thread.join()

    connection.close()

app_main()
Pika is not taking a lot of time to process a message, yet I am still facing the connection reset issue:
Total time taken to handle message: 0.0005991458892822266
Your handle_message method is blocking heartbeats because all of your code, including the Pika I/O loop, is running on the same thread. Check out this example of how to run your work (handle_message) on a separate thread from Pika's I/O loop and then acknowledge messages correctly.
NOTE: the RabbitMQ team monitors the rabbitmq-users mailing list and only sometimes answers questions on Stack Overflow.
I was getting the same issue. Increasing the duration of the heartbeat and connection timeout configuration didn't work for me. I finally figured out that if you have already created a channel and you are not publishing anything on it for several minutes (20 minutes in my case), you get this error.
The solutions which worked for me:
- Create the channel immediately before publishing any message, or
- use try-except, and if you get an exception, create another channel and republish, i.e.:
try:
    channel.basic_publish(exchange='', routing_key='abcd', body=data)
except Exception as e1:
    connection = pika.BlockingConnection(pika.ConnectionParameters(host='1.128.0.3', credentials=credentials))
    channel = connection.channel()
    channel.basic_publish(exchange='', routing_key='abcd', body=data)
This will at least keep things running and prevent losing any data. I'm not an expert in this, but I hope this helps someone!
I also faced the same issue and resolved it by increasing the duration of the heartbeat and connection timeout configuration.
Many thanks to @LukeBakken, who actually identified the root cause.
Here is how you can configure the timeouts:
import pika

def main():
    # NOTE: These parameters work with all Pika connection types
    params = pika.ConnectionParameters(heartbeat=600, blocked_connection_timeout=300)
    conn = pika.BlockingConnection(params)
    chan = conn.channel()
    chan.basic_publish('', 'my-alphabet-queue', "abc")

    # If publish causes the connection to become blocked, then this conn.close()
    # would hang until the connection is unblocked, if ever. However, the
    # blocked_connection_timeout connection parameter would interrupt the wait,
    # resulting in ConnectionClosed exception from BlockingConnection (or the
    # on_connection_closed callback call in an asynchronous adapter)
    conn.close()

if __name__ == '__main__':
    main()
Reference: https://pika.readthedocs.io/en/stable/examples/heartbeat_and_blocked_timeouts.html

Python Tweepy streaming with multitasking

In Python 2.7 I am successfully using the following code to listen to a direct message stream on an account:
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy import API
from tweepy.streaming import StreamListener

# These values are appropriately filled in the code
consumer_key = '######'
consumer_secret = '######'
access_token = '######'
access_token_secret = '######'

class StdOutListener(StreamListener):
    def __init__(self):
        self.tweetCount = 0

    def on_connect(self):
        print("Connection established!!")

    def on_disconnect(self, notice):
        print("Connection lost!! : ", notice)

    def on_data(self, status):
        print("Entered on_data()")
        print(status, flush=True)
        return True

    # I can add code here to execute when a message is received, such as slicing the message and activating something else
    def on_direct_message(self, status):
        print("Entered on_direct_message()")
        try:
            print(status, flush=True)
            return True
        except BaseException as e:
            print("Failed on_direct_message()", str(e))

    def on_error(self, status):
        print(status)

def main():
    try:
        auth = OAuthHandler(consumer_key, consumer_secret)
        auth.secure = True
        auth.set_access_token(access_token, access_token_secret)
        api = API(auth)

        # If the authentication was successful, you should
        # see the name of the account print out
        print(api.me().name)

        stream = Stream(auth, StdOutListener())
        stream.userstream()
    except BaseException as e:
        print("Error in main()", e)

if __name__ == '__main__':
    main()
This is great, and I can also execute code when I receive a message, but the jobs I'm adding to a work queue need to be able to stop after a certain amount of time. I'm using the popular start = time.time() approach, subtracting the current time to determine the elapsed time, but this streaming code does not loop to check the time. It just waits for a new message, so the clock is never checked, so to speak.
My question is this: how can I get streaming to occur and still track the elapsed time? Do I need to use multithreading as described in this article? http://www.tutorialspoint.com/python/python_multithreading.htm
I am new to Python and having fun playing around with hardware attached to a Raspberry Pi. I have learned so much from Stack Overflow, thank you all :)
I'm not sure exactly how you want to decide when to stop, but you can pass a timeout argument to the stream to give up after a certain delay.
stream = Stream(auth, StdOutListener(), timeout=30)
That will call your listener's on_timeout() method. If you return True, it will continue streaming. Otherwise, it will stop.
Between the stream's timeout argument and your listener's on_timeout(), you should be able to decide when to stop streaming.
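A minimal sketch of that idea, assuming the same listener API the question uses (older tweepy, where the stream calls on_timeout() once the timeout elapses):

import time
from tweepy.streaming import StreamListener

class TimedListener(StreamListener):
    def __init__(self, max_seconds):
        self.start = time.time()
        self.max_seconds = max_seconds

    def on_timeout(self):
        # Returning True keeps streaming; anything falsy stops it.
        return (time.time() - self.start) < self.max_seconds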
I found I was able to get the multithreading code working the way I wanted. Unlike this tutorial from Tutorialspoint, which gives an example of launching multiple instances of the same code with varying timing parameters, I was able to get two different blocks of code to run in their own instances:
- One block of code constantly adds 10 to a global variable (var).
- Another block checks when 5 seconds have elapsed, then prints var's value.
This demonstrates two different tasks executing and sharing data using Python multithreading.
See the code below:
import threading
import time

exitFlag = 0
var = 10

class myThread1(threading.Thread):
    def __init__(self, threadID, name, counter):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.counter = counter

    def run(self):
        # var counting block begins here
        print "addemup starting"
        global var
        while (var < 100000):
            if var > 90000:
                var = 0
            var = var + 10

class myThread2(threading.Thread):
    def __init__(self, threadID, name, counter):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.counter = counter

    def run(self):
        # time checking block begins here and prints var every 5 secs
        print "checkem starting"
        global var
        start = time.time()
        elapsed = time.time() - start
        while (elapsed < 10):
            elapsed = time.time() - start
            if elapsed > 5:
                print "var = ", var
                start = time.time()
                elapsed = time.time() - start

# Create new threads
thread1 = myThread1(1, "Thread-1", 1)
thread2 = myThread2(2, "Thread-2", 2)

# Start new threads
thread1.start()
thread2.start()

print "Exiting Main Thread"
My next task will be breaking my Twitter streaming into its own thread, and passing received direct messages as variables to a task-queueing program, while the first thread hopefully continues to listen for more direct messages.

2 threads each serving 2 different HTTP Server, will not stop after a given duration

So I have spent at least a couple of days on this problem.
I would like to have two HTTP server threads, each serving a different IP:port.
I open Firefox and navigate to either http://196.64.131.250:8001/ or http://196.64.131.250:8002, and it should do a GET.
I would also like my threads, or the program itself, to stop after a duration given on the command line, say 5 seconds.
I have done everything I could, even tried SIGALRM and a variable "keepRunning" which gets changed by a third thread after the duration, but my program does not stop. What am I doing wrong, please?
Note that I have commented out the daemon lines (ZhttpThread[1-2].daemon = True); if I don't comment them out, my threads stop right away. I want my HTTP server threads to keep servicing requests, and only when the duration DUR expires should the program or the threads stop.
import SimpleHTTPServer, SocketServer, logging, cgi, sys, signal, threading, time
import subprocess as sub

keepRunning = True
origTime = int(time.time())

class ServerHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    def do_GET(self):
        logging.warning("======= GET STARTED =======")
        getHdr = self.headers
        SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
        getHdr = self.headers
        print(', '.join((getHdr)))
        #if ("accept-encoding" in getHdr):
        if ("accept-encoding" in (', '.join((getHdr)))):
            print('Test Passed ---------------')
            signal.alarm(1)

class threadWithTO(threading.Thread):
    def __init__(self, thID, ip, port, timeout):
        threading.Thread.__init__(self)
        self.ip = ip
        self.port = port
        self.handler = ServerHandler
        self.httpd = SocketServer.TCPServer((self.ip, self.port), self.handler)

    def run(self):
        print(self.httpd)
        #self.httpd.serve_forever()
        if (keepRunning == True):
            self.httpd.handle_request()
        else:
            self._stop.set()

    def Run(self):
        self.start()

def timeHandler(signum, frame):
    print('Times up', signum)
    sys.exit()

def timeChkr(threadName, dur, t1, t2):
    print(origTime)
    print('Begin Timer thread')
    while True:
        nowTime = int(time.time())
        print(nowTime)
        delta = nowTime - origTime
        if (delta >= dur):
            keepRunning = False
            t1.stop()
            t2.stop()
        else:
            time.sleep(1)

def main():
    #signal.signal(signal.SIGALRM, timeHandler)
    #signal.alarm(DUR)
    origTime = int(time.time())
    ZhttpThread1 = threadWithTO("thread1", I, PORT, DUR)
    ZhttpThread2 = threadWithTO("thread2", I, (int(PORT)+1), DUR)

    t = threading.Thread(target=timeChkr, args=("thread3", DUR))
    t.daemon = True
    t.start()

    #ZhttpThread1.daemon = True
    #ZhttpThread2.daemon = True
    ZhttpThread1.Run()
    ZhttpThread2.Run()
OK, I figured out the issue is with the socket. The socket stays open, and even though I have tried socket.settimeout, I still can't get the socket to close.
Thanks to Andrew, my son, who sparked the idea in my head. Here is the solution:
class ServerHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    def do_GET(self):
        logging.warning("======= GET STARTED =======")
        logging.warning(self.headers)
        SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)

    def do_POST(self):
        logging.warning("======= POST STARTED =======")
        logging.warning(self.headers)
        form = cgi.FieldStorage(
            fp=self.rfile,
            headers=self.headers,
            environ={'REQUEST_METHOD': 'POST',
                     'CONTENT_TYPE': self.headers['Content-Type'],
                     })
        logging.warning("======= POST VALUES =======")
        print form.list
        '''
        for item in form.list:
            logging.warning(item) '''
        logging.warning("\n")
        SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)

class StoppableHTTPServer(BaseHTTPServer.HTTPServer):
    def server_bind(self):
        BaseHTTPServer.HTTPServer.server_bind(self)
        self.socket.settimeout(1)
        self.run = True

    def get_request(self):
        while self.run:
            try:
                sock, addr = self.socket.accept()
                sock.settimeout(None)
                return (sock, addr)
            except socket.timeout:
                pass

    def stop(self):
        self.run = False

    def serve(self):
        while self.run:
            #print "1"
            self.handle_request()

if __name__ == "__main__":
    if len(sys.argv) < 1:
        I = ""
        PORT = 8000
        DUR = 10
    else:
        I = sys.argv[1]
        PORT = int(sys.argv[2])
        DUR = int(sys.argv[3])

    #httpd1 = StoppableHTTPServer((I, PORT), SimpleHTTPServer.SimpleHTTPRequestHandler)
    #httpd2 = StoppableHTTPServer((I, (int(PORT)+1)), SimpleHTTPServer.SimpleHTTPRequestHandler)
    httpd1 = StoppableHTTPServer((I, PORT), ServerHandler)
    httpd2 = StoppableHTTPServer((I, (int(PORT)+1)), ServerHandler)
    thread.start_new_thread(httpd1.serve, ())
    thread.start_new_thread(httpd2.serve, ())
    #raw_input("Press <RETURN> to stop server\n")
    print '0'
    time.sleep(DUR)
    print 'Times up Dude'
    httpd1.stop()
    httpd2.stop()
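For what it's worth, the piece doing the real work here is the one-second socket timeout set in server_bind: accept() can then only block for about a second at a time, so get_request() keeps waking up, re-checks self.run, and the serve() loop can actually notice stop() instead of sitting in accept() forever.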
