I'm using a Python queue to insert data packets from MQTT listeners, but I'm not sure when this queue will receive an MQTT packet.
Can we put a listener onto this queue?
Note: this listener callback will be used to insert the data into the database.
import queue
import time
import threading

print('python queue')

def on_connect():
    print('connected')

# gives message from device
def on_message(client, userdata, msg):
    # print("Topic", msg.topic + "\nMessage:" + str(msg.payload))
    qMqtt.put('msg')
    # replace below line from here and move to listener
    queueToDB('msg')

def queueToDB(msg):
    qMqtt.get(msg)
    dbaseInsert(msg)

def dbaseInsert(data):
    # insert into query
    print("inserted")

def run():
    # mqttc = mqtt.Client()
    # mqttc.on_connect = on_connect
    # mqttc.on_message = on_message
    global qMqtt
    qMqtt = queue.Queue()
    on_connect()
    on_message('client', 'userdata', 'msg')

run()
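One way to get the behaviour of a "listener" on a queue.Queue is to run a dedicated worker thread that blocks on get() and performs the database insert for every item that arrives; the MQTT callback then only has to put() the payload. A minimal sketch of that pattern (worker_loop, db_insert and the stop sentinel are names made up for illustration, not part of the code above):

import queue
import threading

qMqtt = queue.Queue()
_STOP = object()  # sentinel object used to shut the worker down cleanly

def db_insert(data):
    # stand-in for the real INSERT query
    print("inserted", data)

def worker_loop():
    # Queue.get() blocks until an item is available, so this thread
    # effectively acts as the "listener" on the queue.
    while True:
        item = qMqtt.get()
        if item is _STOP:
            break
        db_insert(item)
        qMqtt.task_done()

worker = threading.Thread(target=worker_loop, daemon=True)
worker.start()

# The MQTT callback then only enqueues the payload:
def on_message(client, userdata, msg):
    qMqtt.put(msg.payload)

# ... and qMqtt.put(_STOP) stops the worker on shutdown.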
I'm trying to implement an async RPC client within a Flask server.
The idea is that each request spawns a thread with a UUID, and each request then waits until there is a response with the correct UUID in the queue attribute of the RpcClient object.
The problem is that one request out of two fails. I think it might be a problem with multi-threading, but I don't see where it comes from.
The bug can be seen here.
Using debug prints, it seems that the message with the correct UUID is received in the _on_response callback and updates the queue attribute in this instance correctly, but the queue attribute within the /rpc_call/<payload> endpoint doesn't synchronize (so queue[uuid] has the value of the response in the RpcClient callback but is still None in the scope of the endpoint).
My code:
from flask import Flask, jsonify
from gevent.pywsgi import WSGIServer
import sys
import os
import pika
import uuid
import time
import threading
class RpcClient(object):
    """Asynchronous Rpc client."""
    internal_lock = threading.Lock()
    queue = {}

    def __init__(self):
        self.connection = pika.BlockingConnection(
            pika.ConnectionParameters(host='rabbitmq'))
        self.channel = self.connection.channel()
        self.channel.basic_qos(prefetch_count=1)
        self.channel.exchange_declare(exchange='kaldi_expe', exchange_type='topic')

        # Create all the queues and bind them to the corresponding routing key
        self.channel.queue_declare('request', durable=True)
        result = self.channel.queue_declare('answer', durable=True)
        self.channel.queue_bind(exchange='kaldi_expe', queue='request', routing_key='kaldi_expe.web.request')
        self.channel.queue_bind(exchange='kaldi_expe', queue='answer', routing_key='kaldi_expe.kaldi.answer')

        self.callback_queue = result.method.queue

        thread = threading.Thread(target=self._process_data_events)
        thread.setDaemon(True)
        thread.start()

    def _process_data_events(self):
        self.channel.basic_consume(self.callback_queue, self._on_response, auto_ack=True)
        while True:
            with self.internal_lock:
                self.connection.process_data_events()
                time.sleep(0.1)

    def _on_response(self, ch, method, props, body):
        """On response we simply store the result in a local dictionary."""
        self.queue[props.correlation_id] = body

    def send_request(self, payload):
        corr_id = str(uuid.uuid4())
        self.queue[corr_id] = None

        with self.internal_lock:
            self.channel.basic_publish(exchange='kaldi_expe',
                                       routing_key="kaldi_expe.web.request",
                                       properties=pika.BasicProperties(
                                           reply_to=self.callback_queue,
                                           correlation_id=corr_id,
                                       ),
                                       body=payload)
        return corr_id
def flask_app():
    app = Flask("kaldi")

    @app.route('/', methods=['GET'])
    def server_is_up():
        return 'server is up', 200

    @app.route('/rpc_call/<payload>')
    def rpc_call(payload):
        """Simple Flask implementation for making asynchronous Rpc calls."""
        corr_id = app.config['RPCclient'].send_request(payload)

        while app.config['RPCclient'].queue[corr_id] is None:
            # print("queue server: " + str(app.config['RPCclient'].queue))
            time.sleep(0.1)

        return app.config['RPCclient'].queue[corr_id]

    return app
if __name__ == '__main__':
    while True:
        try:
            rpcClient = RpcClient()
            app = flask_app()
            app.config['RPCclient'] = rpcClient
            print("Rabbit MQ is connected, starting server", file=sys.stderr)
            app.run(debug=True, threaded=True, host='0.0.0.0')
        except pika.exceptions.AMQPConnectionError as e:
            print("Waiting for RabbitMq startup" + str(e), file=sys.stderr)
            time.sleep(1)
        except Exception as e:
            worker.log.error(e)
            exit(e)
I found where the bug came from:
The debug=True in the line app.run(debug=True, threaded=True, host='0.0.0.0') restarts the server at startup.
The whole script is then re-run from the beginning. Because of this, another rpcClient is initialized and consumes from the same queue. The problem is that the previous thread is still running as well, so two rpcClient instances consume from the same queue, with one of them being virtually useless.
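A common way to keep debug mode but avoid the double initialization is to disable Flask's reloader, so the module is only imported once; a minimal sketch of that change (everything else stays as in the script above):

# Keep the debugger but skip the reloader, so the script is not re-run
# and only one RpcClient (and hence one consumer thread) is ever created.
app.run(debug=True, use_reloader=False, threaded=True, host='0.0.0.0')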
All,
I modified the sample Receive python script for Azure EventHub a bit, but when I run it, it goes into a loop fetching the same events over and over. I'm not sending any events to the Event Hub since I want to read what is already there, and I don't see a while loop here, so how is this happening, and how do I stop after it has read all the events currently in the EventHub?
Thanks
grajee
# https://learn.microsoft.com/en-us/python/api/overview/azure/eventhub-readme?view=azure-python#consume-events-from-an-event-hub
import logging
from azure.eventhub import EventHubConsumerClient

connection_str = 'Endpoint=sb://testhubns01.servicebus.windows.net/;SharedAccessKeyName=getevents;SharedAccessKey=testtestest='
consumer_group = '$Default'
eventhub_name = 'testpart'
client = EventHubConsumerClient.from_connection_string(connection_str, consumer_group, eventhub_name=eventhub_name)

logger = logging.getLogger("azure.eventhub")
logging.basicConfig(level=logging.INFO)

def on_event(partition_context, event):
    logger.info("Received event from partition: \"{}\" : \"{}\"".format(partition_context.partition_id, event.body_as_str()))
    partition_context.update_checkpoint(event)

with client:
    client.receive(
        on_event=on_event,
        starting_position="-1",  # "-1" is from the beginning of the partition.
    )
    # receive events from specified partition:
    # client.receive(on_event=on_event, partition_id='0')

client.close()
Without a checkpoint store the client has nowhere to persist its position, so every run starts again from the beginning of the partition. The piece of code below, from here, makes it clearer.
import asyncio

from azure.eventhub.aio import EventHubConsumerClient
from azure.eventhub.extensions.checkpointstoreblobaio import BlobCheckpointStore

connection_str = '<< CONNECTION STRING FOR THE EVENT HUBS NAMESPACE >>'
consumer_group = '<< CONSUMER GROUP >>'
eventhub_name = '<< NAME OF THE EVENT HUB >>'
storage_connection_str = '<< CONNECTION STRING FOR THE STORAGE >>'
container_name = '<< NAME OF THE BLOB CONTAINER >>'

async def on_event(partition_context, event):
    # do something
    await partition_context.update_checkpoint(event)  # Or update_checkpoint every N events for better performance.

async def receive(client):
    await client.receive(
        on_event=on_event,
        starting_position="-1",  # "-1" is from the beginning of the partition.
    )

async def main():
    checkpoint_store = BlobCheckpointStore.from_connection_string(storage_connection_str, container_name)
    client = EventHubConsumerClient.from_connection_string(
        connection_str,
        consumer_group,
        eventhub_name=eventhub_name,
        checkpoint_store=checkpoint_store,  # For load balancing and checkpoint. Leave None for no load balancing.
    )
    async with client:
        await receive(client)

if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
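Note that receive() itself blocks and keeps pumping until the client is closed. If you only want to drain what is currently in the hub and then stop, one pattern (a sketch, not part of the linked sample; it reuses connection_str, consumer_group and eventhub_name from the question) is to run the consumer in a background thread and close the client after a while:

import threading
import time

from azure.eventhub import EventHubConsumerClient

client = EventHubConsumerClient.from_connection_string(
    connection_str, consumer_group, eventhub_name=eventhub_name)

def on_event(partition_context, event):
    print(event.body_as_str())
    partition_context.update_checkpoint(event)

# Run the blocking receive() in a background thread ...
worker = threading.Thread(
    target=client.receive,
    kwargs={"on_event": on_event, "starting_position": "-1"},
)
worker.start()

# ... and stop it from the main thread once enough time has passed
# for the existing events to be read (adjust to your volume).
time.sleep(30)
client.close()  # makes receive() return so the thread can exit
worker.join()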
With the MQTT subscribe client I am subscribing to a lot of topics (over 6000), but I am not getting the results that change on the fly; I'm lagging. Does MQTT give the possibility to subscribe to this many topics in parallel in the background? Would loop_start be enough?
What should I pay attention to when subscribing to more topics?
import logging

import paho.mqtt.client as mqtt
import requests
import zmq
import pandas as pd

PORT = 1351

def set_publisher():
    context = zmq.Context()
    socket_server = context.socket(zmq.PUB)
    socket_server.bind(f"tcp://*:{PORT}")
    return socket_server

# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
    # logging.warning(f"Connected with result code :: code : {rc}")
    print(f"Connected with result code :: code : {rc}")

    # Subscribing in on_connect() means that if we lose the connection and
    # reconnect then subscriptions will be renewed.
    client.subscribe(topics)

# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
    msg = msg.payload
    # logging.info(f"message:: {msg}\n")
    print(f"message:: {msg}\n")
    if msg:
        publisher.send(f"{msg}")

def on_disconnect(client, userdata, rc):
    if rc != 0:
        # logging.warning(f"Unexpected disconnection :: code: {rc}")
        print(f"Unexpected disconnection :: code: {rc}")
        # todo: if rc is change hostname raise except

client = mqtt.Client(protocol=mqtt.MQTTv31, transport="tcp")
client.username_pw_set(******, password=******)

topics = [(f"topic{i}", 0) for i in range(6000)]

client.on_connect = on_connect
client.on_message = on_message
client.on_disconnect = on_disconnect

if client.connect(hostname= *****, port= **** , keepalive=300) != 0:
    # logging.info("Could not connect to MQTT Broker !")
    print("Could not connect to MQTT Broker !")

client.loop_forever(timeout=3000)
You are describing a situation where compute power (either at the client, the broker, or in between) is not sufficient to handle your scenario. It is a common occurrence, and that is what performance testing is for: does your setup handle your scenario for your requirements? Capacity planning then expands that question to: ... in the future.
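On the client side, one thing worth experimenting with (a sketch under assumptions, not a guaranteed fix: the broker host, port and client count below are placeholders) is splitting the topic list across several paho clients, each running its own network loop in the background with loop_start(), so that a single network loop and on_message callback are not the bottleneck:

import paho.mqtt.client as mqtt

NUM_CLIENTS = 4  # placeholder: tune to your hardware and broker limits
topics = [(f"topic{i}", 0) for i in range(6000)]

def on_message(client, userdata, msg):
    # Keep this callback as cheap as possible; hand the payload off to a
    # queue or worker thread instead of doing heavy work here.
    pass

def make_on_connect(topic_chunk):
    def on_connect(client, userdata, flags, rc):
        client.subscribe(topic_chunk)  # re-subscribes after reconnects too
    return on_connect

clients = []
for n in range(NUM_CLIENTS):
    chunk = topics[n::NUM_CLIENTS]  # each client gets a disjoint slice
    c = mqtt.Client(client_id=f"subscriber-{n}", protocol=mqtt.MQTTv31)
    c.on_connect = make_on_connect(chunk)
    c.on_message = on_message
    c.connect("broker.example.com", 1883, keepalive=300)  # placeholder host/port
    c.loop_start()  # network loop runs in a background thread
    clients.append(c)

# The main thread stays free; call c.loop_stop() and c.disconnect() on shutdown.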
I want to send data through WebSockets as soon as a client is connected.
The data lives in a different place than the WebSocket handler. How can I get the data to the client?
The server should hold the loop and the handler. In the connector I connect to a TCP socket to get the data out of some hardware. I expect to have no more than 6 WebSockets open at a time. The data comes as a stream out of the TCP socket.
server.py
import os
from tornado import web, websocket
import asyncio

import connector

class StaticFileHandler(web.RequestHandler):
    def set_default_headers(self):
        self.set_header("Access-Control-Allow-Origin", "*")

    def get(self):
        self.render('index.html')

class WSHandler(websocket.WebSocketHandler):
    def open(self):
        print('new connection')
        self.write_message("connected")

    def on_message(self, message):
        print('message received %s' % message)
        self.write_message("pong")

    def on_close(self):
        print('connection closed')

public_root = 'web_src'

handlers = [
    (r'/', StaticFileHandler),
    (r'/ws', WSHandler),
]

settings = dict(
    template_path=os.path.join(os.path.dirname(__file__), public_root),
    static_path=os.path.join(os.path.dirname(__file__), public_root),
    debug=True,
)

app = web.Application(handlers, **settings)

sensorIP = "xxx.xxx.xxx.xxx"

if __name__ == "__main__":
    app.listen(8888)
    asyncio.ensure_future(connector.main_task(sensorIP))
    asyncio.get_event_loop().run_forever()
connector.py
import yaml
import asyncio

class RAMReceiver:
    def __init__(self, reader):
        self.reader = reader
        self.remote_data = None
        self.initParams = None

    async def work(self):
        i = 0
        while True:
            data = await self.reader.readuntil(b"\0")
            self.remote_data = yaml.load(data[:-1].decode("utf-8", "backslashreplace"))

            # here I want to emit some data
            # send self.remote_data to websockets

            if i == 0:
                i += 1
                self.initParams = self.remote_data
                # here I want to emit some data after the open event is triggered
                # send self.initParams as soon as a client has connected

async def main_task(host):
    tasks = []
    (ram_reader,) = await asyncio.gather(asyncio.open_connection(host, 51000))
    receiver = RAMReceiver(ram_reader[0])
    tasks.append(receiver.work())

    while True:
        await asyncio.gather(*tasks)
You can use Tornado's add_callback function to call a method on your websocket handler to send the messages.
Here's an example:
1. Create an additional method on your websocket handler which will receive a message from connector.py and send it to connected clients:

# server.py

class WSHandler(websocket.WebSocketHandler):

    # make it a classmethod so that
    # it can be accessed directly
    # from the class without `self`
    @classmethod
    async def send_data(cls, data):
        # write your code for sending data to the clients
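One possible body for send_data, assuming WSHandler keeps a class-level set of connected handlers (the clients set and the open/on_close bookkeeping below are an addition for illustration, not part of the original answer):

# server.py

class WSHandler(websocket.WebSocketHandler):
    clients = set()  # assumed: all currently connected handlers

    def open(self):
        WSHandler.clients.add(self)
        print('new connection')
        self.write_message("connected")

    def on_close(self):
        WSHandler.clients.discard(self)
        print('connection closed')

    @classmethod
    async def send_data(cls, data):
        # Fan the payload out to every connected client.
        for client in list(cls.clients):
            try:
                client.write_message(str(data))
            except websocket.WebSocketClosedError:
                pass  # client disconnected between iteration and write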
2. Pass the currently running IOLoop and WSHandler.send_data to your connector.py:
# server.py
from tornado import ioloop
...
if __name__ == "__main__":
    ...
    io_loop = ioloop.IOLoop.current()  # current IOLoop
    callback = WSHandler.send_data

    # pass io_loop and callback to main_task
    asyncio.ensure_future(connector.main_task(sensorIP, io_loop, callback))
    ...
3. Then modify the main_task function in connector.py so that it receives io_loop and callback as arguments and passes them on to RAMReceiver, as in the sketch below.
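A minimal sketch of that change (parameter names follow the answer; the rest of main_task is unchanged from the question):

# connector.py

async def main_task(host, io_loop, callback):
    tasks = []
    (ram_reader,) = await asyncio.gather(asyncio.open_connection(host, 51000))

    # Hand the loop and the websocket callback down to the receiver.
    receiver = RAMReceiver(ram_reader[0], io_loop, callback)
    tasks.append(receiver.work())

    while True:
        await asyncio.gather(*tasks)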
4. Finally, use io_loop.add_callback to call WSHandler.send_data:
class RAMReceiver:
    def __init__(self, reader, io_loop, callback):
        ...
        self.io_loop = io_loop
        self.callback = callback

    async def work(self):
        ...
        data = "Some data"
        self.io_loop.add_callback(self.callback, data)
        ...
I'm trying to start a long blocking function after receiving an HTTP request. The request must be responded to immediately (200 OK or 500 Internal Error), but the process should run in the background and send a notification to a WebSocket when it is finished.
Also, the application should receive other requests for processing, and these must also be responded to immediately, without blocking the previous ones.
I'm using add_callback, but I'm not sure if it's the correct way to use Tornado, since it blocks the incoming HTTP requests. I've tried using different threads, but I got exceptions when trying to call the send_message method inside the WebSocket handler.
import time

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.web import Application, RequestHandler, asynchronous
from tornado.websocket import WebSocketHandler

def long_process(id):
    time.sleep(5)

class RequestWeb(RequestHandler):
    @gen.coroutine
    def process(self, id):
        # Trying to call long_process, just like
        # yield gen.Task(IOLoop.current().add_timeout, time.time() + 10)
        # The response must be sent immediately, but the process should run in the background
        IOLoop.current().add_callback(callback=lambda: long_process(id))

    @asynchronous
    @gen.coroutine
    def get(self, id):
        IOLoop.current().add_future(self.process(id), self.process_complete)
        self.write("OK")

    def process_complete(self, future):
        SocketHandler.send_message('Processing complete')

class SocketHandler(WebSocketHandler):
    connections = set()

    def open(self):
        SocketHandler.connections.add(self)

    @classmethod
    def send_message(cls, message):
        for ws in cls.connections:
            ws.write_message(message)

def make_app():
    return Application([
        (r'/api/(?P<id>[a-zA-Z0-9]+)$', RequestWeb),
        (r'/ws', SocketHandler),
    ])

if __name__ == "__main__":
    app = make_app()
    app.listen(8000)
    IOLoop.current().start()
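For what it's worth, one way to keep the event loop responsive is to push the blocking call onto a thread pool with IOLoop.run_in_executor instead of add_callback; a sketch assuming Tornado >= 5 and the SocketHandler class from the code above:

import time
from concurrent.futures import ThreadPoolExecutor

from tornado.ioloop import IOLoop
from tornado.web import RequestHandler

executor = ThreadPoolExecutor(max_workers=4)

def long_process(id):
    time.sleep(5)  # stand-in for the real blocking work
    return id

class RequestWeb(RequestHandler):
    async def get(self, id):
        # Respond immediately and let the blocking work run in the background.
        IOLoop.current().spawn_callback(self.run_long_process, id)
        self.write("OK")

    async def run_long_process(self, id):
        # run_in_executor moves the blocking call to the thread pool, so the
        # IOLoop stays free to serve other requests in the meantime.
        result = await IOLoop.current().run_in_executor(executor, long_process, id)
        SocketHandler.send_message('Processing complete: %s' % result)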