I'm writing an Azure Function in Python. After receiving a message I forward n messages.
My code is:
def sendToQueue(data):
    logging.info('sendToQueue: %s', data)
    msg = Message(encodeData(data))
    d = json.loads(data)
    batchId = d['BatchId']
    logging.info('sending message with batch Id: %s', batchId)
    try:
        logging.info('Sending oos message to queue %s', queue_oos_mssql_inbox)
        sbs.send_queue_message(queue_oos_mssql_inbox, msg)
        logging.info('Done Sending oos message to queue %s', queue_oos_mssql_inbox)
    except:
        logging.error('Unable to process message %s', batchId)
        errorDescription = 'Unable to send message to ' + queue_oos_mssql_inbox
        logging.error('errorDescription message %s', errorDescription)
        error = createErrorMessage(batchId, '404', str(errorDescription))
        logging.error('error message %s', error)
        sendToErrorQueue(json.loads(error))
    logging.debug('done sending message')
When it's not able to send the message I would like to log the error. How do I do something like 'except pyodbc.Error as ex:', but with a Service Bus error instead?
I can't find any samples or documentation.
The solution is rather simple, I just added:
except Exception as e:
    logging.error("Error msg: " + str(e))
You can use the root error class ServiceBusError:
try:
    logging.info('Sending oos message to queue %s', queue_oos_mssql_inbox)
    sbs.send_queue_message(queue_oos_mssql_inbox, msg)
    logging.info('Done Sending oos message to queue %s', queue_oos_mssql_inbox)
except ServiceBusError as e:
    logging.error('Unable to process message %s', batchId)
    logging.exception('error %s', e)
ServiceBusError: All other Service Bus related errors. It is the root error class of all the errors described above.
Or any of the other exceptions described in:
https://pypi.org/project/azure-servicebus/
or in the reference docs:
https://azuresdkdocs.blob.core.windows.net/$web/python/azure-servicebus/latest/azure.servicebus.html#module-azure.servicebus.exceptions
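If it helps, here is a minimal sketch of the corresponding import, assuming the azure-servicebus v7 package those reference docs describe (older SDK versions expose their error classes from different modules, so adjust accordingly):
# Assumes azure-servicebus v7.x, where exceptions live in azure.servicebus.exceptions
from azure.servicebus.exceptions import ServiceBusError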
My question is simple:
Right now this code sends an empty request message to subject chan.01.msg and either gets the message that is currently being broadcast or prints nats: timeout. On top of that, the request message itself also shows up on the subject (something like: Received a message on chan.01.msg _INBOX.<hash_my>.<salt_up>: b''), which is not desirable there. I do filter it out in the callback, but that really feels like the wrong way to do it.
Can I just pull messages with desired subject?
async def msgcb(msg):
    """
    Message callback function
    """
    subject = msg.subject
    reply = msg.reply
    data = msg.data
    if len(data) > 0:
        print(f"Received a message on {subject} {reply}: {data}")

logging.debug("Prepare to subscribe")
sub = await nc.subscribe(subject="chan.01.msg", cb=msgcb)
logging.debug("loop process messages on subject")
while True:
    await asyncio.sleep(1)
    try:
        resp = await nc.request('chan.01.msg')
        print(resp)
    except Exception as e:
        print(e)
You are subscribing to the same subject you are publishing to, so you naturally get the message back when sending a request. To avoid receiving messages that the same client produces, you can use the no_echo option on connect.
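A minimal sketch of what that could look like, assuming nats-py 2.x (which exposes nats.connect) and a NATS server recent enough to support no_echo; the URL and subject are just placeholders matching the question:
import asyncio
import nats

async def msgcb(msg):
    print(f"Received on {msg.subject}: {msg.data}")

async def main():
    # With no_echo the server does not deliver this connection's own
    # published messages back to its subscriptions.
    nc = await nats.connect("nats://localhost:4222", no_echo=True)
    await nc.subscribe("chan.01.msg", cb=msgcb)
    await nc.publish("chan.01.msg", b"hello")  # not echoed back to this client
    await asyncio.sleep(1)
    await nc.drain()

asyncio.run(main())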
I have a Python service which sends and receives messages from an Azure Service Bus queue. In my setup the receiver takes a long time to do some processing before finally completing the message.
To handle this scenario I added auto lock renewal with max_lock_renewal_duration=3600 and set the message lock duration to 1 minute.
When I check the log, the lock renewal worked fine the first time, but the second time it errored out. However, when I print the error it just logs the received message, not the error.
Code:
class AzureMessageQueue(MessageQueue):
    async def on_renew_error(renewable, error, _):
        print("On renew error -\n renewable: ", renewable, "\n error: ", error, "\n type: ", type(error), "\n message: ", error.message)
        if type(error) == MessageAlreadySettled:
            logger.warn("Message already settled")
        else:
            logger.warn("Error renewing lock: %s", error)

    def __init__(self, conn_str: str, queue_name: str) -> None:
        self.sb_client: ServiceBusClient = ServiceBusClient.from_connection_string(
            conn_str=conn_str,
            logging_enable=True,
            retry_total=10,
            retry_backoff_factor=1,
        )
        self.sender: ServiceBusSender = self.sb_client.get_queue_sender(queue_name)
        self.receiver: ServiceBusReceiver = self.sb_client.get_queue_receiver(
            queue_name
        )
        self.renewer: AutoLockRenewer = AutoLockRenewer(
            max_lock_renewal_duration=3600, on_lock_renew_failure=self.on_renew_error
        )

    async def send(self, message: str) -> None:
        sb_message = ServiceBusMessage(message)
        await self.sender.send_messages(sb_message)

    async def process(self, processor) -> AsyncIterator:
        async with self.receiver:
            async for msg in self.receiver:
                self.renewer.register(self.receiver, msg)
                message = str(msg)
                try:
                    result = await processor(message)
                    await self.receiver.complete_message(msg)
                    yield message, None, result
                except Exception as e:
                    yield message, e, None
Log:
On renew error -
renewable: <api.message_queue.AzureMessageQueue object at 0x7fe34bdaea90>
error: {"message": "test"}
type: <class 'azure.servicebus._common.message.ServiceBusReceivedMessage'>
message: {"message": "test"}
I'm trying to understand what caused this issue.
I'm using the Python Azure Service Bus SDK (azure-servicebus~=7.6.0).
Thanks in advance!
I'm new to Azure Functions.
I'd like to know how to return an exception from the Python worker log to the API caller.
In an HTTP trigger with a Cosmos DB output binding, when firing an insert call to the binding and the data already exists, it fails with:
"System.Private.CoreLib: Exception while executing function: Functions.insertEntityData. Microsoft.Azure.DocumentDB.Core: Entity with the specified id already exists in the system."
How can this message be sent back to the end user? It is not getting captured anywhere.
def main(req: func.HttpRequest, cosmosdata: func.Out[func.Document]) -> func.HttpResponse:
    try:
        message = ""
        logging.info('Python HTTP trigger function processed a request.')
        entity_name = req.route_params['entity']
        status_code = 500
        payload = req.get_json()
        if payload:
            try:
                logging.info(payload)
                resultant = cosmosdata.set(func.Document.from_dict(payload))
                logging.info(resultant)
                status_code = 200
                message = "Insert Successful to %s" % (entity_name)
            except Exception as e:
                return func.HttpResponse(str(e), status_code=500)
        else:
            status_code = 400
            message = "Please pass data in the POST Request"
    except Exception as e:
        return func.HttpResponse(str(e), status_code=500)
    return func.HttpResponse(message, status_code=500)
The try / except block is not working because you're using an output binding to Cosmos DB, and it's the binding that is failing, after your function code has already run. It also looks odd to me, because by default the binding performs an upsert operation.
I believe the problem relates to the partitionKey defined in your function.json file.
https://learn.microsoft.com/en-us/azure/azure-functions/functions-bindings-cosmosdb-v2#input---python-examples
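If you specifically need the error in the HTTP response, one alternative is to skip the output binding for the insert and call Cosmos DB from the function body with the azure-cosmos SDK, so the exception is raised inside your try block. A rough sketch, assuming azure-cosmos v4 and made-up database/container names plus a hypothetical COSMOS_CONNECTION app setting:
import os
import azure.functions as func
from azure.cosmos import CosmosClient, exceptions

# Assumed names: "mydb" database, "entities" container, COSMOS_CONNECTION setting
client = CosmosClient.from_connection_string(os.environ["COSMOS_CONNECTION"])
container = client.get_database_client("mydb").get_container_client("entities")

def main(req: func.HttpRequest) -> func.HttpResponse:
    payload = req.get_json()
    try:
        # create_item raises immediately, so the conflict is catchable here
        container.create_item(body=payload)
        return func.HttpResponse("Insert Successful", status_code=200)
    except exceptions.CosmosResourceExistsError:
        return func.HttpResponse("Entity with the specified id already exists", status_code=409)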
I am looking over the following code which does a 'group chat' with different members:
# Receive message from WebSocket
def receive(self, text_data):
    text_data_json = json.loads(text_data)
    message = text_data_json['message']

    # Send message to room group
    async_to_sync(self.channel_layer.group_send)(
        self.room_group_name,
        {
            'type': 'chat_message',
            'message': 'OK'
        }
    )

# Receive message from room group
def chat_message(self, event):
    message = event['message']

    # Send message to WebSocket
    self.send(text_data=json.dumps({
        'message': message
    }))
My question is: what do these two methods do? I see that receive() does the group_send, so what purpose does chat_message() serve if receive() already sends the message upon receiving it?
That chat server code is a simple example of how to send group messages.
In the code:
async_to_sync(self.channel_layer.group_send)(
    self.room_group_name,
    {
        'type': 'chat_message',
        'message': 'OK'
    }
)
the line
    'type': 'chat_message',
is responsible for calling the chat_message() method with {'message': 'OK'}.
Before sending this message to the group members you may want to modify or check the data, or do other work. That's why self.channel_layer.group_send doesn't write to the WebSocket directly but instead dispatches to another handler method (in this case chat_message()) on each consumer in the group, which also keeps the receive() method's code clean.
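For example, a minimal sketch of enriching the event in that handler before it goes out to each client (the sender field and timestamp are made-up additions, purely to illustrate where you could modify the data):
import json
import time

# Inside the consumer class: receive the group event and decorate it
# before forwarding it to this client's WebSocket.
def chat_message(self, event):
    self.send(text_data=json.dumps({
        'message': event['message'],
        'sender': event.get('sender', 'anonymous'),  # hypothetical field
        'received_at': time.time(),
    }))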
Looking for some code samples to solve this problem:
I'd like to write some code (Python or JavaScript) that acts as a subscriber to a RabbitMQ queue, so that on receiving a message it broadcasts the message via WebSockets to any connected client.
I've looked at Autobahn and Node.js (using "amqp" and "ws") but cannot get things to work as needed. Here's the server code in JavaScript using Node.js:
var amqp = require('amqp');
var WebSocketServer = require('ws').Server

var connection = amqp.createConnection({host: 'localhost'});
var wss = new WebSocketServer({port: 8000});

wss.on('connection', function(ws) {
    ws.on('open', function() {
        console.log('connected');
        ws.send(Date.now().toString());
    });
    ws.on('message', function(message) {
        console.log('Received: %s', message);
        ws.send(Date.now().toString());
    });
});

connection.on('ready', function() {
    connection.queue('MYQUEUE', {durable: true, autoDelete: false}, function(queue) {
        console.log(' [*] Waiting for messages. To exit press CTRL+C')
        queue.subscribe(function(msg) {
            console.log(" [x] Received from MYQUEUE %s", msg.data.toString('utf-8'));
            payload = msg.data.toString('utf-8');
            // HOW DOES THIS NOW GET SENT VIA WEBSOCKETS ??
        });
    });
});
Using this code, I can successfully subscribe to a queue in Rabbit and receive any messages that are sent to the queue. Similarly, I can connect a WebSocket client (e.g. a browser) to the server and send/receive messages. BUT how can I send the payload of the Rabbit queue message as a WebSocket message at the point indicated ("HOW DOES THIS NOW GET SENT VIA WEBSOCKETS")? I think it's something to do with being stuck in the wrong callback, or the callbacks needing to be nested somehow?
Alternatively, if this can be done more easily in Python (via Autobahn and pika), that would be great.
Thanks!
One way to implement your system is to use Python with Tornado.
Here is the server:
import tornado.ioloop
import tornado.web
import tornado.websocket
import os
import pika
from threading import Thread

clients = []

def threaded_rmq():
    connection = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
    print 'Connected:localhost'
    channel = connection.channel()
    channel.queue_declare(queue="my_queue")
    print 'Consumer ready, on my_queue'
    channel.basic_consume(consumer_callback, queue="my_queue", no_ack=True)
    channel.start_consuming()

def consumer_callback(ch, method, properties, body):
    print " [x] Received %r" % (body,)
    for itm in clients:
        itm.write_message(body)

class SocketHandler(tornado.websocket.WebSocketHandler):
    def open(self):
        print "WebSocket opened"
        clients.append(self)

    def on_message(self, message):
        self.write_message(u"You said: " + message)

    def on_close(self):
        print "WebSocket closed"
        clients.remove(self)

class MainHandler(tornado.web.RequestHandler):
    def get(self):
        print "get page"
        self.render("websocket.html")

application = tornado.web.Application([
    (r'/ws', SocketHandler),
    (r"/", MainHandler),
])

if __name__ == "__main__":
    thread = Thread(target=threaded_rmq)
    thread.start()
    application.listen(8889)
    tornado.ioloop.IOLoop.instance().start()
And here is the HTML page:
<html>
<head>
    <script src="//code.jquery.com/jquery-1.11.0.min.js"></script>
    <script>
        $(document).ready(function() {
            var ws;
            if ('WebSocket' in window) {
                ws = new WebSocket('ws://localhost:8889/ws');
            }
            else if ('MozWebSocket' in window) {
                ws = new MozWebSocket('ws://localhost:8889/ws');
            }
            else {
                alert("<tr><td> your browser doesn't support web socket </td></tr>");
                return;
            }
            ws.onopen = function(evt) { alert("Connection open ...") };
            ws.onmessage = function(evt) {
                alert(evt.data);
            };
            function closeConnect() {
                ws.close();
            }
        });
    </script>
</head>
</html>
So when you publish a message to "my_queue", the message is redirected to all connected web pages.
I hope it can be useful.
EDIT:
Here https://github.com/Gsantomaggio/rabbitmqexample you can find the complete example
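To try it end to end, you can push a test message into the queue; a minimal publisher sketch using pika, assuming RabbitMQ on localhost and the my_queue name from the server above:
import pika

# Publish a test message to the queue the Tornado server is consuming from
connection = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
channel = connection.channel()
channel.queue_declare(queue="my_queue")
channel.basic_publish(exchange="", routing_key="my_queue", body="hello from RabbitMQ")
connection.close()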