I have written the TCP server below, and I don't know why the handler does not support multiple TCP client connections.
import socket
import sys
import ast
# Internal imports
import core
try:
    import fcntl
except ImportError:
    fcntl = None
import logging
import json

_LOG = logging.getLogger(__name__)

if sys.version_info.major == 2:
    import SocketServer
    TCPServer = SocketServer.TCPServer
    RequestHandler = SocketServer.BaseRequestHandler
if sys.version_info.major == 3:
    import socketserver
    TCPServer = socketserver.TCPServer
    RequestHandler = socketserver.BaseRequestHandler
class TCPServerRequestHandler(RequestHandler):
    def handle(self):
        """Receives data from a client."""
        msg = self.request.recv(1024).strip()
        if self.client_address and not msg:
            _LOG.error("No data received from client: {}".format(self.client_address[0]))
            return
        # Send some data to client
        # self.wfile.write("Hello Client....Got your message".encode())
        data = ast.literal_eval(msg.decode('utf-8'))
        if not data:
            _LOG.error("No data received.")
        else:
            with core._connect(db="exampledb") as conn:
                if "device" in data and data["device"] == "mcu":
                    table_name = "roku_online_status"
                    if conn:
                        data.pop("device")
                        cols = [c for c in data.keys()]  # Python 3 dict keys is not a list
                        stored_value = core.doQuery(conn, table_name, cols, "id")
                        if stored_value[0] != data["online"]:
                            core.insert_row(data, table_name, conn)
                else:
                    if conn:
                        core.insert_row(data, "particle_photon", conn)
                conn.close()
class Server(TCPServer):
    allow_reuse_address = True
    # The constant would be better initialized by a systemd module
    SYSTEMD_FIRST_SOCKET_FD = 3

    def __init__(self, server_address, handler_cls, bind_and_activate=True):
        self.handlers = set()
        # Invoke base but omit bind/listen steps (performed by systemd activation!)
        try:
            TCPServer.__init__(self, server_address, handler_cls, bind_and_activate)
        except TypeError:
            TCPServer.__init__(self, server_address, handler_cls)
        # Override socket
        self.socket = socket.fromfd(
            self.SYSTEMD_FIRST_SOCKET_FD, self.address_family, self.socket_type)
        if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
            flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
            flags |= fcntl.FD_CLOEXEC
            fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)

    def server_bind(self):
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(self.server_address)

    def server_close(self):
        TCPServer.server_close(self)
        print("Shutting down server.")
        for handler in self.handlers.copy():
            print(handler)
            self.shutdown_request(handler.request)
def main(server_address):
    """Starts TCPServer."""
    logging.basicConfig(level=logging.DEBUG)
    # Create a TCP Server instance
    server = Server(server_address, TCPServerRequestHandler)
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        sys.exit(0)

if __name__ == '__main__':
    main(("10.10.10.2", 7111))
For a single connection it works just fine, but when multiple clients try to connect, the server gets stuck.
You have implemented a single-threaded server with blocking I/O. This kind of server can only handle one client at a time, since it waits until the current client is done (inside TCPServerRequestHandler) before it can process the next client's connection.
To handle multiple clients at the same time, you either have to use multiple threads or processes, where each handles a single client, or you have to implement an event-based server that can handle multiple clients inside a single thread. For the first approach, have a look at ThreadingTCPServer and ForkingTCPServer; for the latter, have a look at frameworks like Twisted.
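For the threading approach the change is small: mix socketserver.ThreadingMixIn into your server class (or use socketserver.ThreadingTCPServer directly) so that each handle() call runs in its own thread. A minimal Python 3 sketch, with an illustrative echo handler standing in for your database logic:

import socketserver

class ThreadedServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    allow_reuse_address = True
    daemon_threads = True  # client threads will not keep the process alive on exit

class EchoHandler(socketserver.BaseRequestHandler):
    def handle(self):
        # Runs in its own thread, so one slow client no longer blocks the rest.
        msg = self.request.recv(1024).strip()
        self.request.sendall(msg)

if __name__ == '__main__':
    with ThreadedServer(("127.0.0.1", 7111), EchoHandler) as server:
        server.serve_forever()

Applied to your code, this would mean declaring class Server(socketserver.ThreadingMixIn, TCPServer); note that handle() then has to be thread-safe, e.g. one database connection per request, as you already do.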
Apologies for the long post. I am trying to subscribe to a RabbitMQ queue and then create a worker queue to execute tasks. This is required since the incoming rate on the RabbitMQ queue is high and processing each item from the queue takes 10-15 minutes, hence the need for a worker queue. I am initiating only 4 slots in the worker queue and registering a callback method for processing the items in it. The expectation is that when all 4 slots in the worker queue are busy, new incoming items are blocked until a free slot is available.
The RabbitMQ piece is working well. The problem is that I cannot figure out why the items from my worker queue are not executing the task, i.e. the callback is not working. In fact, an item from the worker queue gets executed only once, when the program starts. For the rest of the time, tasks keep getting added to the worker queue without being consumed. I would appreciate it if somebody could help me understand this.
I am attaching the code for rabbitmqConsumer, driver, and slaveConsumer. Some information has been redacted in the code for privacy reasons.
# This is the driver
#!/usr/bin/env python
import time
from rabbitmqConsumer import BasicMessageReceiver

basic_receiver_object = BasicMessageReceiver()
basic_receiver_object.declare_queue()
while True:
    basic_receiver_object.consume_message()
    time.sleep(2)
# This is the rabbitmqConsumer
#!/usr/bin/env python
import pika
import ssl
import json
from slaveConsumer import slave

class BasicMessageReceiver:
    def __init__(self):
        # SSL context for TLS configuration of Amazon MQ for RabbitMQ
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
        url = <url for the queue>
        parameters = pika.URLParameters(url)
        parameters.ssl_options = pika.SSLOptions(context=ssl_context)
        self.connection = pika.BlockingConnection(parameters)
        self.channel = self.connection.channel()
        # worker-queue object
        self.slave_object = slave()
        self.slave_object.start_task()

    def declare_queue(self, queue_name="abc"):
        print(f"Trying to declare queue inside consumer({queue_name})...")
        self.channel.queue_declare(queue=queue_name, durable=True)

    def close(self):
        print("Closing Receiver")
        self.channel.close()
        self.connection.close()

    def _consume_message_setup(self, queue_name):
        def message_consume(ch, method, properties, body):
            print(f"I am inside the message_consume")
            message = json.loads(body)
            self.slave_object.execute_task(message)
            ch.basic_ack(delivery_tag=method.delivery_tag)
        self.channel.basic_qos(prefetch_count=1)
        self.channel.basic_consume(on_message_callback=message_consume,
                                   queue=queue_name)

    def consume_message(self, queue_name="abc"):
        print("I am starting the rabbitmq start_consuming")
        self._consume_message_setup(queue_name)
        self.channel.start_consuming()
# This is the slaveConsumer
#!/usr/bin/env python
import pika
import ssl
import json
import requests
import threading
import queue
import os

class slave:
    def __init__(self):
        self.job_queue = queue.Queue(maxsize=3)
        self.job_item = ""

    def start_task(self):
        def _worker():
            while True:
                json_body = self.job_queue.get()
                self._parse_object_from_queue(json_body)
                self.job_queue.task_done()
        threading.Thread(target=_worker, daemon=True).start()

    def execute_task(self, obj):
        print("Inside execute_task")
        self.job_item = obj
        self.job_queue.put(self.job_item)
        # print(self.job_queue.queue)

    def _parse_object_from_queue(self, json_body):
        if bool(json_body['entity']):
            if json_body['entity'] == 'Hello':
                print("Inside Slave: Hello")
            elif json_body['entity'] == 'World':
                print("Inside Slave: World")
        self.job_queue.join()
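For contrast, below is a minimal self-contained sketch (my assumptions, not the redacted code) of the bounded worker-queue pattern the question describes: queue.Queue(maxsize=4) makes put() block while all four slots are full, each worker calls task_done() exactly once per item, and join() is only called by the producer at shutdown, never from inside the processing path (a join() inside _parse_object_from_queue, as above, blocks the worker before it can ever call task_done, which stalls the queue).

import queue
import threading
import time

job_queue = queue.Queue(maxsize=4)  # put() blocks while 4 items are pending

def worker():
    while True:
        item = job_queue.get()
        time.sleep(0.1)           # stand-in for the 10-15 minute task
        print("processed", item)
        job_queue.task_done()     # exactly one task_done() per get()

for _ in range(4):
    threading.Thread(target=worker, daemon=True).start()

for i in range(10):
    job_queue.put(i)              # blocks until a slot frees up
job_queue.join()                  # wait for all queued work to finish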
I am working on a TCP server that receives events over TCP. For this task, I decided to use the asyncio Protocol classes (yeah, maybe I should have used Streams); the reception of events works fine.
Problem:
I need to be able to connect to the clients, so I create another "server" used to look up all my connected clients, and after finding the correct one, I use the Protocol class's transport object to send a message, then try to grab the response by reading a buffer variable that always holds the last received message.
My problem is that after sending the message, I don't know how to wait for the response, so I always get the previous message from the buffer.
I will try to simplify the code to illustrate (please keep in mind that this is an example, not my real code):
import asyncio
import time

CONN = set()

class ServerProtocol(asyncio.Protocol):
    def connection_made(self, transport):
        self.transport = transport
        CONN.add(self)

    def data_received(self, data):
        self.buffer = data
        # DO OTHER STUFF
        print(data)

    def connection_lost(self, exc=None):
        CONN.remove(self)

class ConsoleProtocol(asyncio.Protocol):
    def connection_made(self, transport):
        self.transport = transport
        # Get first value just to illustrate
        self.client = next(iter(CONN))

    def data_received(self, data):
        # Forward the message to the client
        self.client.transport.write(data)
        # wait a fraction of a second
        time.sleep(0.2)
        # forward the response of the client
        self.transport.write(self.client.buffer)

def main():
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    loop.run_until_complete(
        loop.create_server(protocol_factory=ServerProtocol,
                           host='0.0.0.0',
                           port=6789))
    loop.run_until_complete(
        loop.create_server(protocol_factory=ConsoleProtocol,
                           host='0.0.0.0',
                           port=9876))
    try:
        loop.run_forever()
    except Exception as e:
        print(e)
    finally:
        loop.close()

if __name__ == '__main__':
    main()
This is not only my first experience writing a TCP server, but also my first experience working with parallelism. So it took me days to realize that my sleep would not only not work, but that I was locking up the server while it "sleeps".
Any help is welcome.
time.sleep(0.2) is blocking and should not be used in async programming: it blocks the whole event loop, so if your program is running with 100 clients, the last client will be delayed by 0.2*99 seconds, which is not what you want.
The right way is to let the program wait 0.2 s without blocking, so other concurrent clients are not delayed; we can use a thread for that.
import asyncio
import time
import threading

CONN = set()

class ServerProtocol(asyncio.Protocol):
    def connection_made(self, transport):
        self.transport = transport
        CONN.add(self)

    def data_received(self, data):
        self.buffer = data
        # DO OTHER STUFF
        print(data)

    def connection_lost(self, exc=None):
        CONN.remove(self)

class ConsoleProtocol(asyncio.Protocol):
    def connection_made(self, transport):
        self.transport = transport
        self.loop = asyncio.get_event_loop()
        # Get first value just to illustrate
        self.client = next(iter(CONN))

    def delay_thread(self):
        # Sleep in a worker thread so the event loop keeps running, then
        # hand the write back to the loop: transports are not thread-safe,
        # so do not write from this thread directly.
        time.sleep(0.2)
        self.loop.call_soon_threadsafe(
            self.transport.write, self.client.buffer)

    def data_received(self, data):
        # Forward the message to the client
        self.client.transport.write(data)
        # Wait a fraction of a second without blocking the loop, then
        # forward the response of the client
        thread = threading.Thread(target=self.delay_thread)
        thread.daemon = True
        thread.start()
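For completeness, a thread-free variant (my sketch, not part of the answer above) achieves the same delay purely on the event loop with loop.call_later, which schedules a callback instead of sleeping:

import asyncio

CONN = set()  # populated by ServerProtocol, as in the question

class ConsoleProtocol(asyncio.Protocol):
    def connection_made(self, transport):
        self.transport = transport
        self.client = next(iter(CONN))  # first client, just to illustrate

    def data_received(self, data):
        # Forward the message to the client
        self.client.transport.write(data)
        # Schedule the response forwarding 0.2 s from now; the loop keeps
        # serving other clients in the meantime.
        asyncio.get_event_loop().call_later(0.2, self._forward_response)

    def _forward_response(self):
        self.transport.write(self.client.buffer)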
I am trying to improve the performance of my application. It is a Python 3.6 asyncio.Protocol-based TCP server (SSL-wrapped) handling a lot of requests.
It works fine and the performance is acceptable when only one connection is active, but as soon as another connection is opened, the client side of the application slows down. This is really noticeable once there are 10-15 client connections.
Is there a way to properly handle requests in parallel, or should I resort to running multiple server instances?
/edit Added code
main.py
if __name__ == '__main__':
    import package.server
    server = package.server.TCPServer()
    server.join()
package.server
import multiprocessing, asyncio, uvloop

asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

from package.connection import Connection

class TCPServer(multiprocessing.Process):
    name = 'tcpserver'

    def __init__(self, discord_queue=None):
        multiprocessing.Process.__init__(self)
        self.daemon = True
        # some setup in here
        self.start()

    def run(self):
        loop = uvloop.new_event_loop()
        self.loop = loop
        # db setup, etc
        server = loop.create_server(Connection, HOST, PORT, ssl=SSL_CONTEXT)
        loop.run_until_complete(server)
        loop.run_forever()
package.connection
import asyncio, hashlib, os
from time import sleep, time as timestamp

class Connection(asyncio.Protocol):
    connections = {}

    def setup(self, peer):
        self.peer = peer
        self.ip, self.port = self.peer[0], self.peer[1]
        self.buffer = []

    @property
    def connection_id(self):
        if not hasattr(self, '_connection_id'):
            self._connection_id = hashlib.md5('{}{}{}'.format(self.ip, self.port, timestamp()).encode('utf-8')).hexdigest()
        return self._connection_id

    def connection_lost(self, exception):
        del Connection.connections[self.connection_id]

    def connection_made(self, transport):
        self.transport = transport
        self.setup(transport.get_extra_info('peername'))
        Connection.connections[self.connection_id] = self

    def data_received(self, data):
        # processing; average server-side execution time is around 30 ms
        sleep(0.030)
        self.transport.write(os.urandom(64))
The application runs on Debian 9.9 and is started via systemd.
To "benchmark" it, I use this script:
import os, socket
from multiprocessing import Pool
from time import time as timestamp

def foobar(i):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect(('127.0.0.1', 60000))
    while True:
        ms = timestamp()*1000
        s.send(os.urandom(128))
        s.recv(1024*2)
        print(i, timestamp()*1000-ms)

if __name__ == '__main__':
    instances = 4
    with Pool(instances) as p:
        print(p.map(foobar, range(0, instances)))
To answer my own question here: I went with a solution that spawned multiple instances listening on base_port + x, and I put an nginx TCP load balancer in front of them.
The individual TCPServer instances are still spawned as their own processes; they communicate among themselves via a separate UDP connection, and with the main process via multiprocessing.Queue.
While this does not "fix" the problem, it provides a somewhat scalable solution for my very specific problem.
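Another option worth sketching (an assumption on my part, not what I ended up doing above): since data_received() blocks the loop for the ~30 ms of processing, that work can be pushed onto a thread pool with run_in_executor so the loop stays free to service other connections:

import asyncio
import os
from time import sleep

def process(data):
    # Hypothetical stand-in for the real ~30 ms of blocking processing.
    sleep(0.030)
    return os.urandom(64)

class Connection(asyncio.Protocol):
    def connection_made(self, transport):
        self.transport = transport
        self.loop = asyncio.get_event_loop()

    def data_received(self, data):
        # Run the blocking work in the default ThreadPoolExecutor and send
        # the reply once it completes.
        future = self.loop.run_in_executor(None, process, data)
        future.add_done_callback(lambda fut: self.transport.write(fut.result()))

Whether this helps depends on whether the 30 ms is I/O-bound (threads are fine) or CPU-bound (a ProcessPoolExecutor with picklable arguments would be needed).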
I'm using a socket to listen on a port in a while loop, with a 5-second timeout set by socket.settimeout(). But I have another method which sets the listening port, and when it is called with a new port, I want to force the socket to time out so that I can reinitialise the socket and set the appropriate port inside the while loop. Is there a way to do that?
The socket is inside a subclass of threading.Thread.
PS: Since this is my second day with Python, any other suggestions regarding any part would be most welcome. Thank you.
EDIT:
I almost forgot: I want to reinitialise the socket when the setoutboundport method is called.
EDIT2
Man, the whole code is messed up. I re-examined everything and it's really wrong for what I want to achieve. Just focus on the main question: timing out the socket.
import threading
import socket
import ResponseSender
import sys
import traceback

class Relay(threading.Thread):
    def __init__(self, inboundport, outboundip, outboundport, ttl=60):
        super(Relay, self).__init__()
        self.inboundport = inboundport
        self.outboundip = outboundip
        self.outboundport = outboundport
        self.ttl = ttl
        self.serverOn = True
        self.firstpacket = True
        self.sibling = None
        self.newoutboundport = 0
        self.listener = None
    # ENDOF: __init__

    def stop(self):
        self.serverOn = False
    # ENDOF: stop

    def setsiblingrelay(self, relay):
        self.sibling = relay
    # ENDOF: setsiblingrelay

    def setoutboundport(self, port):
        self.newoutboundport = port
    # ENDOF: setoutboundport

    def run(self):
        s = None
        try:
            while self.serverOn:
                if not s:
                    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                if self.outboundport != self.newoutboundport:
                    try:
                        s.close()
                    except:
                        pass
                s.settimeout(4)
                s.bind(('', self.inboundport))
                print("Relay started :{0} => {1}:{2}".format(self.inboundport, self.outboundip, self.outboundport))
                print("---------------------------------------- LISTENING FOR INCOMING PACKETS")
                data, address = s.recvfrom(32768)
                print("Received {0} from {1}:{2} => sending to {3}:{4}"
                      .format(data, address[0], address[1], self.outboundip, self.outboundport))
                ResponseSender.sendresponse(address[0], address[1], data)
        except TimeoutError:
            pass
        except:
            print("Error: {0}".format(traceback.format_exception(*sys.exc_info())))
    # ENDOF: run
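Focusing on just the main question, here is a sketch (my reconstruction, reusing the question's attributes and its ResponseSender module) of the usual pattern: let recvfrom() time out, swallow socket.timeout, and rebuild the socket whenever the port changed between iterations:

import socket
import threading

import ResponseSender  # module from the question

class Relay(threading.Thread):
    def run(self):
        s = None
        while self.serverOn:
            if s is None:
                s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                s.settimeout(5)
                s.bind(('', self.inboundport))
                self.outboundport = self.newoutboundport
            try:
                data, address = s.recvfrom(32768)
            except socket.timeout:
                pass  # timed out: fall through to the port-change check
            else:
                ResponseSender.sendresponse(address[0], address[1], data)
            if self.outboundport != self.newoutboundport:
                s.close()
                s = None  # force re-creation with the new port on the next pass

This way setoutboundport() never has to touch the socket from the other thread; the listening thread notices the change by itself within one timeout period.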
I have a problem unpickling data received from the logger. Given this udp_server:
import pickle
import logging
import logging.handlers
import socketserver
import struct

class MyUDPHandler(socketserver.BaseRequestHandler):
    def handle(self):
        data = self.request[0].strip()
        socket = self.request[1]
        print("{} wrote:".format(self.client_address[0]))
        print(self.unPickle(data))  # here is the problem
        socket.sendto(data.upper(), self.client_address)

    def unPickle(self, data):
        return pickle.loads(data)

class LogRecordSocketReceiver(socketserver.UDPServer):
    allow_reuse_address = True

    def __init__(self, host='localhost',
                 port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
                 handler=MyUDPHandler):
        socketserver.UDPServer.__init__(self, (host, port), handler)
        self.abort = 0
        self.timeout = 1
        self.logname = None

    def serve_until_stopped(self):
        import select
        abort = 0
        while not abort:
            rd, wr, ex = select.select([self.socket.fileno()],
                                       [], [],
                                       self.timeout)
            if rd:
                self.handle_request()
            abort = self.abort

if __name__ == "__main__":
    tcpserver = LogRecordSocketReceiver()
    print('About to start UDP server...')
    tcpserver.serve_until_stopped()
And udp_log_sender:
import logging, logging.handlers
rootLogger = logging.getLogger('')
rootLogger.setLevel(logging.DEBUG)
udp_handler = logging.handlers.DatagramHandler("localhost", logging.handlers.DEFAULT_TCP_LOGGING_PORT)
rootLogger.addHandler(udp_handler)
logging.info('Jackdaws love my big sphinx of quartz.')
When the server receives the logging message and wants to run unPickle, an EOFError is thrown. What could cause this behaviour?
Two fixes:
- do not strip the binary data
- omit the first 4 bytes (i.e. use data[4:]), as they contain the length of the pickled object
I didn't find this information in the logging module documentation - sometimes one has to go to the source (or just google harder).
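Put together, a sketch of the corrected handler (the 4 bytes are the big-endian length prefix that logging.handlers.DatagramHandler puts in front of each pickled LogRecord dict):

import pickle
import logging
import socketserver

class MyUDPHandler(socketserver.BaseRequestHandler):
    def handle(self):
        data = self.request[0]                # do not strip binary data
        record_dict = pickle.loads(data[4:])  # skip the 4-byte length prefix
        record = logging.makeLogRecord(record_dict)
        print(record.getMessage())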