Explanation
I am building a multi-client server architecture with sockets in Python 3. For this I use the multiprocessing library.
The following code creates a server that listens for client connections:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("",PORT))
sock.listen(CLIENTS)
print(logFile().message(f"running ClassAdmin server, listen {CLIENTS} clients by port {PORT}...",True,"INFO"))
sockSSL = context.wrap_socket(sock,server_side=True)
while sockSSL:
    connection, address = sockSSL.accept()
    eventChildStop = multiprocessing.Event()
    subprocess = multiprocessing.Process(target=ClientListener, name="client", args=(connection, address))
    subprocess.start()
In the code above, each client is handled in a child process created with multiprocessing.Process(), which runs the class ClientListener.
class ClientListener:
    def __init__(self, conn, addr):
        try:
            self.conn, self.addr = conn, addr
            self.nick = ""
            self.__listenData()
        except (KeyboardInterrupt, SystemExit) as err:
            print(logFile().message(f"The host {self.nick} ({self.addr[0]}:{self.addr[1]}) left", True, "INFO"))
        except BaseException as err:
            type, object, traceback = sys.exc_info()
            file = traceback.tb_frame.f_code.co_filename
            line = traceback.tb_lineno
            print(logFile().message(f"{err} in {file}:{line}", True, "ERROR"))
        finally:
            try:
                ListClients().remove(self.conn)
                self.conn.close()
            except:
                None
            finally:
                Client(self.conn, self.addr).registre(self.nick, "DISCONNECTED", False)

    def __listenData(self):
        while True:
            data = self.conn.recv(1024)
            text = data.decode('utf-8')
            if text.startswith("sig."):
                # The client sent "sig.<ExceptionName>": raise it to unwind this listener
                exec(f"raise {text.split('.')[1]}")
            elif data:
                if text.startswith("HelloServer: "):
                    self.nick = text.replace("HelloServer: ", "")
                    client = Client(self.conn, self.addr).registre(self.nick, "CONNECTED", False)
                    if client == False:
                        self.conn.send(b"sig.SystemExit(-5000,'The nick exists and is connected',True)")
                    else:
                        print(logFile().message(f"The host {self.nick} ({self.addr[0]}:{self.addr[1]}) is connected", True, "INFO"))
                        ListClients().add(self.conn)
                else:
                    print(data)
__init__() runs the method __listenData(), which is responsible for handling the data the client sends to the server.
In __init__() I also handle exceptions, so that information is logged when the client closes.
try:
    #{...}
finally:
    try:
        ListClients().remove(self.conn)
        self.conn.close()
    except:
        None
    finally:
        Client(self.conn, self.addr).registre(self.nick, "DISCONNECTED", False)
        # HERE, can I close the current child process?
This try block always executes its finally clause, so the client is always removed from the client list, and if a connection is still open it is closed.
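As a toy illustration of the guarantee being relied on here (nothing from the project code, just standard Python): a finally clause runs however its try exits, even while an exception is still propagating, and a nested finally runs as well.

def handle_client():
    try:
        raise ConnectionError("client went away")  # simulate a failure
    finally:
        try:
            print("remove client from list, close connection")
        finally:
            print("register client as DISCONNECTED")  # always reached

try:
    handle_client()
except ConnectionError:
    print("the error was still propagated after cleanup")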
The problem
My problem is the following:
I run the server...
On the client machine, I run the client...
When the client connects to the server, the server creates a child process.
Now the client closes; if we inspect the child process on the server, its status has changed to Z, meaning zombie.
My question is...
How do I close this child process? Since the client runs in a child process started by multiprocessing.Process(), I assume I must close it with multiprocessing's terminate() method... I think that is the solution.
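Some context before the attempts below (a minimal standalone sketch, independent of the project code): on Unix, an exited child stays in Z state until the parent reaps it, and with multiprocessing the reaping happens when the parent joins the child, so (as far as I understand) terminate() alone does not remove the Z entry; the join does.

import multiprocessing
import time

def worker():
    pass  # the child exits immediately

if __name__ == "__main__":
    child = multiprocessing.Process(target=worker)
    child.start()
    time.sleep(5)
    # During this sleep, `ps -o pid,stat,comm` lists the child with
    # state Z (zombie): it has exited but has not been reaped yet.
    child.join()   # reaps the child; the zombie entry disappears
    time.sleep(5)  # check ps again: the Z entry is gone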
Possible solution?
I thought of...
Adding another child process in the root that listens for a multiprocessing.Event():
while sockSSL:
    connection, address = sockSSL.accept()
    eventChildStop = multiprocessing.Event()
    subprocess = multiprocessing.Process(target=ClientListener, name="client", args=(connection, address, eventChildStop))
    subprocess.start()
    multiprocessing.Process(target=ClientListener.exitSubprocess, name="exitChildProcess", args=(eventChildStop, subprocess)).start()
    time.sleep(1)
In the ClientListener class I add the event argument to __init__():
class ClientListener:
    def __init__(self, conn, addr, event):
I add the static method exitSubprocess(). In theory this method terminates the child process (in practice it does not):
@staticmethod
def exitSubprocess(event, process):
    while True:
        if event.is_set():
            print(process.pid)
            process.terminate()
            break
        time.sleep(.5)
But this does not work; the result is the same. Both child processes (one running the static method exitSubprocess, the other, the first one, running the client) end up with zombie status. Why...?
Does somebody understand what is happening?
I would appreciate a response. Thank you for your attention.
Solution
Hi!! The problem has been solved!!
How did I solve it?
What I did was: after starting the client's child process, start a thread in the parent process; when the child process is about to exit, the thread joins the child process to the parent process and then exits successfully.
Finally, the client's child process exits.
Steps to follow
First, I added this to the server's root code:
# This thread is responsible for closing the client's child process
threading.Thread(target=ClientListener.exitSubprocess, name="closeChildProcess", args=(eventChildStop, subprocess,)).start()
Complete result:
while sockSSL:
    connection, address = sockSSL.accept()
    eventChildStop = multiprocessing.Event()
    subprocess = multiprocessing.Process(target=ClientListener, name="client", args=(connection, address, eventChildStop))
    # This thread is responsible for closing the client's child process
    threading.Thread(target=ClientListener.exitSubprocess, name="closeChildProcess", args=(eventChildStop, subprocess,)).start()
    subprocess.start()
    time.sleep(1)
Then, in the new exitSubprocess method, I changed:
if event.is_set():
    print(process.pid)
    process.terminate()
    break
to:
if event.is_set():
    process.join()
    break
Complete result:
# This method receives the child process as an argument, in order to join it to the parent process
@staticmethod
def exitSubprocess(event, process):
    while True:
        if event.is_set():
            process.join()
            break
        time.sleep(.5)
Important: in the client's child process, add a time.sleep(1) of 1 second in its last finally block, to give the thread time to join the client's child process to the parent process:
class ClientListener:
    def __init__(self, conn, addr, event):
        try:
            self.conn, self.addr = conn, addr
            self.nick = ""
            self.__listenData()
        except (KeyboardInterrupt, SystemExit) as err:
            print(logFile().message(f"The host {self.nick} ({self.addr[0]}:{self.addr[1]}) left", True, "INFO"))
        except BaseException as err:
            type, object, traceback = sys.exc_info()
            file = traceback.tb_frame.f_code.co_filename
            line = traceback.tb_lineno
            print(logFile().message(f"{err} in {file}:{line}", True, "ERROR"))
        finally:
            try:
                ListClients().remove(self.conn)
                self.conn.close()
            except:
                None
            finally:
                Client(self.conn, self.addr).registre(self.nick, "DISCONNECTED", False)
                event.set()
                # Delay closing the process by 1 second, giving the exitSubprocess
                # method time to join the client's child process to the parent process
                time.sleep(1)
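As an aside, the standard library offers a simpler way to reap finished children without a dedicated watcher: the documentation for multiprocessing.active_children() notes that calling it has the side effect of joining any processes which have already finished. So the accept loop could periodically do:

import multiprocessing

# Joins (and thereby reaps) every child that has already exited,
# so no Z entries accumulate between accepted connections.
multiprocessing.active_children()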
Thank you very much for your attention and time.
The problem I am having is that:
To start an async function in the background I need an asyncio event loop.
This event loop usually exists in the main thread and, when started, blocks the execution of that thread (i.e. lines of code after starting the event loop aren't run until the event loop is cancelled).
However, ROS2 has its own event loop (executor) that also usually runs in the main thread and blocks execution. This means it is difficult to have both event loops running.
My attempted solution is to start the asyncio event loop in a separate thread. This thread is started in the Node constructor, and the loop stops after the Node is destructed.
This looks like this:
class IncrementPercentDoneServiceNode(Node):
    def __create_task(self, f: Awaitable):
        self.__task = self.__loop.create_task(f)

    def __init__(self):
        super().__init__('increment_percent_done_service_node')
        self.__loop = asyncio.new_event_loop()
        self.__task: Optional[Task] = None
        self.__thread = threading.Thread(target=self.__loop.run_forever)
        self.__thread.start()
        self.done = False
        self.create_service(Trigger, 'start_incrementing',
            callback=lambda request, responce: (
                self.get_logger().info("Starting service"),
                self.__loop.call_soon_threadsafe(self.__create_task, self.__increment_percent_complete()),
                TriggerResponse(success=True, message='')
            )[-1]
        )

    def __del__(self):
        print("stopping loop")
        self.done = True
        if self.__task is not None:
            self.__task.cancel()
        self.__loop.stop()
        self.__thread.join()

    async def __increment_percent_complete(self):
        timeout_start = time.time()
        duration = 5
        while time.time() < (timeout_start + duration):
            time_since_start = time.time() - timeout_start
            percent_complete = (time_since_start / duration) * 100.0
            self.get_logger().info("Percent complete: {}%".format(percent_complete))
            await asyncio.sleep(0.5)
        self.get_logger().info("leaving async function")
        self.done = True

if __name__ == '__main__':
    rclpy.init()
    test = IncrementPercentDoneServiceNode()
    e = MultiThreadedExecutor()
    e.add_node(test)
    e.spin()
Is this a sensible way to do it? Is there a better way? How would I cancel the start_incrementing service with another service? (I know that this is what actions are for, but I cannot use them in this instance).
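For reference, here is a minimal, ROS2-free sketch of the background-loop pattern described above (standard library only). asyncio.run_coroutine_threadsafe is the usual thread-safe way to hand a coroutine to a loop owned by another thread; the concurrent.futures.Future it returns also has a cancel() method, which is one possible hook for implementing cancellation from another service.

import asyncio
import threading

# Start an event loop in a background thread.
loop = asyncio.new_event_loop()
thread = threading.Thread(target=loop.run_forever, daemon=True)
thread.start()

async def work():
    await asyncio.sleep(0.1)
    return 42

# Thread-safe handoff of a coroutine to the background loop; returns
# a concurrent.futures.Future usable (and cancellable) from this thread.
future = asyncio.run_coroutine_threadsafe(work(), loop)
print(future.result())  # blocks this thread until the coroutine finishes

# Orderly shutdown: stop() must also be scheduled thread-safely.
loop.call_soon_threadsafe(loop.stop)
thread.join()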
I'm trying to simulate processing in threads by using asyncio.Queue. However, I'm struggling to turn the threaded processing simulation into an asynchronous loop.
In brief, my script: 1) receives processing requests over a websocket, 2) assigns each request to the requested queue (which simulates a thread), 3) runs the processing queues, which put responses into one shared response queue, and 4) takes the responses out of the shared queue one by one and sends them out to the server over the websocket.
Simplified version of my code:
# Initialize empty processing queues for the number of threads I want to simulate
processing_queues = [asyncio.Queue() for i in range(n_queues)]

# Initialize shared response queue
response_q = asyncio.Queue()

# Set up a websocket context manager
async with websockets.connect(f"ws://{host}:{port}") as websocket:
    while True:
        # Read incoming requests
        message = await websocket.recv()
        # Parse message -> get request data and on which thread / queue to process it
        request_data, queue_no = parse_message(message)
        # Put the request data on the requested queue (imitating a thread)
        await processing_queues[queue_no].put(request_data)

        # THIS IS WHERE I THINK ASYNCHRONY BREAKS (AND I NEED HELP)
        # Do processing in each imitated processing thread
        for proc_q in processing_queues:
            if not proc_q.empty():
                request_data = await proc_q.get()
                # do the processing
                response = process_data(request_data)
                # Add the response to the response queue
                await response_q.put(response)

        # Send responses back to the server
        if not response_q.empty():
            response_data = await response_q.get()
            await websocket.send(response_data)
From the output of the script, I deduced that 1) I seem to receive requests and send out responses asynchronously; 2) processing in queues does not happen asynchronously. Correct me if I'm wrong.
I was reading about create_task() in asyncio. Maybe that could be a way to solve my problem?
I'm open to any solution (even hacky).
P.S. I would just use threads from threading library, but I need asyncio for websockets library.
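(As an aside on the P.S.: asyncio can also push blocking work out to real threads from inside a coroutine. A minimal sketch with a hypothetical blocking process_data, using the standard run_in_executor bridge; Python 3.9+ also has asyncio.to_thread.)

import asyncio

def process_data(x):
    # hypothetical blocking (CPU- or IO-bound) processing
    return x * 2

async def handle(x):
    loop = asyncio.get_running_loop()
    # Runs the blocking call in the default thread pool so the
    # event loop (and the websocket) stays responsive.
    return await loop.run_in_executor(None, process_data, x)

print(asyncio.run(handle(21)))  # -> 42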
P.P.S. Threaded version of my idea.
class ProcessingImitationThread(threading.Thread):
    def __init__(self, thread_id, request_q, response_q):
        threading.Thread.__init__(self)
        self.thread_id = thread_id
        self.request_q = request_q
        self.response_q = response_q

    def run(self):
        while True:
            try:
                (x, request_id) = self.request_q.get()
            except Empty:
                time.sleep(0.2)
            else:
                if x == -1:
                    # EXIT CONDITION
                    break
                else:
                    sleep_time_for_x = count_imitation(x, state)
                    time.sleep(sleep_time_for_x)
                    self.response_q.put(request_id)
                    print(f"request {request_id} executed")
# Set up
processing_qs = [queue.Queue() for i in range(n_processes_simulated)]
response_q = queue.Queue()
processing_thread_handlers = []
for i in range(n_processes_simulated):
    # create thread
    t = ProcessingImitationThread(i, processing_qs[i], response_q)
    processing_thread_handlers.append(t)

# Main loop
while True:
    # receive requests and assign to requested queue (so that thread picks up)
    if new_request:
        requested_process, x, request_id = parse(new_request)
        processing_qs[requested_process].put((x, request_id))
    ...
    # if there are any new responses, send them out to the server
    if response_q.qsize() > 0:
        request_id = response_q.get()
        # Networking: send to server
        ...

# Close down
...
EDIT: fixes small typos.
Your intuition that you need create_task is correct, as create_task is the closest async equivalent of Thread.start: it creates a task that runs in parallel (in an async sense) to whatever you are doing now.
You need separate coroutines that drain the respective queues running in parallel; something like this:
async def main():
    processing_qs = [asyncio.Queue() for i in range(n_queues)]
    response_q = asyncio.Queue()
    async with websockets.connect(f"ws://{host}:{port}") as websocket:
        processing_tasks = [
            asyncio.create_task(processing(processing_q, response_q))
            for processing_q in processing_qs
        ]
        response_task = asyncio.create_task(
            send_responses(websocket, response_q))
        while True:
            message = await websocket.recv()
            requested_process, x, request_id = parse(message)
            await processing_qs[requested_process].put((x, request_id))

async def processing(processing_q, response_q):
    while True:
        x, request_id = await processing_q.get()
        ... create response ...
        await response_q.put(response)

async def send_responses(websocket, response_q):
    while True:
        msg = await response_q.get()
        await websocket.send(msg)
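A self-contained toy (independent of the code above) of why create_task changes the behavior: the spawned task keeps draining its queue concurrently with the coroutine that created it, instead of only when the main loop gets around to it.

import asyncio

async def drain(name, q):
    while True:
        item = await q.get()   # suspends here until an item arrives
        print(f"{name} processed {item}")
        q.task_done()

async def main():
    q = asyncio.Queue()
    worker = asyncio.create_task(drain("worker", q))  # runs concurrently
    for i in range(3):
        await q.put(i)
    await q.join()    # wait until every item has been processed
    worker.cancel()   # then shut the worker down

asyncio.run(main())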
The following queue is not working properly somehow. Is there an obvious mistake I have made? Basically, every incoming SMS message is put onto the queue; the pump tries to send it, and if it is successful the message is deleted from the queue. If it is unsuccessful, the pump sleeps for 2 seconds and tries sending it again.
# initialize queue
queue = queue.Queue()

def messagePump():
    while True:
        item = queue.get()
        if item is not None:
            status = sendText(item)
            if status == 'SUCCEEDED':
                queue.task_done()
            else:
                time.sleep(2)

def sendText(item):
    response = getClient().send_message(item)
    response = response['messages'][0]
    if response['status'] == '0':
        return 'SUCCEEDED'
    else:
        return 'FAILED'
@app.route('/webhooks/inbound-sms', methods=['POST'])
def delivery_receipt():
    data = dict(request.form) or dict(request.args)
    senderNumber = data['msisdn'][0]
    incomingMessage = data['text'][0]
    # came from customer service operator
    if (senderNumber == customerServiceNumber):
        try:
            split = incomingMessage.split(';')
            # get recipient phone number
            recipient = split[0]
            # get message content
            message = split[1]
            # check if target number is 10 digits long and there is a message
            if (len(message) > 0):
                # for confirmation send beginning string only
                successText = 'Message successfully sent to: '+recipient+' with text: '+message[:7]
                queue.put({'from': virtualNumber, 'to': recipient, 'text': message})
The above runs on a Flask server. messagePump is invoked like this:
thread = threading.Thread(target=messagePump)
thread.start()
What commonly happens in such cases is that the thread completes execution before items are ever put on the queue; call thread.daemon = True before running thread.start().
Another thing which may happen here is that the thread was terminated due to an exception. Make sure messagePump handles all possible exceptions.
This topic about tracing exceptions on threads may be useful to you:
Catch a thread's exception in the caller thread in Python
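Putting both suggestions together, a sketch of a hardened pump (names taken from the question; the re-queue line is an assumption about the intended retry behavior, since the original code drops a failed item and moves on):

import threading
import time

def messagePump():
    while True:
        item = queue.get()
        try:
            status = sendText(item)
        except Exception as e:
            # Any uncaught exception would silently kill the thread
            # and the queue would stop draining.
            print(f"sendText raised: {e}")
            status = 'FAILED'
        if status == 'SUCCEEDED':
            queue.task_done()
        else:
            time.sleep(2)
            queue.task_done()
            queue.put(item)  # assumption: re-queue to retry the failed item

thread = threading.Thread(target=messagePump)
thread.daemon = True  # must be set before start()
thread.start()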
I have a Python GUI with Tk that a user adds servers to, and it displays port and resource information about each server. Each server line runs in its own thread and loops with a myQueue.put(executor.submit(lambda: <function here>)). I can load up 15 servers and then close the window. Sometimes the python.exe closes and the IDE shows the application end with exit code 0. Sometimes I close the window and the IDE and task manager show that the program is still running. After a while the PyCharm console prints "main thread is not in main loop" and nothing else happens after that. I thought using a queue with the threads would keep this from happening, but something is going wrong.
Workflow for the code below: the user adds server info in a popup window > that is run through the creation function > the information is passed off to the threadmaker function, which watches an indicator and reruns the SSH session to query info when the previous query finishes.
main = Tk()
myQueue = queue.Queue()
executor = concurrent.futures.ThreadPoolExecutor(max_workers=16)

def creation(server, nickname, user, passw):
    # Create labels for server, nickname, user, password here and place them on the main window
    myQueue.put(executor.submit(lambda: threadmaker(server, nickname, user, passw)))

def threadmaker(server, nickname, user, passw):
    # this function loops until the indicator is 0 and then runs the refresh function
    global workinglist
    if 'normal' == main.state():
        if workinglist[server + "counter"] == "1":
            time.sleep(3)
            myQueue.put(executor.submit(threadmaker(server, nickname, user, passw)))
        if workinglist[server + "counter"] == "0":
            myQueue.put(executor.submit(refresh(server, nickname, user, passw)))
            time.sleep(3)
            myQueue.put(executor.submit(threadmaker(server, nickname, user, passw)))
    if 'normal' != main.state():
        print(main.state())
        pass

def refresh(server, nickname, user, passw):
    global workinglist
    workinglist[server + "counter"] = "1"
    if 'normal' == main.state():
        if 'normal' == main.state():
            try:
                ssh = paramiko.SSHClient()
                ssh.load_system_host_keys()
                ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
                ssh.connect(str(server), username=str(user), password=str(passw), timeout=10, allow_agent=False, look_for_keys=False)
                stdin, stdout, stderr = ssh.exec_command("DF -H")
                type(stdin)
                test2 = stdout.readlines()
                stdin.flush()
                stdin.close()
                ssh.close()
                # << do something with the test2 value >>
            except Exception as E:
                print(E)
        if 'normal' == main.state():
            try:
                pass  # << another ssh query >>
            except Exception as E:
                pass
    workinglist[server + "counter"] = "0"

main.mainloop()
Am I handling the threads or the queue incorrectly?
I've added print(threading.currentThread().getName(), 'Starting') to the beginning of the refresh function, and the number of running threads never gets above the number of servers added, plus 1 for the main thread. So if I'm handling all the threads with my thread pool, what is hanging? I assume something with the SSH attempt.
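For what it's worth, Tkinter's "main thread is not in main loop" message generally means a widget was touched from a worker thread; Tk only tolerates GUI calls from the thread running mainloop(). The usual pattern is for workers to put plain results on a queue and for the Tk thread to poll it with after(); a minimal sketch (hypothetical names, independent of the code above):

import queue
import tkinter as tk

root = tk.Tk()
label = tk.Label(root, text="waiting...")
label.pack()
results = queue.Queue()  # workers put plain data here, never widgets

def poll_results():
    # Runs on the Tk main thread, so touching widgets here is safe.
    try:
        while True:
            text = results.get_nowait()
            label.config(text=text)
    except queue.Empty:
        pass
    root.after(200, poll_results)  # re-schedule on the Tk event loop

root.after(200, poll_results)
root.mainloop()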