Why does running a Python asyncio subprocess in a thread seem unstable on Linux? - python-3.x

I am trying to run an asynchronous external command from a Python 3 Qt application. I previously used a multiprocessing thread to do it without freezing the Qt application, but now I would like to do it with a QThread so that I can pickle and pass a Qt window as an argument to some other functions (not shown here). I did this and tested it successfully on my Windows OS, but when I run the application on my Linux OS I get the following error: RuntimeError: Cannot add child handler, the child watcher does not have a loop attached
From that point I tried to isolate the problem, and I arrived at the minimal example below (as small as I could make it) that reproduces it.
Of course, as I mentioned before, if I replace the QThreadPool with a list of multiprocessing threads, this example works fine. I also noticed something that astonished me: if I uncomment the line rc = subp([sys.executable,"./HelloWorld.py"]) in the last part of the example, it works as well, and I cannot explain why.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

## IMPORTS ##
from functools import partial
from PyQt5 import QtCore
from PyQt5.QtCore import QThreadPool, QRunnable, QCoreApplication
import sys
import asyncio.subprocess

# Global variables
Qpool = QtCore.QThreadPool()

def subp(cmd_list):
    """ """
    if sys.platform.startswith('linux'):
        new_loop = asyncio.new_event_loop()
        asyncio.set_event_loop(new_loop)
    elif sys.platform.startswith('win'):
        new_loop = asyncio.ProactorEventLoop()  # for subprocess' pipes on Windows
        asyncio.set_event_loop(new_loop)
    else:
        print('[ERROR] OS not available for encodage... EXIT')
        sys.exit(2)
    rc, stdout, stderr = new_loop.run_until_complete(get_subp(cmd_list))
    new_loop.close()
    if rc != 0:
        print('Exit not zero ({}): {}'.format(rc, sys.exc_info()[0]))  # , exc_info=True)
    return rc, stdout, stderr

async def get_subp(cmd_list):
    """ """
    print('subp: ' + ' '.join(cmd_list))
    # Create the subprocess, redirect the standard output into a pipe
    create = asyncio.create_subprocess_exec(*cmd_list, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
    proc = await create
    # read child's stdout/stderr concurrently (capture and display)
    try:
        stdout, stderr = await asyncio.gather(
            read_stream_and_display(proc.stdout),
            read_stream_and_display(proc.stderr))
    except Exception:
        proc.kill()
        raise
    finally:
        rc = await proc.wait()
        print(" [Exit {}] ".format(rc) + ' '.join(cmd_list))
    return rc, stdout, stderr

async def read_stream_and_display(stream):
    """ """
    async for line in stream:
        print(line, flush=True)

class Qrun_from_job(QtCore.QRunnable):
    def __init__(self, job, arg):
        super(Qrun_from_job, self).__init__()
        self.job = job
        self.arg = arg

    def run(self):
        code = partial(self.job)
        code()

def ThdSomething(job, arg):
    testRunnable = Qrun_from_job(job, arg)
    Qpool.start(testRunnable)

def testThatThing():
    rc = subp([sys.executable, "./HelloWorld.py"])

if __name__ == '__main__':
    app = QCoreApplication([])
    # rc = subp([sys.executable,"./HelloWorld.py"])
    ThdSomething(testThatThing, 'tests')
    sys.exit(app.exec_())
with the HelloWorld.py file:
#!/usr/bin/env python3
import sys

if __name__ == '__main__':
    print('HelloWorld')
    sys.exit(0)
Therefore I have two questions: how can I make this example work properly with QThread? And why does a previous call to an asynchronous task (a call to the subp function) change the stability of the example on Linux?
EDIT
Following the advice of @user4815162342, I tried run_coroutine_threadsafe with the code below. But it does not work and returns the same error, i.e. RuntimeError: Cannot add child handler, the child watcher does not have a loop attached. I also tried replacing the threading call with its equivalent from the multiprocessing module; with that one, the subp command is never launched.
The code:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

## IMPORTS ##
import sys
import asyncio.subprocess
import threading
import multiprocessing

# at top-level
loop = asyncio.new_event_loop()

def spin_loop():
    asyncio.set_event_loop(loop)
    loop.run_forever()

def subp(cmd_list):
    # submit the task to asyncio
    fut = asyncio.run_coroutine_threadsafe(get_subp(cmd_list), loop)
    # wait for the task to finish
    rc, stdout, stderr = fut.result()
    return rc, stdout, stderr

async def get_subp(cmd_list):
    """ """
    print('subp: ' + ' '.join(cmd_list))
    # Create the subprocess, redirect the standard output into a pipe
    proc = await asyncio.create_subprocess_exec(*cmd_list, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
    # read child's stdout/stderr concurrently (capture and display)
    try:
        stdout, stderr = await asyncio.gather(
            read_stream_and_display(proc.stdout),
            read_stream_and_display(proc.stderr))
    except Exception:
        proc.kill()
        raise
    finally:
        rc = await proc.wait()
        print(" [Exit {}] ".format(rc) + ' '.join(cmd_list))
    return rc, stdout, stderr

async def read_stream_and_display(stream):
    """ """
    async for line in stream:
        print(line, flush=True)

if __name__ == '__main__':
    threading.Thread(target=spin_loop, daemon=True).start()
    # multiprocessing.Process(target=spin_loop, daemon=True).start()
    print('thread passed')
    rc = subp([sys.executable, "./HelloWorld.py"])
    print('end')
    sys.exit(0)

As a general design principle, it's unnecessary and wasteful to create new event loops only to run a single coroutine. Instead, create one event loop, run it in a separate thread, and use it for all your asyncio needs by submitting tasks to it with asyncio.run_coroutine_threadsafe.
For example:
# at top-level
loop = asyncio.new_event_loop()

def spin_loop():
    asyncio.set_event_loop(loop)
    loop.run_forever()

asyncio.get_child_watcher().attach_loop(loop)
threading.Thread(target=spin_loop, daemon=True).start()

# ... the rest of your code ...
With this in place, you can easily execute any asyncio code from any thread whatsoever using the following:
def subp(cmd_list):
    # submit the task to asyncio
    fut = asyncio.run_coroutine_threadsafe(get_subp(cmd_list), loop)
    # wait for the task to finish
    rc, stdout, stderr = fut.result()
    return rc, stdout, stderr
Note that you can use add_done_callback to be notified when the future returned by asyncio.run_coroutine_threadsafe finishes, so you might not need a thread in the first place.
Note that all interaction with the event loop should go either through the aforementioned run_coroutine_threadsafe (when submitting coroutines) or through loop.call_soon_threadsafe when you need the event loop to call an ordinary function. For example, to stop the event loop, you would invoke loop.call_soon_threadsafe(loop.stop).
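For instance, a non-blocking variant of subp (a sketch, not part of the original answer; the on_done callback name is hypothetical) could use add_done_callback instead of blocking on fut.result():

def subp_async(cmd_list, on_done):
    # submit the coroutine to the loop running in the background thread
    fut = asyncio.run_coroutine_threadsafe(get_subp(cmd_list), loop)
    # instead of blocking on fut.result(), get notified when it finishes;
    # the callback typically runs in the event-loop thread, so keep it short
    fut.add_done_callback(lambda f: on_done(*f.result()))

# and when shutting the application down, stop the background loop safely:
# loop.call_soon_threadsafe(loop.stop)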

I suspect that what you are doing is simply unsupported - according to the documentation:
To handle signals and to execute subprocesses, the event loop must be run in the main thread.
As you are trying to execute a subprocess, I do not think running a new event loop in another thread works.
Thing is, Qt already has an event loop, and what you really need is to convince asyncio to use it. That means that you need an event loop implementation that provides the "event loop interface for asyncio" implemented on top of "Qt's event loop".
I believe that asyncqt provides such an implementation. You may want to try to use QEventLoop(app) in place of asyncio.new_event_loop().
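For reference, a rough sketch of what that might look like (assuming asyncqt's documented QEventLoop usage; untested here):

import sys
import asyncio
from PyQt5.QtWidgets import QApplication
from asyncqt import QEventLoop

app = QApplication(sys.argv)
# let asyncio drive Qt's own event loop instead of creating a separate one
loop = QEventLoop(app)
asyncio.set_event_loop(loop)

async def hello():
    proc = await asyncio.create_subprocess_exec(
        sys.executable, "./HelloWorld.py",
        stdout=asyncio.subprocess.PIPE)
    out, _ = await proc.communicate()
    print(out.decode())

with loop:
    loop.run_until_complete(hello())

Because everything runs in the main thread here, the child-watcher restriction quoted above no longer gets in the way.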

Related

Why can't I see terminal input (stdout) in Linux after executing this Python3 script?

I wrote a Python 3 script (shown below; repo here: https://gitlab.com/papiris/gcode-motor-stutter-generator).
After I execute it on Linux (Raspberry Pi OS Bullseye 32-bit) and either exit with Ctrl+C or let it finish, I can't see what I type in that terminal tab anymore. The terminal (KDE Konsole) still responds to commands; the text just isn't visible. I can open a new terminal tab and keep working, but the terminal tabs I run this script in never show the text I input again.
Why is this, and how can I fix it?
I tried searching for this topic, but couldn't find anything similar.
#!/usr/bin/env python3
import sys
from sys import stdout, stdin
from curtsies import Input
from threading import Thread
from queue import Queue, Empty

### non-blocking read of stdin
def enqueue_input(stdin, queue):
    try:
        with Input(keynames='curses') as input_generator:
            for _input in iter(input_generator):
                queue.put(_input)
    except KeyboardInterrupt:
        sys.exit(1)

q = Queue()
t = Thread(target=enqueue_input, args=(stdin, q))
t.daemon = True  # thread dies with the program
t.start()

def main():
    while True:
        try:
            input_key = q.get(timeout=2)
        except Empty:
            print('printing continuously')
        else:
            if input_key == 'n':
                print('extrusion loop stopped, moving on')
                break

if __name__ == "__main__":
    main()

subprocess.popen-process stops running while using it with SMACH

I'm simply trying to start a rosbag command from Python in a SMACH. I figured out that one way to do so is to use subprocesses. My goal is that as soon as the rosbag starts, the state machine transitions to state T2 (and stays there).
However, when I start a rosbag using subprocess.Popen inside a SMACH state and then run rostopic echo 'topic', the rosbag at first appears to publish data properly, then suddenly stops publishing, and only when I end the SMACH with Ctrl+C does the rosbag publish some more data before it stops as well.
Is there any reasonable explanation for that (did I maybe miss a parameter, or is it just not possible to keep the node running that way)? Or is there maybe a better way to start the rosbag and let it run in the background?
(Btw, some other commands, like some roslaunch commands, also appear to stop working after they're started via subprocess.Popen!)
My code looks as follows:
#!/usr/bin/env python3
import os
import signal
import subprocess
import smach
import smach_ros
import rospy
import time
from gnss_navigation.srv import *

class t1(smach.State):
    def __init__(self, outcomes=['successful', 'failed', 'preempted']):
        smach.State.__init__(self, outcomes)

    def execute(self, userdata):
        if self.preempt_requested():
            self.service_preempt()
            return 'preempted'
        try:
            process1 = subprocess.Popen('rosbag play /home/faps/bags/2020-05-07-11-18-18.bag', stdout=subprocess.PIPE,
                                        shell=True, preexec_fn=os.setsid)
        except Exception:
            return 'failed'
        return 'successful'

class t2(smach.State):
    def __init__(self, outcomes=['successful', 'failed', 'preempted']):
        smach.State.__init__(self, outcomes)

    def execute(self, userdata):
        #time.sleep(2)
        if self.preempt_requested():
            self.service_preempt()
            return 'preempted'
        return 'successful'

if __name__ == "__main__":
    rospy.init_node('test_state_machine')

    sm_1 = smach.StateMachine(outcomes=['success', 'error', 'preempted'])
    with sm_1:
        smach.StateMachine.add('T1', t1(), transitions={'successful': 'T2', 'failed': 'error'})
        smach.StateMachine.add('T2', t2(), transitions={'successful': 'T2', 'failed': 'error', 'preempted': 'preempted'})

    # Execute SMACH plan
    outcome = sm_1.execute()
    print('exit-outcome:' + outcome)

    # Wait for ctrl-c to stop the application
    rospy.spin()
As explained in the comment section of the answer in this thread, the problem appears when subprocess.PIPE is used as stdout: nothing ever reads from the pipe, so its buffer eventually fills up and the child process blocks.
Therefore, the two possible solutions I used to solve the problem are:
If you don't care about print-outs and stuff -> use devnull as output:
FNULL = open(os.devnull, 'w')
process = subprocess.Popen('your command', stdout=FNULL, stderr=subprocess.STDOUT,
                           shell=True, preexec_fn=os.setsid)
If you do need print-outs and stuff -> create a log-file and use it as output:
log_file = open('path_to_log/log.txt', 'w')
process = subprocess.Popen('your command', stdout=log_file, stderr=subprocess.STDOUT,
                           shell=True, preexec_fn=os.setsid)
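For completeness, a third option (not part of the original answer, just a sketch) is to keep the PIPE but actively drain it from a background thread so its buffer never fills up; 'your command' is a placeholder as above:

import os
import subprocess
import threading

def drain(pipe):
    # keep reading the child's output so its pipe buffer never fills up
    for line in iter(pipe.readline, b''):
        print(line.decode(errors='replace'), end='')

process = subprocess.Popen('your command', stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                           shell=True, preexec_fn=os.setsid)
threading.Thread(target=drain, args=(process.stdout,), daemon=True).start()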

Using asyncio to wait for results from subprocess

My Python script contains a loop that uses subprocess to run commands outside the script. Each subprocess is independent. I listen for the returned message in case there's an error; I can't ignore the result of the subprocess. Here's the script without asyncio (I've replaced my computationally expensive call with sleep):
from subprocess import PIPE # https://docs.python.org/3/library/subprocess.html
import subprocess

def go_do_something(index: int) -> None:
    """
    This function takes a long time
    Nothing is returned
    Each instance is independent
    """
    process = subprocess.run(["sleep", "2"], stdout=PIPE, stderr=PIPE, timeout=20)
    stdout = process.stdout.decode("utf-8")
    stderr = process.stderr.decode("utf-8")
    if "error" in stderr:
        print("error for " + str(index))
    return

def my_long_func(val: int) -> None:
    """
    This function contains a loop
    Each iteration of the loop calls a function
    Nothing is returned
    """
    for index in range(val):
        print("index = " + str(index))
        go_do_something(index)

# run the script
my_long_func(3)  # launch three tasks
I think I could use asyncio to speed up this activity since the Python script is waiting on the external subprocess to complete. I think threading or multiprocessing are not necessary, though they could also result in faster execution. Using a task queue (e.g., Celery) is another option.
I tried implementing the asyncio approach, but am missing something since the following attempt doesn't change the overall execution time:
import asyncio
from subprocess import PIPE # https://docs.python.org/3/library/subprocess.html
import subprocess

async def go_do_something(index: int) -> None:
    """
    This function takes a long time
    Nothing is returned
    Each instance is independent
    """
    process = subprocess.run(["sleep", "2"], stdout=PIPE, stderr=PIPE, timeout=20)
    stdout = process.stdout.decode("utf-8")
    stderr = process.stderr.decode("utf-8")
    if "error" in stderr:
        print("error for " + str(index))
    return

def my_long_func(val: int) -> None:
    """
    This function contains a loop
    Each iteration of the loop calls a function
    Nothing is returned
    """
    # https://docs.python.org/3/library/asyncio-eventloop.html
    loop = asyncio.get_event_loop()
    tasks = []
    for index in range(val):
        task = go_do_something(index)
        tasks.append(task)
    # https://docs.python.org/3/library/asyncio-task.html
    tasks = asyncio.gather(*tasks)
    loop.run_until_complete(tasks)
    loop.close()
    return

my_long_func(3)  # launch three tasks
If I want to monitor the output of each subprocess but not wait while each subprocess runs, can I benefit from asyncio? Or does this situation require something like multiprocessing or Celery?
Try executing the commands using asyncio instead of subprocess.
Define a run() function:
import asyncio

async def run(cmd: str):
    proc = await asyncio.create_subprocess_shell(
        cmd,
        stderr=asyncio.subprocess.PIPE,
        stdout=asyncio.subprocess.PIPE
    )
    stdout, stderr = await proc.communicate()

    print(f'[{cmd!r} exited with {proc.returncode}]')
    if stdout:
        print(f'[stdout]\n{stdout.decode()}')
    if stderr:
        print(f'[stderr]\n{stderr.decode()}')
Then you may call it as you would call any async function:
asyncio.run(run('sleep 2'))
#=>
['sleep 2' exited with 0]
The example was taken from the official documentation. Also available here.
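Since the question is about running several independent commands concurrently, you can gather them inside a coroutine. A sketch that reuses the run() function defined above (three concurrent sleeps finish in about 2 seconds, not 6):

import asyncio

async def main():
    # the three sleeps run concurrently
    await asyncio.gather(run('sleep 2'), run('sleep 2'), run('sleep 2'))

asyncio.run(main())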
@ronginat pointed me to https://asyncio.readthedocs.io/en/latest/subprocess.html, which I was able to adapt to my situation:
import asyncio

async def run_command(*args):
    # Create subprocess
    process = await asyncio.create_subprocess_exec(
        *args,
        # stdout must be a pipe to be accessible as process.stdout
        stdout=asyncio.subprocess.PIPE)
    # Wait for the subprocess to finish
    stdout, stderr = await process.communicate()
    # Return stdout
    return stdout.decode().strip()

async def go_do_something(index: int) -> None:
    print('index=', index)
    res = await run_command('sleep', '2')
    return res

def my_long_func(val: int) -> None:
    task_list = []
    for indx in range(val):
        task_list.append(go_do_something(indx))
    loop = asyncio.get_event_loop()
    commands = asyncio.gather(*task_list)
    reslt = loop.run_until_complete(commands)
    print(reslt)
    loop.close()

my_long_func(3)  # launch three tasks
The total time of execution is just over 2 seconds even though there are three sleeps of duration 2 seconds. And I get the stdout from each subprocess.
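For what it's worth, on Python 3.7+ the explicit loop management can be replaced by asyncio.run; a sketch reusing the go_do_something coroutine above:

import asyncio

async def main(val: int):
    results = await asyncio.gather(*(go_do_something(i) for i in range(val)))
    print(results)

asyncio.run(main(3))  # still about 2 seconds total for three 2-second sleeps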

Python Asyncio: RuntimeError: This event loop is already running

I am working on an asyncio module and having issues terminating the program. I am running my program in a terminal and Ctrl + C does not stop it. However, if I close the terminal and try to run the program again, I get this issue:
INFO:root:In main
ERROR:root:This event loop is already running
Below is my sample code for understanding.
# all_tasks.py
import asyncio
import logging

# module imports
import settings

# configure logging into a file
logging.basicConfig(filename=settings.LOG_FILENAME, level=logging.DEBUG)

class AsyncTest(object):

    async def taskOne(self):
        while True:
            print("Task One")  # Print is just an example, I am doing lot of stuff inside.
            await asyncio.sleep(60)

    async def taskTwo(self):
        while True:
            print("Task Two")  # Print is just an example, I am doing lot of stuff inside.
            await asyncio.sleep(60)

    async def main(self):
        try:
            loop = asyncio.get_event_loop()
            tasks = [
                asyncio.ensure_future(self.taskOne()),
                asyncio.ensure_future(self.taskTwo()),
            ]
            loop.run_until_complete(asyncio.wait(tasks))
        except RuntimeError as error:
            logging.info("In main")
            logging.error(error)

if __name__ == '__main__':
    asynctest = AsyncTest()
    asyncio.run(asynctest.main())
Config: Windows 10, python 3.7.0
File Name: all_tasks.py
Command: python all_tasks.py
Any help is much appreciated.
Thanks
asyncio.run creates and runs an event loop. You shouldn't create and run another one yourself, especially inside a coroutine (a function defined with async def). In a coroutine you should only await something.
Modify the code accordingly:
# ...
    async def main(self):
        tasks = [
            asyncio.ensure_future(self.taskOne()),
            asyncio.ensure_future(self.taskTwo()),
        ]
        await asyncio.wait(tasks)

if __name__ == '__main__':
    asynctest = AsyncTest()
    asyncio.run(asynctest.main())
It'll work.
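Equivalently, since the two tasks are awaited together anyway, asyncio.gather keeps it a little shorter. A minimal self-contained sketch (logging and the settings import of the original omitted):

import asyncio

class AsyncTest:
    async def taskOne(self):
        while True:
            print("Task One")
            await asyncio.sleep(60)

    async def taskTwo(self):
        while True:
            print("Task Two")
            await asyncio.sleep(60)

    async def main(self):
        # gather wraps both coroutines in tasks and waits for them together
        await asyncio.gather(self.taskOne(), self.taskTwo())

if __name__ == '__main__':
    asyncio.run(AsyncTest().main())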

Terminate subprocess

I'm curious why the code below freezes. When I kill the python3 interpreter, the "cat" process remains as a zombie. I expected the subprocess to be terminated before the main process finishes.
When I manually send SIGTERM to cat /dev/zero, the process finishes correctly (almost immediately).
#!/usr/bin/env python3
import subprocess
import re
import os
import sys
import time
import logging
from PyQt4 import QtCore

class Command(QtCore.QThread):
    # stateChanged = QtCore.pyqtSignal([bool])

    def __init__(self):
        QtCore.QThread.__init__(self)
        self.__runned = False
        self.__cmd = None
        print("initialize")

    def run(self):
        self.__runned = True
        self.__cmd = subprocess.Popen(["cat /dev/zero"], shell=True, stdout=subprocess.PIPE)
        try:
            while self.__runned:
                print("reading via pipe")
                buf = self.__cmd.stdout.readline()
                print("Buffer:{}".format(buf))
        except:
            logging.warning("Can't read from subprocess (cat /dev/zero) via pipe")
        finally:
            print("terminating")
            self.__cmd.terminate()
            self.__cmd.kill()

    def stop(self):
        print("Command::stop stopping")
        self.__runned = False
        if self.__cmd:
            self.__cmd.terminate()
            self.__cmd.kill()
        print("Command::stop stopped")

def exitApp():
    command.stop()
    time.sleep(1)
    sys.exit(0)

if __name__ == "__main__":
    app = QtCore.QCoreApplication(sys.argv)
    command = Command()
    # command.daemon = True
    command.start()
    timer = QtCore.QTimer()
    QtCore.QObject.connect(timer, QtCore.SIGNAL("timeout()"), exitApp)
    timer.start(2 * 1000)
    sys.exit(app.exec_())
As you noted yourself, the reason for the zombie is that the signal is caught by the shell and doesn't affect the process created by it. However, there is a way to kill the shell and all processes created by it: you have to use the process-group feature. See How to terminate a python subprocess launched with shell=True. Having said that, if you can manage without shell=True, that's always preferable - see my answer here.
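A sketch of the process-group approach (for illustration; not the solution the asker ended up with):

import os
import signal
import subprocess

# start_new_session=True puts the shell and its children into their own process group
proc = subprocess.Popen("cat /dev/zero", shell=True, stdout=subprocess.PIPE,
                        start_new_session=True)

# later: kill the whole group, not just the shell
os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
proc.wait()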
I solved this problem in a different way, so here's the result:
I have to call subprocess.Popen with shell=False, because otherwise it creates two processes (the shell and the actual process), and __cmd.kill() sends the signal to the shell while the actual process remains a zombie.
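In practice that means passing the command as an argument list instead of a shell string; a sketch:

import subprocess

# no shell in between: terminate()/kill() go straight to cat
cmd = subprocess.Popen(["cat", "/dev/zero"], stdout=subprocess.PIPE)
cmd.terminate()
cmd.wait()  # reap the child so it doesn't linger as a zombie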
