How can I stop, or somehow avoid hanging on, the blocking input() call in the following:
import threading

running = True

def callback(msg):
    global running
    if msg == 'stop':
        running = False
    print(msg)

def input_read(callback):
    while running:
        callback(input())

mythread = threading.Thread(target=input_read, args=(callback,))
mythread.start()
while running:
    try:
        pass  # some other code
    except KeyboardInterrupt:
        pass
Somehow the input() call should be stopped, timed out, killed, anything.
Solved by setting the thread to daemon:
mythread.daemon = True
mythread.start()
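One caveat worth noting, assuming cleanup matters to you: daemon threads are killed abruptly at interpreter exit, so finally blocks and other cleanup inside them may never run. A minimal sketch of that behavior:

import threading
import time

def reader():
    try:
        while True:
            time.sleep(1)  # stand-in for the blocking input() call
    finally:
        print('cleanup')   # typically never printed: daemon threads are not unwound at exit

t = threading.Thread(target=reader, daemon=True)
t.start()
print('bye')  # the interpreter exits here and the daemon thread dies with it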
I have the code below, where I am using OpenCV to start webcam video. Alongside that, I also have a thread running that pings www.google.com to check network connectivity.
import time
import cv2
import os
from threading import Thread

stopThread = False

def CheckNetwork():
    global stopThread
    while True:
        time.sleep(60)
        host = "www.google.com"
        response = os.system("ping " + host)
        if response == 0:
            print("Internet host reachable")
        else:
            print("Internet host not reachable")
        if stopThread:
            break

def main():
    global stopThread
    Thread(target=CheckNetwork).start()
    cam = cv2.VideoCapture(0)
    while True:
        ret_val, img = cam.read()
        cv2.imshow('Camera', img)
        key = cv2.waitKey(1)
        if key == ord('q'):
            stopThread = True
            break
    cv2.destroyAllWindows()

main()
This code runs fine. If I close the application by pressing q, the OpenCV window closes, but the application keeps running for up to 60 seconds because of the thread, and only after that does the whole application terminate safely.
I wanted to know if this is a good way to close the thread. Is there a better way, one that can terminate the thread immediately?
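As an aside, one simple way to avoid the 60-second wait, assuming the only goal is a prompt shutdown, is to sleep on a threading.Event instead of time.sleep(60): the event doubles as the stop flag and wakes the thread immediately when set. A minimal sketch of just the network-check part:

import os
import time
from threading import Event, Thread

stop_event = Event()

def CheckNetwork():
    host = "www.google.com"
    # wait() returns True as soon as the event is set, else False after 60s
    while not stop_event.wait(timeout=60):
        response = os.system("ping " + host)
        if response == 0:
            print("Internet host reachable")
        else:
            print("Internet host not reachable")

t = Thread(target=CheckNetwork)
t.start()
time.sleep(2)      # stand-in for the OpenCV display loop
stop_event.set()   # the thread wakes up and exits immediately
t.join()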
There's no native way of stopping a thread in Python. Instead of using a stop flag, you can also use ctypes to call the Python C API and raise an exception in the thread.
import ctypes
import threading
# Other imports...

class ThreadWithException(threading.Thread):
    def __init__(self, name):
        threading.Thread.__init__(self)
        self.name = name

    def run(self):
        pass  # code here...

    def get_id(self):
        # return the id of this thread
        if hasattr(self, '_thread_id'):
            return self._thread_id
        for id, thread in threading._active.items():
            if thread is self:
                return id

    def raise_exception(self):
        thread_id = self.get_id()
        res = ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id,
            ctypes.py_object(SystemExit))
        if res > 1:
            # more than one thread state was modified: undo and report failure
            ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, 0)
            print('Exception raise failure')
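A usage sketch for the class above (the thread name is arbitrary, and run() is assumed to contain a plain Python loop). Note that the asynchronous exception is only delivered between bytecode instructions, so it will not interrupt a blocking call such as time.sleep() or input() until that call returns.

import time

t = ThreadWithException('worker')
t.start()
time.sleep(1)         # let the thread do some work
t.raise_exception()   # asks CPython to raise SystemExit inside the thread
t.join()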
I have an application where a parent process polls a server for download jobs and then spawns child processes to complete them. This cycle continues as long as there are jobs for the parent process to handle. I need to ensure that the child processes die if the parent process crashes. I am using Python as the programming language. Also, if this parent process dies, it is brought back up by some other process. Below are some mechanisms -
1. As per the multiprocessing module of Python: "When a process exits, it attempts to terminate all of its daemonic child processes." So it attempts, but does not guarantee; it's not reliable (a short sketch of this behavior follows after this question).
2. I can add an entry in the DB with a mapping of child_process_id -> jobId, recording which child process is downloading which job. When the parent process comes up, before polling it checks whether there is any child_process_id -> jobId entry; if there is, it kills the process with that child_process_id and sends the jobId in the next poll.
Is there a clean way of killing the child processes when the parent process crashes abruptly? I need a solution that works on Windows, Linux, and Mac. Someone suggested that file locks could help, but I could not understand how file locks would let me achieve this.
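For context on point 1 above, a minimal sketch of the daemonic-child behavior in multiprocessing: the child is terminated when the parent exits normally, but a hard crash (for example SIGKILL) skips that cleanup, which is exactly the unreliability described.

import time
from multiprocessing import Process

def worker():
    while True:
        time.sleep(1)

if __name__ == '__main__':
    p = Process(target=worker)
    p.daemon = True   # terminated by multiprocessing's exit handler on normal parent exit
    p.start()
    time.sleep(2)
    # a normal interpreter exit terminates the daemonic child here;
    # kill -9 on the parent skips this cleanup and orphans the child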
#parent.py
import time
import subprocess
import file_lock_lib
import os

PARENT_LOCK_NAME = "ParentChildTask"
CHILD_LOCK_NAME = "ChildTask-%d"

def main():
    # for running a single parent process
    fmutex, res = file_lock_lib.FileLock(PARENT_LOCK_NAME, True)
    print("PARENT ID is =============" + str(os.getpid()))
    if not res:
        print("ParentProcess already running")
        exit(1)
    print("Spawning Child Processes")
    r = subprocess.Popen(["python", "/Users/abi/PycharmProjects/osProgramming/parent_child/child.py"])
    # acquire the lock for the child process
    c_lock, res = file_lock_lib.FileLock(CHILD_LOCK_NAME % r.pid)
    start_time = int(time.time())
    while (int(time.time()) - start_time) < 180:
        a = 1  # busy-wait for about 180 seconds
    1/0  # simulate a parent crash; the OS then releases the locks
    #file_lock_lib.FileUnlock(fmutex)

if __name__ == '__main__':
    main()
#file_lock_lib.py
import sys
import os

if sys.platform == 'win32':
    # pywin32 modules, assumed available on Windows
    import win32event
    import win32api
    import pywintypes
    import win32security as w32s
else:
    import fcntl

def FileLock(fname, nb=False):
    if sys.platform == 'win32':
        try:
            sa = w32s.SECURITY_ATTRIBUTES()
            sa.SetSecurityDescriptorDacl(True, None, False)
            fmutex = win32event.CreateMutex(sa, False, fname)
        except pywintypes.error as fault:
            if fault.winerror == 5:
                fmutex = win32event.OpenMutex(win32event.SYNCHRONIZE, False, fname)
            else:
                raise
        if nb:
            wtime = 0
        else:
            wtime = win32event.INFINITE
        rc = win32event.WaitForSingleObject(fmutex, wtime)
        if rc == win32event.WAIT_TIMEOUT or rc == win32event.WAIT_FAILED:
            win32api.CloseHandle(fmutex)
            return None, False
    else:
        if not fname.startswith('/'):
            # not an absolute path name: prefix with $HOME/.file_lock_lib
            fname = os.path.join(os.getenv('HOME'), '.file_lock_lib', fname)
        fdir = os.path.dirname(fname)
        if not os.path.exists(fdir):
            os.makedirs(fdir)
        try:
            fmutex = open(fname, "rb+")
        except IOError:
            fmutex = open(fname, "wb+")
        try:
            flags = fcntl.LOCK_EX
            if nb:
                flags |= fcntl.LOCK_NB
            fcntl.flock(fmutex.fileno(), flags)
        except IOError:
            return None, False
    return fmutex, True

def FileUnlock(fmutex):
    if sys.platform == 'win32':
        win32event.ReleaseMutex(fmutex)
        win32api.CloseHandle(fmutex)
    else:
        fcntl.flock(fmutex.fileno(), fcntl.LOCK_UN)
        fmutex.close()
#child.py
import file_lock_lib
import os

PARENT_LOCK_NAME = "ParentChildTask"
CHILD_LOCK_NAME = "ChildTask-%d"

def main():
    print("CHILD PID =================" + str(os.getpid()))
    # check that the parent process is running
    fmutex, res = file_lock_lib.FileLock(PARENT_LOCK_NAME, True)
    if res:
        file_lock_lib.FileUnlock(fmutex)
        print("Parent process is not running")
        exit(1)
    print("Child Started")
    # spawn a thread to do the work
    # block on the parent-held lock; the OS releases it if the parent dies
    mtx, res = file_lock_lib.FileLock(CHILD_LOCK_NAME % os.getpid())
    file_lock_lib.FileUnlock(mtx)
    print("Child Exited as parent process was killed")

if __name__ == '__main__':
    main()
I figured out a way to solve the issue. Consider the parent and child processes in the code above. The parent takes a per-child file lock before doing its work, and the child then blocks trying to acquire that same lock. Because the OS releases a process's file locks (and mutex handles on Windows) when the process dies, the child's acquire succeeds as soon as the parent crashes, at which point the child can clean up and exit.
Hope this solution works.
What should be written in kill_input() instead of pass to stop input() and terminate the program?
#!/usr/bin/env python3
import threading, time

running = True

def kill_input():
    pass

def input_reader():
    while running:
        print(input())

t = threading.Thread(target = input_reader)
t.start()
time.sleep(2)
kill_input()
print('bye')
Solved by setting the thread to daemon.
t.daemon = True
t.start()
If there are no other non-daemon threads still hanging around, the program will terminate automatically.
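If a daemon thread is not acceptable (say the reader must run cleanup code), one alternative on POSIX systems is to poll stdin with select so the loop can periodically re-check the flag instead of blocking forever inside input(). A hedged sketch, assuming a Unix-like platform (select on sys.stdin does not work on Windows):

import select
import sys
import threading
import time

running = True

def input_reader():
    while running:
        # wait up to 0.5s for stdin to become readable, then re-check the flag
        ready, _, _ = select.select([sys.stdin], [], [], 0.5)
        if ready:
            print(sys.stdin.readline().rstrip('\n'))

t = threading.Thread(target=input_reader)
t.start()
time.sleep(2)
running = False   # the reader notices within 0.5s and exits cleanly
t.join()
print('bye')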
Hi everyone.
I am using paramiko with Python 3.3.3 x64 on Windows 7 x64. The following is my code, and the strange thing is that I need to add time.sleep(0.01) as a delay before calling client.close() to end the session. Otherwise, a lot of processes remain on the SSH host and do not end automatically.
Could anyone do me a favor and explain this?
The paramiko port used for Python 3:
(https://travis-ci.org/nischu7/paramiko)
Here are the steps to reproduce:
A) remove the time.sleep(0.01) before client.close() and run the script
B) type the password for the SSH host
C) type the first command, for example: ls -la
D) issue commands very frequently, for example by pressing the up-arrow and Enter alternately, very fast, several times
E) when running ps -ef | grep dropbear (the SSH server; I have not tested with OpenSSH), a lot of processes exist
F) type exit and Ctrl + Z to terminate the script
G) keep the time.sleep(0.01) before client.close() and run the script again
H) repeat steps B, C, and D, then check with ps -ef | grep dropbear again; only one SSH process is generated by this script.
and here is the code:
from tkinter import *
from threading import Thread
from queue import Queue, Empty
import _thread
import time
from paramiko import SSHClient, Transport, AutoAddPolicy, WarningPolicy
import getpass

def start(client):
    try:
        client.connect(hostname='127.0.0.1', port=22, username='ubuntu', password=pw)
        return True
    except Exception as e:
        client.close()
        print(e)
        return False

def check(client, outqueue):
    while start(client):  # note: start() reconnects on every loop iteration
        outqueue.put("Command to run: ")
        cmd = input()
        if cmd == "exit":
            client.close()
            break
        chan = client.get_transport().open_session()
        outqueue.put("running '%s'" % cmd)
        chan.exec_command(cmd)
        while True:
            if chan.recv_ready():
                data = chan.recv(4096).decode('ascii')
                outqueue.put("recv:\n%s" % data)
            if chan.recv_stderr_ready():
                error = chan.recv_stderr(4096).decode('ascii')
                outqueue.put("error:\n%s" % error)
            if chan.exit_status_ready():
                exitcode = chan.recv_exit_status()
                outqueue.put("exit status: %s" % exitcode)
                #print('close s')
                #print(client.close())
                time.sleep(0.01)
                client.close()
                #print('close e')
                #time.sleep(0.05)
                break

def reader(outqueue):
    while True:
        while outqueue.qsize():
            try:
                data = outqueue.get()
                if data:
                    print(data)
            except Exception as e:
                print(e)
            #continue
        #time.sleep(0.5)

if __name__ == '__main__':
    pw = getpass.getpass()
    client = SSHClient()
    client.set_missing_host_key_policy(WarningPolicy())
    #client.set_missing_host_key_policy(AutoAddPolicy())
    outqueue = Queue()
    r = Thread(target=reader, args=(outqueue,))
    r.daemon = True
    r.start()
    t = Thread(target=check, args=(client, outqueue,))
    #t.daemon = True
    t.start()
    t.join()
It was my mistake: I was starting the client more than once. check() called start(client) on every loop iteration, opening a new SSH connection per command, which is why the server accumulated processes. Solved by connecting once up front, as follows:
import sys
from threading import Thread
from queue import Queue, Empty
import _thread
import time
from paramiko import SSHClient, Transport, AutoAddPolicy, WarningPolicy
import getpass

def start(client):
    try:
        client.connect(hostname='127.0.0.1', port=22, username='ubuntu', password=pw)
        return True
    except Exception as e:
        client.close()
        print(e)
        return False

def check(client, outqueue):
    while True:  # the connection is made once, before this loop starts
        outqueue.put("Command to run: ")
        cmd = input()
        if cmd == "exit":
            client.close()
            break
        chan = client.get_transport().open_session()
        outqueue.put("running '%s'" % cmd)
        chan.exec_command(cmd)
        while True:
            if chan.recv_ready():
                data = chan.recv(4096).decode('ascii')
                outqueue.put("recv:\n%s" % data)
            if chan.recv_stderr_ready():
                error = chan.recv_stderr(4096).decode('ascii')
                outqueue.put("error:\n%s" % error)
            if chan.exit_status_ready():
                exitcode = chan.recv_exit_status()
                outqueue.put("exit status: %s" % exitcode)
                #print('close s')
                #print(client.close())
                #time.sleep(0.01)
                #client.close()
                #print('close e')
                #time.sleep(0.05)
                break

def reader(outqueue):
    while True:
        while outqueue.qsize():
            try:
                data = outqueue.get()
                if data:
                    print(data)
            except Exception as e:
                print(e)
            #continue
        #time.sleep(0.5)

if __name__ == '__main__':
    pw = getpass.getpass()
    client = SSHClient()
    client.set_missing_host_key_policy(WarningPolicy())
    #client.set_missing_host_key_policy(AutoAddPolicy())
    if not start(client):
        #os._exit(0)
        sys.exit(0)
    outqueue = Queue()
    r = Thread(target=reader, args=(outqueue,))
    r.daemon = True
    r.start()
    t = Thread(target=check, args=(client, outqueue,))
    #t.daemon = True
    t.start()
    t.join()
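As a small follow-up, assuming the goal is to end each server-side session promptly without tearing down the whole connection, it may also help to close each exec channel once its command finishes (paramiko's Channel has a close() method). A sketch of the inner loop's tail:

            if chan.exit_status_ready():
                exitcode = chan.recv_exit_status()
                outqueue.put("exit status: %s" % exitcode)
                chan.close()  # end this command's session; the SSH connection stays open
                break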
I am trying to write a function using Python multiprocessing that I can control and pass a "command" to in order to cleanly terminate the process.
I looked at a few examples and tried them out, but they didn't seem to work for me.
Basically, I need to run a separate process whose function does some while-loop work, and, when needed, stop it by passing a command of some kind so it exits.
Please advise.
Thanks.
example 1
from multiprocessing import Process, Queue

def start_process(queue):
    while True:
        try:
            m = queue.get()
            if m == 'exit':
                print('cleaning up worker...')
                # add your cleanup code here
                break
            else:
                print(m)
        except KeyboardInterrupt:
            print('ignore CTRL-C from worker')

if __name__ == '__main__':
    queue = Queue()
    process = Process(target=start_process, args=(queue,))
    process.start()
    queue.put(12)
    try:
        process.join()
    except KeyboardInterrupt:
        print('wait for worker to cleanup...')
        queue.put('exit')
        process.join()
example 2
import multiprocessing
import time

class MyProcess(multiprocessing.Process):
    def __init__(self):
        multiprocessing.Process.__init__(self)
        self.exit = multiprocessing.Event()

    def run(self):
        while not self.exit.is_set():
            pass
        print("You exited!")

    def shutdown(self):
        print("Shutdown initiated")
        self.exit.set()

if __name__ == "__main__":
    process = MyProcess()
    process.start()
    print("Waiting for a while")
    time.sleep(3)
    process.shutdown()
    time.sleep(3)
    print("Child process state: %d" % process.is_alive())
both examples work fine for me - perhaps you're misunderstanding how they should work?
in the first example, when the main thread runs, it starts the child and sends 12. then it waits to join the child. at that point everything is stalled because the child is waiting for 'exit'. but if you then hit ctrl-C the 'exit' is sent, the child exits, and the second join is successful:
> python3.3 example1.py
12
^Cignore CTRL-C from worker
wait for worker to cleanup...
cleaning up worker...
>
if you just want the parent to send 'exit' and then for everything to end, use:
from multiprocessing import Process, Queue

def start_process(queue):
    while True:
        try:
            m = queue.get()
            if m == 'exit':
                print('cleaning up worker...')
                # add your cleanup code here
                break
            else:
                print(m)
        except KeyboardInterrupt:
            print('ignore CTRL-C from worker')
    print('goodbye cruel world')

if __name__ == '__main__':
    queue = Queue()
    process = Process(target=start_process, args=(queue,))
    process.start()
    queue.put(12)
    print('sending exit')
    queue.put('exit')
    process.join()
which gives:
> python3.3 my-example.py
sending exit
12
cleaning up worker...
goodbye cruel world
>
your second example also works (with the indentation fixed):
> python3.3 example2.py
Waiting for a while
Shutdown initiated
You exited!
Child process state: 0
>
(just wait a little). not sure what else you could have expected here.