Program keeps running if TK window closed too fast - multithreading

I have a Python GUI built with Tk that a user adds servers to; it displays port and resource information about each server. Each server line runs in its own thread and loops with a myQueue.put(executor.submit(lambda: <function here>)). I can load up 15 servers and then close the window. Sometimes python.exe closes and the IDE shows the application ending with exit code 0. Other times I close the window and both the IDE and Task Manager show that the program is still running. After a while the PyCharm console prints "main thread is not in main loop" and nothing else happens after that. I thought using a queue with the threads would prevent this, but something is going wrong.
Workflow for the code below: the user adds server info in a popup window; that is run through the creation function; the information is then passed to the threadmaker function, which watches an indicator and re-runs the SSH session to query info when the previous query finishes.
import queue
import time
import concurrent.futures
import paramiko
from tkinter import Tk

main = Tk()
myQueue = queue.Queue()
executor = concurrent.futures.ThreadPoolExecutor(max_workers=16)

def creation(server, nickname, user, passw):
    # Create labels for server, nickname, user, password here and place them on the main window
    myQueue.put(executor.submit(lambda: threadmaker(server, nickname, user, passw)))

def threadmaker(server, nickname, user, passw):
    # this function loops until the indicator is 0 and then runs the refresh function
    global workinglist
    if 'normal' == main.state():
        if workinglist[server + "counter"] == "1":
            time.sleep(3)
            myQueue.put(executor.submit(threadmaker(server, nickname, user, passw)))
        if workinglist[server + "counter"] == "0":
            myQueue.put(executor.submit(refresh(server, nickname, user, passw)))
            time.sleep(3)
            myQueue.put(executor.submit(threadmaker(server, nickname, user, passw)))
    if 'normal' != main.state():
        print(main.state())
        pass

def refresh(server, nickname, user, passw):
    global workinglist
    workinglist[server + "counter"] = "1"
    if 'normal' == main.state():
        if 'normal' == main.state():
            try:
                ssh = paramiko.SSHClient()
                ssh.load_system_host_keys()
                ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
                ssh.connect(str(server), username=str(user), password=str(passw), timeout=10, allow_agent=False, look_for_keys=False)
                stdin, stdout, stderr = ssh.exec_command("DF -H")
                type(stdin)
                test2 = stdout.readlines()
                stdin.flush()
                stdin.close()
                ssh.close()
                # << do something with the test2 value >>
            except Exception as E:
                print(E)
    if 'normal' == main.state():
        try:
            pass  # << another ssh query >>
        except Exception as E:
            pass
    workinglist[server + "counter"] = "0"

main.mainloop()
Am I handling the threads or the queue incorrectly?
I've added print(threading.currentThread().getName(), 'Starting') to the beginning of the refresh function, and the number of running threads never gets over the number of servers added plus one for the main thread. So if I'm handling all the threads with my thread pool, what is hanging up? I assume something with the SSH attempt.
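For reference, a minimal sketch (with placeholder names, not taken from the program above) of the difference between handing the executor a callable and calling the function before submitting it:
import concurrent.futures
import time

executor = concurrent.futures.ThreadPoolExecutor(max_workers=4)

def query(server):
    # stand-in for the SSH work; just sleeps and returns a string
    time.sleep(1)
    return f"{server}: ok"

# Runs query(...) in a worker thread; submit() returns immediately with a Future.
future_ok = executor.submit(query, "server1")

# By contrast, executor.submit(query("server1")) would run query() right here,
# in the calling thread, and submit its *return value* (a string, not a callable).
print(future_ok.result())

executor.shutdown(wait=True)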

Related

Why does the child process change its status to 'Z' (zombie) when the client socket is closed?

Explanation
I am building a multi-client server architecture with sockets in Python 3.
For this I use the multiprocessing library.
The following code creates a server that listens for client connections:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("", PORT))
sock.listen(CLIENTS)
print(logFile().message(f"running ClassAdmin server, listen {CLIENTS} clients by port {PORT}...", True, "INFO"))
sockSSL = context.wrap_socket(sock, server_side=True)
while sockSSL:
    connection, address = sockSSL.accept()
    eventChildStop = multiprocessing.Event()
    subprocess = multiprocessing.Process(target=ClientListener, name="client", args=(connection, address))
    subprocess.start()
In the code above, each client is handled in a child process created with multiprocessing.Process().
This runs the ClientListener class.
class ClientListener:
    def __init__(self, conn, addr):
        try:
            self.conn, self.addr = conn, addr
            self.nick = ""
            self.__listenData()
        except (KeyboardInterrupt, SystemExit) as err:
            print(logFile().message(f"The host {self.nick} ({self.addr[0]}:{self.addr[1]}) left", True, "INFO"))
        except BaseException as err:
            type, object, traceback = sys.exc_info()
            file = traceback.tb_frame.f_code.co_filename
            line = traceback.tb_lineno
            print(logFile().message(f"{err} in {file}:{line}", True, "ERROR"))
        finally:
            try:
                ListClients().remove(self.conn)
                self.conn.close()
            except:
                None
            finally:
                Client(self.conn, self.addr).registre(self.nick, "DISCONNECTED", False)

    def __listenData(self):
        while True:
            data = self.conn.recv(1024)
            text = data.decode('utf-8')
            if text.startswith("sig."):
                exec(f"raise {text.split('.')[1]}")
            elif data:
                if text.startswith("HelloServer: "):
                    self.nick = text.replace("HelloServer: ", "")
                    client = Client(self.conn, self.addr).registre(self.nick, "CONNECTED", False)
                    if client == False:
                        self.conn.send(b"sig.SystemExit(-5000,'The nick exists and is connected',True)")
                    else:
                        print(logFile().message(f"The host {self.nick} ({self.addr[0]}:{self.addr[1]}) is connected", True, "INFO"))
                        ListClients().add(self.conn)
                else:
                    print(data)
__init__() calls the __listenData() method, which is responsible for handling the data the client sends to the server.
In __init__() I also handle exceptions, to show information when the client disconnects.
try:
    # {...}
finally:
    try:
        ListClients().remove(self.conn)
        self.conn.close()
    except:
        None
    finally:
        Client(self.conn, self.addr).registre(self.nick, "DISCONNECTED", False)
        # HERE, can I close the current child process?
This try always runs the finally block, because the client must always be removed from the clients list, and if there is still a connection it is closed.
The problem
My problem is the following:
I run the server...
On the client machine, I run the client...
When the client connects to the server, the server creates a child process for it.
Now the client closes; if we look at the child process on the server, its status has changed to Z, meaning zombie.
My question is...
How do I close this child process? Since the client runs in a child process started by multiprocessing.Process(), I suppose I have to close it with multiprocessing's terminate() method... I think that is the solution.
Possible solution?
What I thought of...
Add another child process in the root that listens for a multiprocessing.Event():
while sockSSL:
    connection, address = sockSSL.accept()
    eventChildStop = multiprocessing.Event()
    subprocess = multiprocessing.Process(target=ClientListener, name="client", args=(connection, address, eventChildStop))
    subprocess.start()
    multiprocessing.Process(target=ClientListener.exitSubprocess, name="exitChildProcess", args=(eventChildStop, subprocess)).start()
    time.sleep(1)
In the ClientListener class I add the event argument to __init__():
class ClientListener:
    def __init__(self, conn, addr, event):
I add the static method exitSubprocess(). In theory this method terminates the child process (but in practice it does not):
@staticmethod
def exitSubprocess(event, process):
    while True:
        if event.is_set():
            print(process.pid)
            process.terminate()
            break
        time.sleep(.5)
But this does not work; the result is the same. The child processes (one is the static exitSubprocess method, the first is the client process) still end up with zombie status. Why...?
Does anybody understand what is happening?
I would appreciate a response. Thank you for your attention.
Solution
The problem is solved!
How did I solve it?
What I did: after starting the client's child process, I start a thread in the parent process. When the child process is about to exit, it sets an event; the thread then joins the child process to the parent process and exits successfully.
Finally, the client's child process exits.
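As an aside, a minimal sketch (separate from the server code above) of why join() matters here: a child process that has finished stays a zombie until the parent waits on it, which join() does:
import multiprocessing
import time

def worker():
    # the child does its work and exits immediately
    pass

if __name__ == "__main__":
    child = multiprocessing.Process(target=worker)
    child.start()
    time.sleep(1)  # the child has already exited, but until the parent waits
                   # on it, the OS keeps it around as a zombie (<defunct>, state Z)
    child.join()   # join() waits on / reaps the child, removing the zombie entry
    print("child reaped, exit code:", child.exitcode)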
Steps to follow
First, in the server's root code I added:
# This thread is responsible for closing the client's child process
threading.Thread(target=ClientListener.exitSubprocess, name="closeChildProcess", args=(eventChildStop, subprocess,)).start()
The complete result:
while sockSSL:
    connection, address = sockSSL.accept()
    eventChildStop = multiprocessing.Event()
    subprocess = multiprocessing.Process(target=ClientListener, name="client", args=(connection, address, eventChildStop))
    # This thread is responsible for closing the client's child process
    threading.Thread(target=ClientListener.exitSubprocess, name="closeChildProcess", args=(eventChildStop, subprocess,)).start()
    subprocess.start()
    time.sleep(1)
Then, in the new exitSubprocess method, I changed:
if event.is_set():
    print(process.pid)
    process.terminate()
    break
to:
if event.is_set():
    process.join()
    break
The complete result:
# This method takes the child process as an argument, in order to join it to the parent process
@staticmethod
def exitSubprocess(event, process):
    while True:
        if event.is_set():
            process.join()
            break
        time.sleep(.5)
Important: in the client's child process, add a time.sleep(1) of one second in its last finally block.
This gives the thread time to join the client's child process to the parent process.
class ClientListener:
    def __init__(self, conn, addr, event):
        try:
            self.conn, self.addr = conn, addr
            self.nick = ""
            self.__listenData()
        except (KeyboardInterrupt, SystemExit) as err:
            print(logFile().message(f"The host {self.nick} ({self.addr[0]}:{self.addr[1]}) left", True, "INFO"))
        except BaseException as err:
            type, object, traceback = sys.exc_info()
            file = traceback.tb_frame.f_code.co_filename
            line = traceback.tb_lineno
            print(logFile().message(f"{err} in {file}:{line}", True, "ERROR"))
        finally:
            try:
                ListClients().remove(self.conn)
                self.conn.close()
            except:
                None
            finally:
                Client(self.conn, self.addr).registre(self.nick, "DISCONNECTED", False)
                event.set()
                # This delays closing the process by 1 second, giving the exitSubprocess method time to join the client's child process with the parent process
                time.sleep(1)
Thank you very much for your attention and time.

wait till command completed in paramiko invoke_shell() [duplicate]

This question already has answers here:
Execute multiple dependent commands individually with Paramiko and find out when each command finishes
(1 answer)
Executing command using "su -l" in SSH using Python
(1 answer)
Closed 5 days ago.
I want to wait until the given command has finished executing on the remote machine. In this case it just executes, returns, and does not wait until the command is completed.
import paramiko
import re
import time

def scp_switch(host, username, PasswdValue):
    ssh = paramiko.SSHClient()
    try:
        # Logging into remote host with my credentials
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(host, username=username, password=PasswdValue, timeout=30)
        try:
            # Switching to powerbroker/root mode
            command = "pbrun xyz -u root\n"
            channel = ssh.invoke_shell()
            channel.send(command)
            time.sleep(3)
            while not re.search('Password', str(channel.recv(9999), 'utf-8')):
                time.sleep(1)
                print('Waiting...')
            channel.send("%s\n" % PasswdValue)
            time.sleep(3)
            # Executing the command on remote host as root (after logging in as root)
            # I don't have any specific keyword to search for in the given output, hence I am not using a while loop here.
            cmd = "/tmp/slp.sh cool >/tmp/slp_log.txt \n"
            print('Executing %s' % cmd)
            channel.send(cmd)  # it is not waiting here till the process is completed
            time.sleep(3)
            res = str(channel.recv(1024), 'utf-8')
            print(res)
            print('process completed')
        except Exception as e:
            print('Error while switching:', str(e))
    except Exception as e:
        print('Error while SSH : %s' % (str(e)))
    ssh.close()

""" Provide the host and credentials here """
HOST = 'abcd.us.domain.com'
username = 'heyboy'
password = 'passcode'
scp_switch(HOST, username, password)
As per my research, it will not return any status code. Is there any logic to get the return code and wait till the process is completed?
I know this is an old post, but leaving this here in case someone has the same problem.
You can use an echo that will run if your command executes successfully; for example, if you are doing scp ... && echo 'transfer complete', then you can catch this output with a loop:
while True:
    s = chan.recv(4096)
    s = s.decode()
    if 'transfer complete' in s:
        break
    time.sleep(1)
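A slightly fuller sketch of the same sentinel idea (host, credentials and command are placeholders, and this assumes a plain invoke_shell() session as in the question):
import re
import time
import paramiko

ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect("abcd.us.domain.com", username="heyboy", password="passcode", timeout=30)

chan = ssh.invoke_shell()
# Append an echo of the exit status as a sentinel, so we can tell when the command finished.
chan.send("/tmp/slp.sh cool >/tmp/slp_log.txt; echo DONE $?\n")

output = ""
# Keep reading until "DONE <exit status>" shows up in the accumulated output.
# (A regex is used so the echoed command line "echo DONE $?" itself does not match.)
while re.search(r"DONE \d+", output) is None:
    if chan.recv_ready():
        output += chan.recv(4096).decode("utf-8", errors="replace")
    else:
        time.sleep(1)

print(output)
ssh.close()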

ThreadPoolWorkers that will not die if they create a thread

In my Slack RTM message handler function, I call one of a set of functions. On its own, each message event handler blocks the next, so I began to call these functions in new threads. After doing this, I cannot exit() my program; I am left with a ThreadPoolExecutor-x_x for each of the message handlers that were called.
Even if I set my threads to setDaemon=True and .join() them, the ThreadPoolExecutors remain.
def exitFunc(sendfn, channel, thread, user, text, groups, groupdict, meta):
    reply = 'bye'
    sendfn(channel=channel, message=reply)
    for thread in threading.enumerate():
        print(thread.getName())
    exit()
Produces this, and hangs:
MainThread
ThreadPoolExecutor-0_0
ThreadPoolExecutor-0_1
ThreadPoolExecutor-3_0
ThreadPoolExecutor-3_1
ThreadPoolExecutor-3_2
exitFunc
ThreadPoolExecutor-3_3
When I don't run the function as a new thread, these ThreadPoolExecutors seem to hang around but they do let my program exit.
Spawning the threads:
def __init__(self, token, username=None, icon_emoji=None, security=None):
    ...
    slack.RTMClient.run_on(event='message')(self.readMessage)

def readMessage(self, **payload):
    ...
    thread = Thread(target=fn['fn'], kwargs=fnargs)
    thread.start()
The function passed in as sendfn:
def sendMessage(self, channel, message, thread=None, username=None, icon_emoji=None):
    if username == None:
        username = self.default_username
    if icon_emoji == None:
        icon_emoji = self.default_icon_emoji
    print('{} Send to {}: {}'.format(str(datetime.now()), channel, message))
    self.webclient.chat_postMessage(
        channel=channel,
        text=message,
        username=username,
        icon_emoji=icon_emoji,
        thread_ts=thread
    )
I am using the slackclient 2.0.1 Python package.
This was an XY problem.
It wasn't that my threads weren't dying, it was that I was trying to exit() from a thread.
Since exit() ultimately “only” raises an exception, it will only exit
the process when called from the main thread, and the exception is not
intercepted.
https://docs.python.org/2/library/sys.html#sys.exit
I moved my exit to the main thread and it exited fine.
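A minimal sketch of that pattern (names are placeholders, not the Slack handler code above): the worker thread only signals that it wants to quit, and the main thread is the one that actually exits:
import sys
import threading
import time

shutdown_requested = threading.Event()

def handler():
    # a worker thread cannot end the whole process with exit()/sys.exit();
    # it can only raise SystemExit inside itself, so signal the main thread instead
    shutdown_requested.set()

threading.Thread(target=handler).start()

while not shutdown_requested.is_set():
    time.sleep(0.1)

sys.exit(0)  # called from the main thread, so the whole process exits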

Python 3.4 - How to 'run' another Python script continuously, how to pass HTTP GET / POST to socket

This question is two-fold.
1. I need to run code for a socket server that is all defined and created in another .py file. Clicking Run in PyCharm works just fine, but if you exec() the file it just runs the bottom part of the code.
There are a few answers here but they are conflicting and for Python 2.
From what I can gather there are three ways:
- execfile(), which I think is Python 2 code.
- os.system() (but I've seen it said that it's not correct to hand this off to the OS)
- and subprocess.Popen (unsure how to use this either)
I need this to run in the background; it is used to create threads for sockets for the recv portion of the overall program and listen on those ports so I can input commands to a router.
This is the complete code in question:
import sys
import socket
import threading
import time

QUIT = False

class ClientThread(threading.Thread):  # Class that implements the client threads in this server
    def __init__(self, client_sock):  # Initialize the object, save the socket that this thread will use.
        threading.Thread.__init__(self)
        self.client = client_sock

    def run(self):  # Thread's main loop. Once this function returns, the thread is finished and dies.
        global QUIT  # Need to declare QUIT as global, since the method can change it
        done = False
        cmd = self.readline()  # Read data from the socket and process it
        while not done:
            if 'quit' == cmd:
                self.writeline('Ok, bye. Server shut down')
                QUIT = True
                done = True
            elif 'bye' == cmd:
                self.writeline('Ok, bye. Thread closed')
                done = True
            else:
                self.writeline(self.name)
            cmd = self.readline()
        self.client.close()  # Make sure socket is closed when we're done with it
        return

    def readline(self):  # Helper function, reads up to 1024 chars from the socket, and returns them as a string
        result = self.client.recv(1024)
        if result is not None:  # All letters in lower case and without any end of line markers
            result = result.strip().lower().decode('ascii')
        return result

    def writeline(self, text):  # Helper func, writes the given string to the socket with an end of line marker at the end
        self.client.send(text.strip().encode("ascii") + b'\n')

class Server:  # Server class. Opens up a socket and listens for incoming connections.
    def __init__(self):  # Every time a new connection arrives, a new thread object is created and
        self.sock = None  # the processing of the connection is deferred to it
        self.thread_list = []

    def run(self):  # Server main loop: creates the server (incoming) socket, listens > creates a thread to handle it
        all_good = False
        try_count = 0  # Attempt to open the socket
        while not all_good:
            if 3 < try_count:  # Tried more than 3 times without success, maybe the port is in use by another program
                sys.exit(1)
            try:
                self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # Create the socket
                port = 80
                self.sock.bind(('127.0.0.1', port))  # Bind to the interface and port we want to listen on
                self.sock.listen(5)
                all_good = True
                break
            except socket.error:
                print('Socket connection error... Waiting 10 seconds to retry.')
                del self.sock
                time.sleep(10)
                try_count += 1
        print('Server is listening for incoming connections.')
        print('Try to connect through the command line with:')
        print('telnet localhost 80')
        print('and then type whatever you want.')
        print()
        print("typing 'bye' finishes the thread. but not the server",)
        print("eg. you can quit telnet, run it again and get a different ",)
        print("thread name")
        print("typing 'quit' finishes the server")
        try:
            while not QUIT:
                try:
                    self.sock.settimeout(0.500)
                    client = self.sock.accept()[0]
                except socket.timeout:
                    time.sleep(1)
                    if QUIT:
                        print('Received quit command. Shutting down...')
                        break
                    continue
                new_thread = ClientThread(client)
                print('Incoming Connection. Started thread ',)
                print(new_thread.getName())
                self.thread_list.append(new_thread)
                new_thread.start()
                for thread in self.thread_list:
                    if not thread.isAlive():
                        self.thread_list.remove(thread)
                        thread.join()
        except KeyboardInterrupt:
            print('Ctrl+C pressed... Shutting Down')
        except Exception as err:
            print('Exception caught: %s\nClosing...' % err)
        for thread in self.thread_list:
            thread.join(1.0)
        self.sock.close()

if "__main__" == __name__:
    server = Server()
    server.run()
    print('Terminated')
Notes:
This is created in Python 3.4
I use Pycharm as my IDE.
One part of a whole.
2. So I'm creating a lightning detection system and this is how I expect it to be done:
- Listen to the port on the router forever
The above is done, but the issue with this is described in question 1.
- Pull numbers from a text file for sending text message
Completed this also.
- Send http get / post to port on the router
The issue with this is that I'm unsure how the router will act if I send this in binary form; I suspect it won't matter, since the input commands for sending over GSM are specific. Some clarification may be needed at some point.
- Receive reply from router and handle exceptions
- Listen for relay trip for alarm on severe or close strike warning.
- If tripped, send messages to phones in storage from text file
This would be the http get / post that's sent.
- Wait for reply from router to indicate messages have been sent, exception handle if it's not the case
- Go back to start
There are a few issues I'd like some background knowledge on that is proving hard to find via good old Google and in the answers here on Stack.
How do I grab the received data from the router from another process running in another file? I guess I could write it to a text file and read that data back, but I'd rather not.
How to multi-process and which method to use.
How to send an HTTP GET / POST to the socket on the router; the request needed according to the router manual is as follows: e.g. "http://192.168.1.1/cgi-bin/sms_send?number=0037061212345&text=test"
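For the HTTP GET part, a minimal sketch using only the standard library (the URL is the example from the router manual; whether the router expects extra headers or authentication is not covered here):
import urllib.request

# The URL below is the example request from the router manual; number and text are placeholders.
url = "http://192.168.1.1/cgi-bin/sms_send?number=0037061212345&text=test"

with urllib.request.urlopen(url, timeout=10) as response:
    body = response.read().decode("utf-8", errors="replace")
    print(response.status, body)  # the router's reply can then be checked / exception-handled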
Notes: Using Sockets, threading, sys and time on Python 3.4/Pycharm IDE.
Lightning detector used is LD-250 with RLO Relay attached.
RUT500 Teltonica router used.
Any direction/comments, errors spotted, anything I'm drastically missing would be greatly appreciated! Thank you very much in advance :D Constructive criticism is greatly encouraged!
Okay, so for the first part, none of the options suggested in the OP were my answer. Running the script as-is from os.system() or exec() without declaring a new socket object just ran from __name__, which essentially just printed out "terminated". Getting around this was simple: since everything was already put into classes, all I had to do was create a new thread. This is how it was done:
import Socketthread2
new_thread = Socketthread2.Server() # Effectively declaring a new server class object.
new_thread.run()
This allowed the script to run from the beginning by initialising the code from the start in Socketthread2, which also defines ClientThread, so that was run too. Running this at the start of the parent program allowed it to run in the background, and the parent then continued with its own code while the rest of the script stayed active.
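Note that Server.run() blocks the caller, so if the parent program needs to keep doing its own work, one option (a sketch, not part of the answer above) is to run it on a daemon thread:
import threading
import Socketthread2  # the module containing the Server class from above

server = Socketthread2.Server()
# Run the blocking server loop on a background (daemon) thread so the parent
# program can continue with its own work; a daemon thread will not keep the
# process alive once the main thread finishes.
server_thread = threading.Thread(target=server.run, daemon=True)
server_thread.start()

# ... parent program continues here ...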

A single tkinter button for multiple functions

I am trying to implement a single tkinter button which should "tail -f" some log file on a remote server. It should also stop the process locally AND kill the remote tail process when it is clicked a second time. I tried this without tkinter with success; it stops when Ctrl+C is pressed.
When the tail (button3) button is clicked it hangs and waits for the job to complete. It does not accept any new events until then. I know tkinter is single threaded and believe this is causing the issue. Code is below; I appreciate any help.
from Tkinter import *
from re import *
import paramiko
import time
import select

class MyApp:
    def __init__(self, parent):
        self.myParent = parent
        self.myContainer1 = Frame(parent, width=500, height=500)
        self.myContainer1.pack()

        # ------------------ LABEL #1 for MAC ------------------------------------
        # mac label field
        self.label = Label(self.myContainer1, text='enter MAC').pack(side=TOP, padx=10, pady=10)

        # ------------------ ENTRY FIELD #1 for MAC ------------------------------------
        # mac entry field
        mac_var = StringVar()
        self.entry = Entry(self.myContainer1, textvariable=mac_var, width=10)
        self.entry.pack(side=TOP, padx=100, pady=10)
        mac_var.set("XXXXX")
        s = mac_var.get()

        # ------------------ LABEL #2 for MAC OWNER ------------------------------------
        # name label field
        self.label = Label(self.myContainer1, text='enter MAC owner').pack(side=TOP, padx=10, pady=10)

        # ------------------ ENTRY #2 for MAC OWNER ------------------------------------
        # name entry field
        name_var = StringVar()
        self.entry = Entry(self.myContainer1, textvariable=name_var, width=25)
        self.entry.pack(side=TOP, padx=100, pady=10)
        name_var.set("name surname")
        s = name_var.get()

        # ------------------ BUTTON #3 ------------------------------------
        # event binding
        self.button3 = Button(self.myContainer1)
        self.button3.bind("<Button-1>", self.button3Click)
        self.button3.configure(text="tail", background="purple")
        self.button3.pack(side=LEFT)

    def button3Click(self, event):
        if self.button3["background"] == "purple":
            self.button3.configure(text="Cancel Tail", background="yellow")
            self.tail_flag = True
            print "tail_flag is", self.tail_flag
            self.taillogg()
        else:
            self.button3.configure(text="Tail", background="purple")
            self.canceltaillogg()
            # root.destroy()

    def canceltaillogg(self):
        self.tail_flag = False
        print "tail_flag is", self.tail_flag

    def taillogg(self):
        server, port, username, password = ('myserver', 22, 'root', 'passw')
        paramiko.util.log_to_file("C:\\log_transport_paramiko.txt")
        nbytes = 100
        trans = paramiko.Transport((server, port))
        trans.connect(username=username, password=password)
        trans.set_keepalive(1)  # when ssh dies (with Ctrl C) processes spawned by that ssh connection will die, too (in one sec)
        sess = trans.open_channel("session")
        # Once the channel is established, we can execute only one command.
        # To execute another command, we need to create another channel
        sess.exec_command('tail -f /usr/local/var/log/radius/radius.log')
        timeout = 10
        timestart = time.time()
        while True:
            try:
                rl, wl, xl = select.select([sess], [], [], 0.0)
                if len(rl) > 0:  # stdout
                    print sess.recv(1024)
                if time.time() - timestart > timeout or self.tail_flag == False:
                    print "timeout 30 sec"
                    trans.close()
                    sess.close()
                    break
            except KeyboardInterrupt:
                print("Caught Control-C")
                trans.close()
                sess.close()
                break
            """if self.tail_flag==False:
                break"""

print ("\n") * 100  # clear the screen
print "Starting program"
root = Tk()
root.title('MAC REGISTRATION APPLET')
myapp = MyApp(root)
print ("Ready to start executing the event loop.")
root.mainloop()
print ("Finished executing the event loop.")
Try this and see if it gives you the behavior you want.
self.button3.bind("<Button-1>", lambda event: self.root.after(0, self.button3Click))
Also, this is unrelated, but you don't have to call "bind" on your button - it has a built-in callback attribute called command:
self.button3 = Button(self.myContainer1, command=lambda:self.root.after(0, self.button3Click))
# self.button3.bind("<Button-1>", self.button3Click) (don't need this)
Then you'll want to remove the event argument from the button3Click function, since the button callback doesn't receive any arguments.
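Since the underlying problem is that taillogg() blocks mainloop(), another common approach (a sketch with placeholder logic, not part of the answer above, written with Python 3 tkinter names) is to run the blocking loop on a worker thread so the button callback returns immediately:
import threading
import time
from tkinter import Tk, Button

class App:
    def __init__(self, root):
        self.tail_flag = False
        self.button = Button(root, text="tail", command=self.toggle_tail)
        self.button.pack()

    def toggle_tail(self):
        if not self.tail_flag:
            self.tail_flag = True
            self.button.configure(text="Cancel Tail")
            # run the blocking tail loop off the GUI thread so mainloop() stays responsive
            threading.Thread(target=self.tail_loop, daemon=True).start()
        else:
            self.tail_flag = False  # the worker checks this flag and stops
            self.button.configure(text="tail")

    def tail_loop(self):
        while self.tail_flag:
            # stand-in for reading from the paramiko channel
            time.sleep(0.5)

root = Tk()
App(root)
root.mainloop()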

Resources