Does the SSHClient.close() method of paramiko need to wait? - python-3.x

Hello everyone.
I am using paramiko with Python 3.3.3 x64 on Windows 7 x64. My code is below, and the strange thing is that I need to add time.sleep(0.01) as a delay before calling client.close() to end the session. Otherwise, a lot of processes remain on the SSH host and never end automatically.
Could anyone do me a favor and explain this?
The paramiko version used for Python 3:
(https://travis-ci.org/nischu7/paramiko)
Here are the steps to reproduce:
A) remove the time.sleep(0.01) before client.close() and run the script
B) type the password for the SSH host
C) type the first command, for example: ls -la
D) type commands very frequently, for example, keep pressing the up-arrow and Enter alternately, very fast, several times
E) when running ps -ef | grep dropbear (the SSH server; I have not tested with OpenSSH), a lot of processes exist
F) type exit and Ctrl+Z to terminate the script
G) keep the time.sleep(0.01) before client.close() and run the script again
H) repeat steps B, C, and D, then check with ps -ef | grep dropbear again; only one SSH process is generated by this script.
And here is the code:
from tkinter import *
from threading import Thread
from queue import Queue, Empty
import _thread
import time
from paramiko import SSHClient, Transport, AutoAddPolicy, WarningPolicy
import getpass

def start(client):
    try:
        client.connect(hostname='127.0.0.1', port=22, username='ubuntu', password=pw)
        return True
    except Exception as e:
        client.close()
        print(e)
        return False

def check(client, outqueue):
    while start(client):
        outqueue.put("Command to run: ")
        cmd = input()
        if cmd == "exit":
            client.close()
            break
        chan = client.get_transport().open_session()
        outqueue.put("running '%s'" % cmd)
        chan.exec_command(cmd)
        while True:
            if chan.recv_ready():
                data = chan.recv(4096).decode('ascii')
                outqueue.put("recv:\n%s" % data)
            if chan.recv_stderr_ready():
                error = chan.recv_stderr(4096).decode('ascii')
                outqueue.put("error:\n%s" % error)
            if chan.exit_status_ready():
                exitcode = chan.recv_exit_status()
                outqueue.put("exit status: %s" % exitcode)
                #print('close s')
                #print(client.close())
                time.sleep(0.01)
                client.close()
                #print('close e')
                #time.sleep(0.05)
                break

def reader(outqueue):
    while True:
        while outqueue.qsize():
            try:
                data = outqueue.get()
                if data:
                    print(data)
            except Exception as e:
                print(e)
                #continue
        #time.sleep(0.5)

if __name__ == '__main__':
    pw = getpass.getpass()
    client = SSHClient()
    client.set_missing_host_key_policy(WarningPolicy())
    #client.set_missing_host_key_policy(AutoAddPolicy())
    outqueue = Queue()
    r = Thread(target=reader, args=(outqueue,))
    r.daemon = True
    r.start()
    t = Thread(target=check, args=(client, outqueue,))
    #t.daemon = True
    t.start()
    t.join()

It was my mistake: I was starting the client more than once, so it is solved by the following:
from threading import Thread
from queue import Queue, Empty
import _thread
import time
import sys
from paramiko import SSHClient, Transport, AutoAddPolicy, WarningPolicy
import getpass

def start(client):
    try:
        client.connect(hostname='127.0.0.1', port=22, username='ubuntu', password=pw)
        return True
    except Exception as e:
        client.close()
        print(e)
        return False

def check(client, outqueue):
    while True:
        outqueue.put("Command to run: ")
        cmd = input()
        if cmd == "exit":
            client.close()
            break
        chan = client.get_transport().open_session()
        outqueue.put("running '%s'" % cmd)
        chan.exec_command(cmd)
        while True:
            if chan.recv_ready():
                data = chan.recv(4096).decode('ascii')
                outqueue.put("recv:\n%s" % data)
            if chan.recv_stderr_ready():
                error = chan.recv_stderr(4096).decode('ascii')
                outqueue.put("error:\n%s" % error)
            if chan.exit_status_ready():
                exitcode = chan.recv_exit_status()
                outqueue.put("exit status: %s" % exitcode)
                #print('close s')
                #print(client.close())
                #time.sleep(0.01)
                #client.close()
                #print('close e')
                #time.sleep(0.05)
                break

def reader(outqueue):
    while True:
        while outqueue.qsize():
            try:
                data = outqueue.get()
                if data:
                    print(data)
            except Exception as e:
                print(e)
                #continue
        #time.sleep(0.5)

if __name__ == '__main__':
    pw = getpass.getpass()
    client = SSHClient()
    client.set_missing_host_key_policy(WarningPolicy())
    #client.set_missing_host_key_policy(AutoAddPolicy())
    if not start(client):
        #os._exit(0)
        sys.exit(0)
    outqueue = Queue()
    r = Thread(target=reader, args=(outqueue,))
    r.daemon = True
    r.start()
    t = Thread(target=check, args=(client, outqueue,))
    #t.daemon = True
    t.start()
    t.join()

Related

Shutdown during recv on python socket

During the execution of this code, it blocks on the join.
I have a TCP server running on ("127.0.0.1", 1777) for the test.
I tried using the socket directly with recv, but the result is the same.
Any idea why the shutdown on READ doesn't interrupt the read?
import socket
from threading import Thread
from time import sleep

class Parser(Thread):
    rbufsize = 4096
    wbufsize = 4096
    encoding = "utf-8"
    new_line = "\n"

    def __init__(self):
        super().__init__()
        self._socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
        self._wfile = None
        self._rfile = None

    def run(self):
        self._socket.connect(("127.0.0.1", 1777))
        self._rfile = self._socket.makefile('rb', self.rbufsize, encoding=self.encoding, newline=self.new_line)
        self._wfile = self._socket.makefile('wb', self.wbufsize, encoding=self.encoding, newline=self.new_line)
        while True:
            data = self._rfile.readline()
            if not data:
                break
            self._handle_data(data)
        self._cleanup()

    def _cleanup(self):
        """
        Closing
        """
        if not self._wfile.closed:
            try:
                self._wfile.flush()
            except socket.error:
                # A final socket error may have occurred here, such as
                # the local error ECONNABORTED.
                pass
        self._socket.shutdown(socket.SHUT_RDWR)
        self._wfile.close()
        self._rfile.close()
        self._socket.close()

    def stop(self):
        self._socket.shutdown(socket.SHUT_RD)

if __name__ == "__main__":
    p = Parser()
    p.start()
    sleep(5)
    p.stop()
    print("start join")
    p.join()

Python CMD2 interrupt printing prompt

I am using CMD2 to create a server. I have broken my code down to the smallest bit of code that still produces the problem.
import socket
import _thread
from cmd2 import *

def grab_data(conn):
    while True:
        try:
            data = conn.recv(1024)
            print(data)
        except:
            print("disconnected.")
            break

def grab_client(sock):
    while True:
        conn, addr = sock.accept()
        print("New connection")
        _thread.start_new_thread(grab_data, (conn,))

def start_conn(ip, port):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind((ip, int(port)))
    sock.listen(10)
    print("Socket listening")
    _thread.start_new_thread(grab_client, (sock,))

class CLI(Cmd):
    def __init__(self):
        Cmd.__init__(self)
        self.prompt = "Prompt> "

    def do_listen(self, arg):
        start_conn('0.0.0.0', '4446')

    def emptyline(self):
        pass

    def do_quit(self, arg):
        return True

cli = CLI()
cli.cmdloop("Welcome to the server.")
The issue I run into is that when a client connects, it does not reprint the prompt. It hangs at an empty line with just the cursor. I am not sure how to get the prompt to print back.
You're blocking trying to read 1024 bytes, so it's got to wait until that entire buffer is filled. To do proper line-based buffering is a bit tricky, but a simple (albeit non-performant) implementation is to read a character at a time and check if it's a newline:
line = ""
while True:
data = conn.recv(1)
line += data
if data in "\n\r":
break
(This is not great code, but let's see if that solves the problem and we can improve it.)
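If that works, a slightly less chatty variant of the same idea is to buffer whatever recv returns and split on newlines. This is just a sketch; read_lines is a hypothetical helper, not part of cmd2 or the code above:
def read_lines(conn):
    """Yield complete lines (as bytes) from a connected socket."""
    buffer = b""
    while True:
        chunk = conn.recv(4096)        # read whatever is available, up to 4 KiB
        if not chunk:                  # empty result: the peer closed the connection
            if buffer:
                yield buffer           # flush any trailing partial line
            return
        buffer += chunk
        while b"\n" in buffer:
            line, buffer = buffer.split(b"\n", 1)
            yield line.rstrip(b"\r")   # tolerate CRLF line endings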

How to find out why a thread is suspended when using multiprocessing, or bypass that?

I use feedparser to get RSS feeds from some sites; my core code is like this:
import sys
import time
import multiprocessing
import feedparser
import bot_exceptions

def parseworker(procnum, result_queue, return_dict, source_link):
    try:
        data = feedparser.parse(source_link)
        return_dict[procnum] = data
    except Exception as e:
        print(str(e))
    result_queue.put(source_link + 'grabbed')

def infoworker(procnum, timeout, result_queue, source_name, source_link):
    text = 'recheck ' + source_name + ': ' + '...'
    progress = ''
    for x in range(timeout):
        progress += '.'
        sys.stdout.write('\r' + text + progress)
        sys.stdout.flush()
        time.sleep(1)
    result_queue.put('time out')

def parsecaller(link, timeout, timestocheck):
    return_dict = multiprocessing.Manager().dict()
    result_queue = multiprocessing.Queue()
    counter = 1
    jobs = []
    result = []
    while not (counter > timestocheck):
        p1 = multiprocessing.Process(target=infoworker, args=(11, timeout, result_queue, source_name, link))
        p2 = multiprocessing.Process(target=parseworker, args=(22, result_queue, return_dict, link))
        jobs.append(p1)
        jobs.append(p2)
        p1.start()
        p2.start()
        result_queue.get()
        p1.terminate()
        p2.terminate()
        p1.join()
        p2.join()
        result = return_dict.values()
        if not result or result[0].bozo:
            print(' bad - no data', flush=True)
            result = -1
        else:
            print(' ok ', flush=True)
            result = result[0]
            break
        counter += 1
    if result == -1:
        raise bot_exceptions.ParserExceptionData()
    elif result == -2:
        raise bot_exceptions.ParserExceptionConnection()
    else:
        return result

if __name__ == '__main__':
    multiprocessing.freeze_support()
    multiprocessing.set_start_method('spawn')
    try:
        data = parsecaller(source_link, timeout=wait_time, timestocheck=check_times)
    except Exception as e:
        print(str(e))
        continue
It works well, but after some random amount of time it goes into a suspended state and does nothing, like an infinite boot loop. It may suspend after 4 hours or 3 days; it's random.
I tried to solve that problem with multiprocessing: use the main process as a timer, like infoworker. When infoworker stops, it puts a result into the queue, which unblocks result_queue.get() in parsecaller, which then continues and terminates both processes. But it does not work. Today, after 11 hours, I found my code suspended in multiprocessing's managers.py:
def serve_forever(self):
    '''
    Run the server forever
    '''
    self.stop_event = threading.Event()
    process.current_process()._manager_server = self
    try:
        accepter = threading.Thread(target=self.accepter)
        accepter.daemon = True
        accepter.start()
        try:
            while not self.stop_event.is_set():
                self.stop_event.wait(1)
        except (KeyboardInterrupt, SystemExit):
            pass
    finally:
        if sys.stdout != sys.__stdout__: # what about stderr?
            util.debug('resetting stdout, stderr')
            sys.stdout = sys.__stdout__
            sys.stderr = sys.__stderr__
        sys.exit(0)
The whole time it was stuck in:
while not self.stop_event.is_set():
    self.stop_event.wait(1)
I think that either the GIL somewhere does not allow any other threads in the processes to run, or feedparser goes into a loop. And of course it gets suspended with random RSS sources.
My environment:
macOS 10.12.6 (the same situation also occurred on Windows 7 and Windows 10)
Python 3.7.0 (it also happened on 3.6.2 and 3.6.5)
PyCharm 2017.2.2
My questions:
How to understand why it gets suspended (what to do, any recipe)?
How to bypass that state (what to do, any recipe)?
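(For illustration only: a sketch of the watchdog idea described in the question, but using the queue's own timeout instead of a separate infoworker timer process. It reuses the question's parseworker and link names and is not a diagnosis of the hang.)
import multiprocessing
import queue

def parse_with_timeout(link, timeout):
    return_dict = multiprocessing.Manager().dict()
    result_queue = multiprocessing.Queue()
    worker = multiprocessing.Process(target=parseworker,
                                     args=(22, result_queue, return_dict, link))
    worker.start()
    try:
        # Wait at most `timeout` seconds for the worker's "grabbed" message.
        result_queue.get(timeout=timeout)
    except queue.Empty:
        print('time out')
    finally:
        worker.terminate()   # stop the worker whether or not it finished
        worker.join()
    return return_dict.values()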

How to stop input() from another thread in Python 3?

How can I stop, or avoid hanging on, the following:
import threading

running = True

def callback(msg):
    if msg == 'stop':
        running = False
    print(msg)

def input_read(callback):
    while running:
        callback(input())

mythread = threading.Thread(target=input_read, args=(callback,))
mythread.start()

while running:
    try:
        pass  # some other code
    except KeyboardInterrupt:
        pass
Somehow the input should be stopped, timed out, killed, anything...
Solved by setting the thread to be a daemon:
mythread.daemon = True
mythread.start()
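For completeness, a minimal self-contained sketch of that fix, assuming the goal is just that a blocked input() no longer keeps the process alive once the main loop ends. Note that the daemon flag does not interrupt input(); it only lets the process exit without waiting for it. The global declaration in callback is added here so that typing 'stop' really ends the main loop:
import threading
import time

running = True

def callback(msg):
    global running          # added so the 'stop' command reaches the main loop
    if msg == 'stop':
        running = False
    print(msg)

def input_read(callback):
    while running:
        callback(input())

# Daemon threads are killed when the main thread exits, so a blocked
# input() call no longer prevents the program from terminating.
mythread = threading.Thread(target=input_read, args=(callback,))
mythread.daemon = True
mythread.start()

while running:
    try:
        time.sleep(0.1)     # stand-in for the "some other code" above
    except KeyboardInterrupt:
        break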

Stop server from client's thread / Modify server's variable from client's thread

I would like to write an application that can stop the server based on a client's input. The server is multi-threaded and I do not understand how I can do this.
Basically, I described my problem here: Modify server's variable from client's thread (threading, python).
However, that is a Python solution, not the general solution I could implement in Java, C, C++, etc.
I need to close the other clients when one of them guesses the number, but the server should stay alive, ready for a new game.
Can I ask for some advice and explanations?
I tried this (I still do not know how to port it to C or Java), but it lets the clients keep sending numbers even after one of them has guessed it. It seems to me that kill_em_all does not do its job: it does not close all the connections and does not disconnect the other clients as it should. How can I improve this?
#!/usr/bin/env python
from random import randint
import socket, select
from time import gmtime, strftime
import threading
import sys

class Handler(threading.Thread):
    def __init__(self, connection, randomnumber, server):
        threading.Thread.__init__(self)
        self.connection = connection
        self.randomnumber = randomnumber
        self.server = server

    def run(self):
        while True:
            try:
                data = self.connection.recv(1024)
                if data:
                    print(data)
                    try:
                        num = int(data)
                        if self.server.guess(num):
                            print('someone guessed!')
                            self.server.kill_em_all()
                            break
                        else:
                            msg = "Try again!"
                            self.connection.sendall(msg.encode())
                    except ValueError as e:
                        msg = "%s" % e
                        self.connection.sendall(msg.encode())
                else:
                    msg = "error"
                    self.connection.send(msg.encode())
            except socket.error:
                break
        self.connection.close()

    def send(self, msg):
        self.connection.sendall(msg)

    def close(self):
        self.connection.close()

class Server:
    randnum = randint(1, 100)

    def __init__(self, ip, port):
        self.ip = ip
        self.port = port
        self.address = (self.ip, self.port)
        self.server_socket = None

    def guess(self, no):
        if self.randnum == no:
            self.randnum = randint(1, 100)
            print("New number is ", self.randnum)
            result = True
        else:
            result = False
        return result

    def kill_em_all(self):
        for c in self.clients:
            c.send("BYE!")
            c.close()

    def run(self):
        try:
            self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.server_socket.bind((self.ip, self.port))
            self.server_socket.listen(10)
            self.clients = []
            print('Num is %s' % self.randnum)
            while True:
                connection, (ip, port) = self.server_socket.accept()
                c = Handler(connection, self.randnum, self)
                c.start()
                self.clients.append(c)
        except socket.error as e:
            if self.server_socket:
                self.server_socket.close()
            sys.exit(1)

if __name__ == '__main__':
    s = Server('127.0.0.1', 7777)
    s.run()
Client code:
import socket
import sys

port = 7777
s = None
try:
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    s.connect(('127.0.0.1', port))
except socket.error as e:
    if s:
        s.close()
    print("Could not open socket: " + str(e))
    sys.exit(1)

while True:
    data = input('> ')
    s.sendall(data.encode())
    data = s.recv(1024)
    if data:
        if data == b"BYE!":
            break
        else:
            print("Server sent: %s " % data)
s.close()
Log in. Using whatever protocol you have, send the server a message telling it to shut down. In the server, terminate your app when you get the shutdown message. That's it. It's not a problem with any OS I have used: any thread of a process can terminate that process.
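To sketch that advice, here is a minimal standalone server where a made-up "shutdown" command from any client terminates the whole process. The port and command name are assumptions for illustration; this is not the game protocol above:
import os
import socket
import threading

def handle(conn):
    with conn:
        while True:
            data = conn.recv(1024)
            if not data:
                break
            if data.decode().strip() == "shutdown":
                conn.sendall(b"BYE!\n")
                os._exit(0)            # any thread may terminate the whole process
            conn.sendall(b"still running\n")

def serve(host="127.0.0.1", port=7777):
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind((host, port))
    srv.listen(5)
    while True:
        conn, _ = srv.accept()
        threading.Thread(target=handle, args=(conn,), daemon=True).start()

if __name__ == "__main__":
    serve()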
