How can I run all tests serially per host, while each host runs in a single thread? Maybe some code will explain what I'm trying to do.
conftest.py
import logging
from typing import Dict, Iterator

from _pytest.python import Metafunc
from paramiko import SSHClient, AutoAddPolicy
from pytest import fixture

from shared import Host, HOSTS

FMT = '[%(name)s %(levelname)-7s(%(lineno)-4d)][%(threadName)s] %(message)s'


@fixture(scope="session")
def _ssh_con(request) -> Iterator[Dict[Host, SSHClient]]:
    print('>>>>>>>> setup')
    cons: Dict[Host, SSHClient] = {}
    for item in request.session.items:
        host = item.callspec.params.get("host")
        if host not in cons:
            con = SSHClient()
            con.set_missing_host_key_policy(AutoAddPolicy())
            con.connect(host.ip, host.port, host.username, host.password)
            cons[host] = con
    print('>>>>>>>> setup done')

    yield cons

    print('<<<<<<<<<< teardown')
    for value in cons.values():
        value.close()
    print('<<<<<<<<<< teardown done')


@fixture(autouse=True)
def ssh(host: Host, _ssh_con: Dict[Host, SSHClient], logger) -> Iterator[SSHClient]:
    logger.info(f'yield {host}')
    yield _ssh_con[host]


def pytest_generate_tests(metafunc: Metafunc):
    metafunc.parametrize('host', HOSTS, ids=str)


@fixture(scope="session")
def logger() -> logging.Logger:
    logger = logging.getLogger('Tester')
    logger.setLevel(logging.DEBUG)
    fmt = logging.Formatter(FMT)
    hdlr = logging.StreamHandler()
    hdlr.setFormatter(fmt)
    logger.addHandler(hdlr)
    return logger
shared.py
from dataclasses import dataclass, field
from typing import List


@dataclass()
class Host:
    name: str
    ip: str
    port: int = field(repr=False, default=22)
    username: str = 'myusername'
    password: str = field(repr=False, default='myuserpassword')

    def __hash__(self):
        return hash(self.ip)

    def __str__(self):
        return self.name


HOSTS: List[Host] = [
    Host('Host-1', '192.168.195.1'),
    Host('Host-2', '192.168.195.2'),
    Host('Host-3', '192.168.195.3'),
]
tests.py
from time import sleep

from paramiko import SSHClient

from shared import Host, HOSTS

SLEEP_TIME = 2


def test_ip(ssh: SSHClient, host: Host, logger):
    logger.info(f"test_ip[{host}][{ssh}]")
    command = "ip a s ens33 | grep 'inet ' | awk '{print $2}' | cut -d'/' -f1"
    sleep(SLEEP_TIME)
    _, stdout, _ = ssh.exec_command(command)
    output = stdout.read().decode('utf8').strip()
    assert output == host.ip, "got an unexpected IP or didn't get any IP"


def test_machine_name(host: Host, ssh: SSHClient, logger):
    logger.info(f"test_machine_name[{host}][{ssh}]")
    command = f"ls /tmp | grep {host.name}"
    sleep(SLEEP_TIME)
    _, stdout, _ = ssh.exec_command(command)
    output = stdout.read().decode('utf8').strip()
    assert output, "didn't find file with host name"
What I want to achieve is the following:
1. Create all SSH connections for the session
2. Start pytest_runtestloop
3. Start Thread-1, Thread-2, Thread-3
4. For each thread, run all of its tests in sequential order
5. Tear down all SSH connections for the session
I tried pytest-parallel and pytest-xdist, but they don't fit my use case.
I also tried to write my own plugin, but I'm not able to get it right.
In the log output, the thread name is always MainThread ¯\_(ツ)_/¯
from concurrent.futures import Future, ThreadPoolExecutor
from typing import Dict, List

from shared import Host

WITH_SUB_THREAD = True


def _run_hosts_tests(session, host, tests):
    # Run one host's tests sequentially, optionally in a dedicated worker thread.
    if WITH_SUB_THREAD:
        with ThreadPoolExecutor(1, f"Worker_{host}") as executor:
            for test_idx in tests:
                item = session.items[test_idx]
                executor.submit(item.runtest)
    else:
        for test_idx in tests:
            item = session.items[test_idx]
            item.runtest()


def _done_callback(future: Future):
    try:
        result = future.result()
        print(f"[\033[92;1mOK\033[0m] {result}")
        return result
    except Exception as e:
        print(f"[\033[91;1mERR\033[0m] {e}")
        raise e


class ParallelRunner:
    def __init__(self):
        self._tests_mapper: Dict[Host, List[int]] = dict()

    def pytest_collection_finish(self, session):
        # Group the indices of the collected tests by their 'host' parameter.
        for idx, item in enumerate(session.items):
            host = item.callspec.getparam('host')
            if host not in self._tests_mapper:
                self._tests_mapper[host] = []
            self._tests_mapper[host].append(idx)

    def pytest_runtestloop(self, session):
        if (
            session.testsfailed
            and not session.config.option.continue_on_collection_errors
        ):
            raise session.Interrupted(
                "%d error%s during collection"
                % (session.testsfailed, "s" if session.testsfailed != 1 else "")
            )
        if session.config.option.collectonly:
            return True
        # One worker per host; each worker runs its host's tests in order.
        with ThreadPoolExecutor(len(self._tests_mapper), 'Worker') as executor:
            for host, tests in self._tests_mapper.items():
                executor.submit(_run_hosts_tests, session, host, tests)\
                    .add_done_callback(_done_callback)
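For completeness, a plugin object like this still has to be registered with pytest before the run starts. A minimal sketch of the registration, assuming the ParallelRunner class above is importable from conftest.py (the plugin name "parallel-runner" is arbitrary):

# conftest.py
def pytest_configure(config):
    # Register the custom runner so its hooks take part in this session.
    config.pluginmanager.register(ParallelRunner(), "parallel-runner")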
The answer was to set the connections in a global variable. Although I'm sure there is a better solution, in the meantime I'll put this workaround here.
conftest.py
from threading import Lock
from typing import Dict

from _pytest.fixtures import SubRequest
from paramiko import SSHClient, AutoAddPolicy
from pytest import fixture

from shared import Host

mutex = Lock()
_cons: Dict[Host, SSHClient] = dict()


@fixture(scope="session")
def _ssh_con(request: SubRequest) -> Dict[Host, SSHClient]:
    # Build the connection pool once, guarded by a lock so that only one
    # worker thread does the actual connecting.
    with mutex:
        global _cons
        if not _cons:
            for item in request.session.items:
                host = item.callspec.params.get("host")
                if host not in _cons:
                    con = SSHClient()
                    con.set_missing_host_key_policy(AutoAddPolicy())
                    con.connect(host.ip, host.port, host.username, host.password)
                    _cons[host] = con
    return _cons


def pytest_sessionfinish(session, exitstatus):
    with mutex:
        if _cons:
            for value in _cons.values():
                value.close()
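For reference, a slightly tidier variant of the same idea is to hang the pool off the session object instead of a module-level global, via pytest's stash. A minimal sketch, assuming pytest >= 7 (the key name is arbitrary; with one worker thread per host, each host key is only ever touched by its own thread):

import pytest
from paramiko import SSHClient, AutoAddPolicy

CONS_KEY = pytest.StashKey[dict]()


def pytest_sessionstart(session):
    session.stash[CONS_KEY] = {}


@pytest.fixture
def ssh(host, request) -> SSHClient:
    cons = request.session.stash[CONS_KEY]
    if host not in cons:  # one connection per host, created on first use
        con = SSHClient()
        con.set_missing_host_key_policy(AutoAddPolicy())
        con.connect(host.ip, host.port, host.username, host.password)
        cons[host] = con
    return cons[host]


def pytest_sessionfinish(session, exitstatus):
    for con in session.stash[CONS_KEY].values():
        con.close()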
Related
I'm currently working on a small project where I've written a proxy format converter as well as availability tracking for the proxies that are in use.
Currently I have ended up doing something like this:
import random
import threading
import time
from typing import Dict

from loguru import logger


class ProxyManager(object):
    def __init__(self, proxy_file_path=None):
        self.proxies = self.load_proxies(proxy_file_path) if proxy_file_path else {}

    @staticmethod
    def load_proxies(proxy_file_path):
        with open(proxy_file_path) as proxy_file:
            proxies = {'https': [], 'http': []}
            for proxy in proxy_file.readlines():
                if proxy.count(':') > 1:
                    ip, port, username, password = proxy.rstrip().split(':')
                    # user:pass@host:port is the usual authenticated proxy URL form
                    url = f'http://{username}:{password}@{ip}:{port}'
                else:
                    ip, port = proxy.rstrip().split(':')
                    url = f'http://{ip}:{port}'
                proxies['https'].append(url)
        return proxies

    def random_proxy_https(self):
        return random.choice(self.proxies['https'])

    def all_proxies_https(self):
        return [proxy for proxy in self.proxies['https']]


# -------------------------------------------------------------------------
# Proxies path
# -------------------------------------------------------------------------
proxies: Dict[str, ProxyManager] = {
    "rotating": ProxyManager("./proxies/rotating.txt"),
    "test": ProxyManager("./proxies/test.txt")
}

# -------------------------------------------------------------------------
# Proxies available
# -------------------------------------------------------------------------
all_proxies = proxies["rotating"].all_proxies_https()
proxy_dict = dict(zip(all_proxies, ['available'] * len(all_proxies)))
proxy_lock = threading.Lock()


# -------------------------------------------------------------------------
# Initialize availability of proxies
# -------------------------------------------------------------------------
class AvailableProxies:
    def __enter__(self):
        proxy_lock.acquire()
        self.proxy = None
        while not self.proxy:
            available = [
                att for att, value in proxy_dict.items() if value == "available"
            ]
            if available:
                self.proxy = random.choice(available)
                proxy_dict[self.proxy] = "busy"
                break
            else:
                logger.info("Waiting ... no proxies available")
                time.sleep(.2)
        proxy_lock.release()
        return self.proxy

    def __exit__(self, exc_type, exc_val, exc_tb):
        proxy_dict[self.proxy] = "available"
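For context, the class is meant to be used as a context manager, something like this (requests is just an illustrative consumer here):

import requests

with AvailableProxies() as proxy:
    # proxy is marked "busy" while the block runs, "available" again afterwards
    response = requests.get("https://example.com", proxies={"https": proxy})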
The idea is that I will have multiple scripts that need the ProxyManager class but not AvailableProxies. That means I do not need to set the variables

# -------------------------------------------------------------------------
# Proxies available
# -------------------------------------------------------------------------
all_proxies = proxies["rotating"].all_proxies_https()
proxy_dict = dict(zip(all_proxies, ['available'] * len(all_proxies)))
proxy_lock = threading.Lock()
if a specific script does not require the class. I wonder if it's possible to set the variables inside the AvailableProxies class, so that all_proxies, proxy_dict and proxy_lock are set only once, when AvailableProxies is imported into a script?
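A minimal sketch of that idea, assuming the module layout above (the module name proxy_manager is hypothetical): class attributes are evaluated exactly once, when the class body first runs on import, so scripts that never import AvailableProxies never build this state.

import random
import threading
import time

from loguru import logger

from proxy_manager import ProxyManager  # hypothetical module holding the class above


class AvailableProxies:
    # Class attributes: evaluated once, when this class body first runs on import.
    _all_proxies = ProxyManager("./proxies/rotating.txt").all_proxies_https()
    _proxy_dict = dict.fromkeys(_all_proxies, "available")
    _lock = threading.Lock()

    def __enter__(self):
        self.proxy = None
        with self._lock:
            while not self.proxy:
                available = [p for p, state in self._proxy_dict.items()
                             if state == "available"]
                if available:
                    self.proxy = random.choice(available)
                    self._proxy_dict[self.proxy] = "busy"
                else:
                    logger.info("Waiting ... no proxies available")
                    time.sleep(.2)
        return self.proxy

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._proxy_dict[self.proxy] = "available"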
I recently had an interview and was asked to write code that logs into 3 routers and reloads each one at a time: check that BGP is established and the interfaces are up before moving on to reload the next device. We were provided an imported module that can SSH into the router and reload it. Thoughts, anyone? I'm new to Python.
Though a module was provided for SSH, I started by coding it out, and here is what I tried, just to give an idea of how the router works and what I am checking for:
import socket
import paramiko


def run_ssh_command(username, password, host, command):
    """
    Using paramiko to SSH into remote locations and run commands
    """
    port = 22
    s = paramiko.SSHClient()
    s.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        s.connect(host, port, username, password, look_for_keys=False, timeout=5.0)
    except paramiko.AuthenticationException as e:
        print("authentication failed: ", e)
        s.close()
        return
    except socket.timeout as e:
        print("socket timeout: ", e)
        s.close()
        return
    except paramiko.SSHException as e:
        print("SSH Exception: ", e)
        s.close()
        return
    except socket.error as e:
        print("socket error: ", e)
        s.close()
        return
    (stdin, stdout, stderr) = s.exec_command(command)
    print("ran command ", command)
    # for line in stdout.readlines():
    #     print(line)
    s.close()


if __name__ == "__main__":
    while True:
        run_ssh_command("CompRouter", "backbonedevice", "10.10.10.25", "show ip bgp summary")
So my line of thought is to SSH to the device, issue a "show ip bgp summary" command - this is what the table typically looks like:
Neighbor    V      AS MsgRcvd MsgSent  TblVer  InQ OutQ Up/Down  State/PfxRcd
10.10.10.2  4   65536       7       7       1    0    0 00:03:04            0
The idea is to check that the InQ/OutQ values are zero and that none of the BGP neighbors are "Down"; then and only then do I move on to reload the next router.
This is where I am particularly stuck. Given a table like this, how do I check the entire InQ/OutQ column (there might be more than one neighbor) and then take the necessary action?
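For what it's worth, a minimal sketch of one way to do that check, assuming the raw text of "show ip bgp summary" has been captured (column positions follow the sample table above and may differ per platform):

def bgp_neighbors_ok(summary_output: str) -> bool:
    """Return True only if every neighbor has InQ == OutQ == 0 and is established."""
    for line in summary_output.splitlines():
        fields = line.split()
        # Neighbor rows start with an IPv4 address and have 10 columns.
        if len(fields) != 10 or fields[0].count('.') != 3:
            continue
        inq, outq, state = fields[6], fields[7], fields[9]
        # State/PfxRcd is a prefix count (digits) when the session is
        # established, and a word like "Idle" or "Active" when it is not.
        if inq != '0' or outq != '0' or not state.isdigit():
            return False
    return True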
Opted to use napalm and netmiko - works like a charm:
from simplecrypt import encrypt, decrypt
from pprint import pprint
from netmiko import ConnectHandler
from napalm import get_network_driver
import json
# from time import time
import time


def read_device_data(devices_filename):
    devices = {}  # Create a dictionary for each IP's unique info
    with open(devices_filename) as device_info:
        for lines in device_info:
            this_dev = lines.strip().split(',')
            dev = {'ipaddr': this_dev[0],
                   'type': this_dev[1],
                   'name': this_dev[2]}
            devices[dev['ipaddr']] = dev
    print('Displaying info for all devices below: ')
    pprint(devices)
    return devices


# Create a function to decrypt and read file containing credentials encrypted in the format ip,username,password
def read_device_creds(device_cred_filename, key):
    print('\n Decoding encrypted device credentials....')
    with open(device_cred_filename, 'rb') as device_creds_file:
        device_creds_decry = decrypt(key, device_creds_file.read())
    device_creds_list = json.loads(device_creds_decry.decode('utf-8'))
    pprint(device_creds_list)
    print('\n Displaying device credentials in dictionary format:')
    """
    Convert device_creds_list to dictionary using list comprehension
    """
    device_creds_dict = {this_dev[0]: this_dev for this_dev in device_creds_list}
    print(device_creds_dict)
    return device_creds_dict


def check_BGP(net_device, cred):
    print(f"Connecting to {net_device['ipaddr']} right now to check BGP status.....")
    while True:
        try:
            driver = get_network_driver('ios')
            iosv = driver(net_device['ipaddr'], cred[1], cred[2])
            iosv.open()
        except Exception:
            print('Waiting to establish a socket...')
        else:
            time.sleep(30)
            ios_output = iosv.get_bgp_neighbors()
            for k, v in ios_output.items():
                for y in v.values():
                    if type(y) == dict:
                        for z in y.values():
                            print(f"BGP peer is up? {z['is_up']}")
                            return z['is_up'] == True


def reload(creds):
    iosv_device = {
        'device_type': 'cisco_ios',
        'ip': creds[0],
        'username': creds[1],
        'password': creds[2]
    }
    net_connect = ConnectHandler(**iosv_device)
    output = net_connect.send_command_timing('wr mem')
    time.sleep(10)
    output += net_connect.send_command_timing('reload')
    output += net_connect.send_command_timing('y')
    print(output)


def is_alive(alive_dev, alive_cred):  # check if device is back online after reload
    while True:
        try:
            driver = get_network_driver('ios')
            iosvl2 = driver(alive_dev['ipaddr'], alive_cred[1], alive_cred[2])
            iosvl2.open()
        except Exception:
            print(f"Attempting to reconnect to {alive_cred[0]}")
        else:
            alive_output = iosvl2.is_alive()
            print(alive_output)
            return alive_output['is_alive'] == True


if __name__ == '__main__':
    net_devices = read_device_data('devices_data')
    net_creds = read_device_creds('encrypted_device_creds', 'cisco')
    # starting_time = time()
    for ipadd, device_info in net_devices.items():
        print(net_devices.items())
        while True:
            print(f'Connecting to: {ipadd}')
            if check_BGP(device_info, net_creds[ipadd]) == True:
                print(f'Reloading {ipadd} now')
                reload(net_creds[ipadd])
            else:
                print(f'Re-checking BGP on {ipadd}')
            if is_alive(device_info, net_creds[ipadd]) == True and check_BGP(device_info, net_creds[ipadd]) == True:
                print(f'{ipadd} back online and BGP OK!')
                break
            else:
                print('Router down or BGP failed to reconverge; exiting script')
                break
    # print('\n---- End get config sequential, elapsed time=', time() - starting_time)
In the example below, I wrote code that detects BGP route limits. Its purpose is to calculate the route-limit utilization rate from the information under the VPN instances. For this kind of parsing I recommend the TTP module, which lets you create your own templates.
from netmiko import ConnectHandler, Netmiko
from getpass import getpass
from pprint import pprint
from ttp import ttp
from genie.testbed import load
import json
import time
from multiprocessing.dummy import Pool as ThreadPool

# **************************************************************************************************************************
with open("user_pass.txt", "r") as f5:
    user_pass = f5.readlines()

for list_user_pass in user_pass:
    if "username" in list_user_pass:
        username = list_user_pass.split(":")[1].strip()
    if "password" in list_user_pass:
        password = list_user_pass.split(":")[1].strip()


def _ssh_(nodeip):
    try:
        huawei = {
            'device_type': 'huawei', 'ip': nodeip,
            'username': username, 'password': password,
        }
        con = Netmiko(**huawei)
        print(nodeip.strip() + " " + "login successful")
    except Exception as e:
        print(e)
        f_3.write(nodeip.strip() + "\n")
        return
    # **********************************************************************************************************************
    data_to_parse_0 = con.send_command_timing('display ip vpn-instance | ignore-case i Customer_A')
    print(data_to_parse_0)
    ttp_template_0 = """
{{Customer_Name}}  {{nodeip}}     {{IPV4}}
"""
    parser_0 = ttp(data=data_to_parse_0, template=ttp_template_0)
    parser_0.parse()
    # print result in JSON format
    results_0 = parser_0.result(format='json')[0]
    print(results_0)
    # str to list **convert with json.loads
    result_0 = json.loads(results_0)
    print(result_0[0]["Customer_Name"])
    # **********************************************************************************************************************
    data_to_parse = con.send_command_timing("display current-configuration configuration vpn-instance {}".format(result_0[0]["Customer_Name"]))
    print(data_to_parse)
    ttp_template = """
{{routing-table}} limit {{ total_number | DIGIT }} {{total_number2}}
"""
    parser = ttp(data=data_to_parse, template=ttp_template)
    parser.parse()
    # print result in JSON format
    results = parser.result(format='json')[0]
    print(results)
    # str to list **convert with json.loads
    result = json.loads(results)
    print(result)
    # **********************************************************************************************************************
    data_to_parse_2 = con.send_command_timing('dis ip routing-table vpn-instance' + " " + result_0[0]["Customer_Name"] + " " + " statistics | i Summary Prefixes")
    print(data_to_parse_2)
    ttp_template_2 = """
Summary Prefixes : {{ used_number | DIGIT }}
"""
    parser2 = ttp(data=data_to_parse_2, template=ttp_template_2)
    parser2.parse()
    # print result in JSON format
    results2 = parser2.result(format='json')[0]
    print(results2)
    # str to list **convert with json.loads
    result2 = json.loads(results2)
    print(result2[0]["used_number"])
    # **********************************************************************************************************************
    result3 = (int(result2[0]["used_number"]) / int(result[0]["total_number"])) * 100
    print(int(result3))
    with open("vrf_limit_result.txt", "a") as f:
        f.write("Customer_Result" + "_" + nodeip + "==>" + str(result3) + "\n")


# **************************************************************************************************************************
f_2 = open("ip_list.txt", "r")
ip_list = f_2.readlines()
f_2.close()

f_3 = open("Ssh_unconnected_2.txt", "w")

# Threading method
myPool = ThreadPool(100)
result = myPool.map(_ssh_, ip_list)
I have a class that unbuffers stdout and stderr, like so:
import re  # needed for the progress regex below


class Unbuffered:
    def __init__(self, stream):
        self.stream = stream

    def write(self, data):
        data = data.strip()
        if data.startswith("INFO: "):
            data = data[6:]
            if '[' in data:
                progress = re.compile(r"\[(\d+)/(\d+)\]")
                data = progress.match(data)
                total = data.group(2)
                current = data.group(1)
                data = '{0}/{1}'.format(current, total)
        if data.startswith("ERROR: "):
            data = data[7:]
        self.stream.write(data + '\n')
        self.stream.flush()

    def __getattr__(self, attr):
        return getattr(self.stream, attr)
The output comes from a function run in a ProcessPoolExecutor when inbound data arrives from the websocket.
I want the output printed to the console as well as sent to my websocket client. I tried making Unbuffered async and passing the websocket instance to it, but no luck.
UPDATE: The essentials of run() and my websocket handler() look something like this:
def run(url, path):
    logging.addLevelName(25, "INFO")
    fmt = logging.Formatter('%(levelname)s: %(message)s')
    # ----
    output.progress_stream = Unbuffered(sys.stderr)
    stream = Unbuffered(sys.stdout)
    # ----
    level = logging.INFO
    hdlr = logging.StreamHandler(stream)
    hdlr.setFormatter(fmt)
    log.addHandler(hdlr)
    log.setLevel(level)
    get_media(url, opt)


async def handler(websocket, path):
    while True:
        inbound = json.loads(await websocket.recv())
        if inbound is None:
            break
        url = inbound['url']
        if 'path' in inbound:
            path = inbound['path'].rstrip(os.path.sep) + os.path.sep
        else:
            path = os.path.expanduser("~") + os.path.sep
        # blah more code
        while inbound != None:
            await asyncio.sleep(.001)
            await loop.run_in_executor(None, run, url, path)
run(), handler() and Unbuffered are separate from each other.
Rewriting get_media() to use asyncio instead of running it in a different thread would be best. Otherwise, there are some options to communicate between a regular thread and coroutines, for example, using a socketpair:
import asyncio
import socket
import threading
import time
import random


# threads stuff
def producer(n, writer):
    for i in range(10):
        # print("sending", i)
        writer.send("message #{}.{}\n".format(n, i).encode())
        time.sleep(random.uniform(0.1, 1))


def go(writer):
    threads = [threading.Thread(target=producer, args=(i + 1, writer,))
               for i in range(5)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    writer.send("bye\n".encode())


# asyncio coroutines
async def clock():
    for i in range(11):
        print("The time is", i)
        await asyncio.sleep(1)


async def main(reader):
    buffer = ""
    while True:
        buffer += (await loop.sock_recv(reader, 10000)).decode()
        # print(len(buffer))
        while "\n" in buffer:
            msg, _nl, buffer = buffer.partition("\n")
            print("Got", msg)
            if msg == "bye":
                return


reader, writer = socket.socketpair()
reader.setblocking(False)
threading.Thread(target=go, args=(writer,)).start()
# time.sleep(1.5)  # socket is buffering

loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.wait([clock(), main(reader)]))
loop.close()
You can also try this 3rd-party thread+asyncio compatible queue: janus
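A minimal sketch of the janus approach, assuming janus is installed (pip install janus): the same queue exposes a blocking sync_q to threads and an awaitable async_q to coroutines.

import asyncio
import threading

import janus


def producer(sync_q):
    # Runs in a regular thread: blocking puts.
    for i in range(5):
        sync_q.put("message #{}".format(i))
    sync_q.put(None)  # sentinel to stop the consumer


async def consumer(async_q):
    # Runs in the event loop: awaitable gets.
    while True:
        msg = await async_q.get()
        async_q.task_done()
        if msg is None:
            return
        print("Got", msg)


async def main():
    queue = janus.Queue()
    threading.Thread(target=producer, args=(queue.sync_q,)).start()
    await consumer(queue.async_q)
    queue.close()
    await queue.wait_closed()


asyncio.run(main())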
I'm trying to make an async ping process using subprocess.Popen, and I'm trying to understand how to implement it in this case:
aList = []


async def sn(frm, to):
    i = 0
    for i in list(range(frm, to)):
        aList.append(i)
    cmd = "ping -n 1 " + '10.0.0.'
    coroutines = [subprocess.Popen(cmd + str(i), stdout=subprocess.PIPE) for i in aList]
    results = await asyncio.gather(*coroutines)
    print(results)


loop = asyncio.get_event_loop()
loop.run_until_complete(sn(frm, to))
loop.close()
You can find simpler code for pinging a host without async/await, but if necessary you can try the following working example to ping with async/await:
import platform
import subprocess
import aiohttp
import asyncio


async def getPingedHost(host, netTimeout=3):
    """ Description: Function to ping a host and get string of outcome or False
        Import: from shared.getPingedHost import getPingedHost
        Testing: python -m shared.getPingedHost
    """
    args = ['ping']
    platformOs = platform.system().lower()
    if platformOs == 'windows':
        args.extend(['-n', '1'])
        args.extend(['-w', str(netTimeout * 1000)])
    elif platformOs in ('linux', 'darwin'):
        args.extend(['-c', '1'])
        args.extend(['-W', str(netTimeout)])
    else:
        raise NotImplementedError('Unsupported OS: {}'.format(platformOs))
    args.append(host)

    output = ''
    try:
        output = subprocess.run(args, check=True, universal_newlines=True,
                                stdout=subprocess.PIPE,    # Capture standard out
                                stderr=subprocess.STDOUT,  # Capture standard error
                                ).stdout
        outputList = str(output).split('\n')
        if platformOs == 'windows':
            if output and 'TTL' not in output:
                output = False
        else:
            output = outputList[2]  # summary line of the ping output
    except (subprocess.CalledProcessError, subprocess.TimeoutExpired):
        output = False
    return output


async def main():
    async with aiohttp.ClientSession() as client:
        output = await getPingedHost('google.com')
        print(output)


loop = asyncio.get_event_loop()
loop.run_until_complete(main())
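One caveat: subprocess.run() blocks the event loop even inside an async def, so coroutines written that way don't actually overlap. A minimal sketch of a genuinely non-blocking variant using asyncio's own subprocess support (the flags shown are the Linux/macOS ones):

import asyncio


async def ping(host: str) -> bool:
    # Use '-n', '1' instead of '-c', '1' on Windows.
    proc = await asyncio.create_subprocess_exec(
        'ping', '-c', '1', host,
        stdout=asyncio.subprocess.DEVNULL,
        stderr=asyncio.subprocess.DEVNULL,
    )
    return await proc.wait() == 0  # exit code 0 means the host answered


async def main():
    hosts = ['10.0.0.{}'.format(i) for i in range(1, 10)]
    results = await asyncio.gather(*(ping(h) for h in hosts))
    for host, alive in zip(hosts, results):
        print(host, 'up' if alive else 'down')


asyncio.run(main())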
import subprocess
import threading


class rscan(object):
    state = {'online': [], 'offline': []}  # Dictionary with list
    ips = []  # Should be filled by function after taking range
    # Amount of pings at the time
    thread_count = 8
    # Lock object to prevent race conditions
    lock = threading.Lock()

    # Using Windows ping command
    def ping(self, ip):
        answer = subprocess.call(['ping', '-n', '1', ip], stdout=open('1.txt', 'w'))
        return answer == 0 and ip

    def pop_queue(self):
        ip = None
        self.lock.acquire()  # lock !!!
        if self.ips:
            ip = self.ips.pop()
        self.lock.release()
        return ip

    def noqueue(self):
        while True:
            ip = self.pop_queue()
            if not ip:
                return None
            result = 'online' if self.ping(ip) else 'offline'
            self.state[result].append(ip)  # check again

    def start(self):
        threads = []
        for i in range(self.thread_count):
            t = threading.Thread(target=self.noqueue)
            t.start()
            threads.append(t)
        # Wait for all threads
        [t.join() for t in threads]
        return self.state

    def rng(self, frm, to, ip3):
        self.frm = frm
        self.to = to
        self.ip3 = ip3
        for i in range(frm, to):
            ip = ip3 + str(i)
            self.ips.append(ip)


if __name__ == '__main__':
    scant = rscan()
    scant.thread_count = 8
    # Fill the queue and run, e.g. (values here are illustrative):
    # scant.rng(1, 255, '10.0.0.')
    # print(scant.start())
I edited the class I found a bit; it also uses threads instead of async/await.
Credit: http://blog.boa.nu/2012/10/python-threading-example-creating-pingerpy.html
Hi everyone.
I use paramiko with Python 3.3.3 x64 on Windows 7 x64. The following is my code, and the strange thing is that I need to add time.sleep(0.01) as a delay before using client.close() to end the session. Otherwise, a lot of processes remain on the SSH host and never end automatically.
Could anyone do me a favor and explain this?
The paramiko fork used for Python 3:
(https://travis-ci.org/nischu7/paramiko)
Here are the steps to reproduce:
A) Remove the time.sleep(0.01) before client.close() and run the script.
B) Type the password for the SSH host.
C) Type the first command, for example: ls -la
D) Type commands very frequently, for example, keep pressing the up-arrow and Enter alternately very fast several times.
E) When using ps -ef | grep dropbear (the SSH server; I have not tested with OpenSSH), a lot of processes exist.
F) Type exit and Ctrl+Z to terminate the script.
G) Keep the time.sleep(0.01) before client.close() and run the script again.
H) Do steps B, C, D again, then check with ps -ef | grep dropbear again: only one SSH process is generated by this script.
and here is the code:
from tkinter import *
from threading import Thread
from queue import Queue, Empty
import _thread
import time
from paramiko import SSHClient, Transport, AutoAddPolicy, WarningPolicy
import getpass


def start(client):
    try:
        client.connect(hostname='127.0.0.1', port=22, username='ubuntu', password=pw)
        return True
    except Exception as e:
        client.close()
        print(e)
        return False


def check(client, outqueue):
    while start(client):
        outqueue.put("Command to run: ")
        cmd = input()
        if cmd == "exit":
            client.close()
            break
        chan = client.get_transport().open_session()
        outqueue.put("running '%s'" % cmd)
        chan.exec_command(cmd)
        while True:
            if chan.recv_ready():
                data = chan.recv(4096).decode('ascii')
                outqueue.put("recv:\n%s" % data)
            if chan.recv_stderr_ready():
                error = chan.recv_stderr(4096).decode('ascii')
                outqueue.put("error:\n%s" % error)
            if chan.exit_status_ready():
                exitcode = chan.recv_exit_status()
                outqueue.put("exit status: %s" % exitcode)
                # print('close s')
                # print(client.close())
                time.sleep(0.01)
                client.close()
                # print('close e')
                # time.sleep(0.05)
                break


def reader(outqueue):
    while True:
        while outqueue.qsize():
            try:
                data = outqueue.get()
                if data:
                    print(data)
            except Exception as e:
                print(e)
                # continue
                # time.sleep(0.5)


if __name__ == '__main__':
    pw = getpass.getpass()

    client = SSHClient()
    client.set_missing_host_key_policy(WarningPolicy())
    # client.set_missing_host_key_policy(AutoAddPolicy())

    outqueue = Queue()

    r = Thread(target=reader, args=(outqueue,))
    r.daemon = True
    r.start()

    t = Thread(target=check, args=(client, outqueue,))
    # t.daemon = True
    t.start()
    t.join()
It was my mistake: I was starting the client more than once. Solved by the following:
import sys
from threading import Thread
from queue import Queue, Empty
import _thread
import time
from paramiko import SSHClient, Transport, AutoAddPolicy, WarningPolicy
import getpass


def start(client):
    try:
        client.connect(hostname='127.0.0.1', port=22, username='ubuntu', password=pw)
        return True
    except Exception as e:
        client.close()
        print(e)
        return False


def check(client, outqueue):
    while True:
        outqueue.put("Command to run: ")
        cmd = input()
        if cmd == "exit":
            client.close()
            break
        chan = client.get_transport().open_session()
        outqueue.put("running '%s'" % cmd)
        chan.exec_command(cmd)
        while True:
            if chan.recv_ready():
                data = chan.recv(4096).decode('ascii')
                outqueue.put("recv:\n%s" % data)
            if chan.recv_stderr_ready():
                error = chan.recv_stderr(4096).decode('ascii')
                outqueue.put("error:\n%s" % error)
            if chan.exit_status_ready():
                exitcode = chan.recv_exit_status()
                outqueue.put("exit status: %s" % exitcode)
                # print('close s')
                # print(client.close())
                # time.sleep(0.01)
                # client.close()
                # print('close e')
                # time.sleep(0.05)
                break


def reader(outqueue):
    while True:
        while outqueue.qsize():
            try:
                data = outqueue.get()
                if data:
                    print(data)
            except Exception as e:
                print(e)
                # continue
                # time.sleep(0.5)


if __name__ == '__main__':
    pw = getpass.getpass()

    client = SSHClient()
    client.set_missing_host_key_policy(WarningPolicy())
    # client.set_missing_host_key_policy(AutoAddPolicy())

    if not start(client):
        # os._exit(0)
        sys.exit(0)

    outqueue = Queue()

    r = Thread(target=reader, args=(outqueue,))
    r.daemon = True
    r.start()

    t = Thread(target=check, args=(client, outqueue,))
    # t.daemon = True
    t.start()
    t.join()