wait till command completed in paramiko invoke_shell() [duplicate] - python-3.x

I want to wait until the given command has finished executing on the remote machine. In this case the command is sent, the call returns immediately, and the script does not wait for it to complete.
import paramiko
import re
import time

def scp_switch(host, username, PasswdValue):
    ssh = paramiko.SSHClient()
    try:
        # Logging into the remote host with my credentials
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(host, username=username, password=PasswdValue, timeout=30)
        try:
            # Switching to powerbroker/root mode
            command = "pbrun xyz -u root\n"
            channel = ssh.invoke_shell()
            channel.send(command)
            time.sleep(3)
            while not re.search('Password', str(channel.recv(9999), 'utf-8')):
                time.sleep(1)
                print('Waiting...')
            channel.send("%s\n" % PasswdValue)
            time.sleep(3)

            # Executing the command on the remote host as root (after logging in as root).
            # I don't have any specific keyword to search for in the output, hence no while loop here.
            cmd = "/tmp/slp.sh cool >/tmp/slp_log.txt \n"
            print('Executing %s' % cmd)
            channel.send(cmd)  # It does not wait here until the process has completed
            time.sleep(3)
            res = str(channel.recv(1024), 'utf-8')
            print(res)
            print('process completed')
        except Exception as e:
            print('Error while switching:', str(e))
    except Exception as e:
        print('Error while SSH : %s' % (str(e)))
    ssh.close()

""" Provide the host and credentials here """
HOST = 'abcd.us.domain.com'
username = 'heyboy'
password = 'passcode'
scp_switch(HOST, username, password)
As per my research, invoke_shell() will not return any status code. Is there a way to get the return code and wait until the process has completed?

I know this is an old post, but leaving this here in case someone has the same problem.
You can append an echo that runs once your command executes successfully; for example, if you are doing scp ... && echo 'transfer complete', you can then catch this output with a loop:
while True:
    s = chan.recv(4096)
    s = s.decode()
    if 'transfer complete' in s:
        break
    time.sleep(1)
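A minimal sketch of that pattern, tied back to the exit-code question from the original post, is below. It assumes a channel opened with invoke_shell() as in the question; the CMD_DONE marker is a hypothetical string chosen only for illustration, and the /tmp/slp.sh command is taken from the question. Echoing the marker together with $? lets you both detect completion and read the command's return code.

import re
import time

marker = "CMD_DONE"  # hypothetical marker, any string unlikely to appear in normal output
channel.send("/tmp/slp.sh cool >/tmp/slp_log.txt; echo %s $?\n" % marker)

output = ""
while True:
    if channel.recv_ready():
        output += channel.recv(4096).decode("utf-8")
        match = re.search(r"%s (\d+)" % marker, output)
        if match:
            exit_code = int(match.group(1))  # return code of the remote command
            break
    time.sleep(1)

print("command finished with exit code", exit_code)

If the command does not need the interactive pbrun session, exec_command() together with recv_exit_status() on the returned channel is the more direct way to block until completion and obtain the return code.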

Related

sudo as user within a python program

I am trying to su to another user (a generic account) in the shell; however, the user id is still the same after su. Any idea how to fix it?
import os, glob, pwd, subprocess, pexpect, getpass

print(getpass.getuser())
try:
    var_command = "su user"
    var_child = pexpect.spawn(var_command)
    i = var_child.expect(["Password:", pexpect.EOF])
    if i == 0:  # send password
        print('Login Successful')
        var_child.sendline("password")
        var_child.expect(pexpect.EOF)
    elif i == 1:
        print("Got the key or connection timeout")
        pass
except Exception as e:
    print("Oops Something went wrong buddy")
    print(e)
print(getpass.getuser())
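One likely explanation (a hedged note, not a verified fix): su runs inside the spawned child process, so getpass.getuser() in the parent Python process never changes, and var_child.expect(pexpect.EOF) simply waits for that child to exit. Commands meant to run as the other user have to be sent to the child itself, roughly as in this sketch; the account name, password, and prompt pattern are placeholders taken from the question.

import pexpect

child = pexpect.spawn("su user")      # "user" is the placeholder account from the question
child.expect("Password:")
child.sendline("password")            # placeholder password
child.expect(r"\$")                   # assumption: the target user's prompt ends with $
child.sendline("whoami")              # runs inside the su session, not in the parent
child.expect(r"\$")
print(child.before.decode())          # should print the target user's name
child.sendline("exit")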

How to execute commands in a remote server using python?

This question is related to this other one: How to use sockets to send user and password to a devboard using ssh
I want to connect to the devboard in order to execute a script, and send all of that script's output to an Elasticsearch machine.
I can connect to the devboard (see IMAGE below) using my laptop which happens to have Elasticsearch installed. But, when I want to send data to the devboard, the script shows nothing.
What I am doing is:
As soon as you find mendel#undefined-eft:~$ , send the command: cd coral/tflite/python/examples/classification/Auto_benchmark\n
What am I doing wrong?
import paramiko
import os

# Server's data
IP = '172.16.2.47'
PORT = 22
USER = 'mendel'
PASSWORD = 'mendel'

ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostname=IP, port=PORT, username=USER, password=PASSWORD)

channel = ssh.invoke_shell()  # to get a dedicated channel
channel_data = str()
host = str()

while True:
    if channel.recv_ready():  # is there data to be read?
        channel_data += channel.recv(9999).decode("utf-8")
        os.system('clear')
        print(channel_data)
        # ONLY WORKS UNTIL HERE!!!
    else:
        continue
    if channel_data.endswith('mendel#undefined-eft:~$'):
        channel.send('cd coral/tflite/python/examples/classification/Auto_benchmark\n')
        channel_data += channel.recv(9999).decode("utf-8")
        print(channel_data)
IMAGE
EDIT
channel = ssh.invoke_shell()  # to get a dedicated channel
channel_data = str()
host = str()

while True:
    if channel.recv_ready():  # is there data to be read?
        channel_data += channel.recv(9999).decode("utf-8")
        os.system('clear')
        print(channel_data)
    else:
        continue
    if channel_data.endswith('mendel#undefined-eft:~$ '):  # it is good to send commands
        channel.send('cd coral/tflite/python/examples/classification/Auto_benchmark\n')
        #channel_data += channel.recv(9999).decode("utf-8")
        #print(channel_data)
    elif channel_data.endswith('mendel#undefined-eft:~/coral/tflite/python/examples/classification/Auto_benchmark$ '):
        channel.send('ls -l\n')  # python3 auto_benchmark.py
        channel_data += channel.recv(9999).decode("utf-8")
        print(channel_data)
I guess you have to change the
if channel_data.endswith('mendel#undefined-eft:~$'):
to
if channel_data.endswith('mendel#undefined-eft:~$ '):
according to your prompt. Please note the space after :~$
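Matching the complete prompt string is fragile, since it breaks as soon as the trailing space or the working directory changes. A slightly more tolerant sketch, assuming the prompt always starts with the mendel#undefined-eft prefix seen in the question and ends with "$ ", would be:

import re

PROMPT = re.compile(r"mendel#undefined-eft:[^\n]*\$ $")

if PROMPT.search(channel_data):
    channel.send("cd coral/tflite/python/examples/classification/Auto_benchmark\n")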

Python code for telnetting DUT needs further optimization

I need to further optimize my code in Python.
I was earlier executing commands on the Device Under Test step by step, which amounted to a lot of code since I also required sleep timers. However, I was able to minimize it by putting the commands in a list and iterating over the list in a for loop:
I need your inputs to further optimize this code:
ConfigListBFD = ['conf t', 'int Fa1/0', 'ip address 10.10.10.1 255.255.255.0', 'no shut']
for i in ConfigListBFD:
    tn.write(i.encode('ascii') + b"\n")
    print(i, "command entered successfully")
    time.sleep(2)
Please note: I am telnetting the DUT as ssh is not supported.
I am using this common helper code for telnet. You can create a shared file and add this method to it:
import sys
import telnetlib
import time

def telnet(host):
    user = <username>
    password = <password>
    try:
        tn = telnetlib.Telnet(host)
    except:
        print("Unable to connect")
        sys.exit()
    tn.read_until(b"Username:")  # read until username prompt
    tn.write(user.encode('ascii') + b"\n")
    if password:
        tn.read_until(b"password:")  # read until password prompt
        tn.write(password.encode('ascii') + b"\n")
    tn.read_until(b"#")
    return tn  # return telnetlib handle
Then import this method into another file, where we write our script.
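For example, the ConfigListBFD loop from the question could use this helper roughly like the sketch below; the module name telnet_common and the device address are hypothetical, and read_until() is used to wait for the device prompt instead of a fixed sleep.

from telnet_common import telnet    # hypothetical module holding the telnet() helper

tn = telnet("10.10.10.100")         # hypothetical DUT address

ConfigListBFD = ['conf t', 'int Fa1/0', 'ip address 10.10.10.1 255.255.255.0', 'no shut']
for cmd in ConfigListBFD:
    tn.write(cmd.encode('ascii') + b"\n")
    tn.read_until(b"#", timeout=5)  # wait for the device prompt instead of sleeping
    print(cmd, "command entered successfully")

tn.close()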

Python Script Creates Directories In /tmp/, Taking Up System Space

I am running a script that acts as a server and allows two clients to connect to it; when one specific client sends a message to the server, the server modifies it and then sends it to the other client.
This appears to work, as the receiving client acknowledges that the input was received and is valid. This is a script that I intend to run continuously.
However, a big issue is that my /tmp/ directory is filling up with directories named _M... (the ellipsis representing a random string) that contain Python modules (such as cryptography, which, as far as I'm aware, I'm not using) and timezone information (quite literally every timezone that Python supports). They seem to be created very frequently, but I can't identify what part of the process is doing this.
I have created a working cleanup bash script that removes files older than 5 minutes from the directory every 5 minutes; however, I cannot guarantee that, when I duplicate this process for other devices, the directories will have the same name format. Rather than create a unique bash script for each process that I create, I'd rather be able to clean up the directories from within the Python script, or even better, prevent the directories from being created at all.
The problem is, I'm not certain of how this is accomplished, and I do not see anything on SO regarding what is creating these directories, nor how to delete them.
The following is my script
import time, socket, os, sys, re, select

IP = '192.168.109.8'
PORT = [3000, 3001]
PID = str(os.getpid())
PIDFILE = "/path/to/pidfile.pid"
client_counter = 0
sockets_list = []

def runCheck():
    if os.path.isfile(PIDFILE):
        return False
    else:
        with open(PIDFILE, 'w') as pidfile:
            pidfile.write(PID)
        return True

def openSockets():
    for i in PORT:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind((IP, i))
        s.listen(1)
        sockets_list.append(s)

def receiveMessage(client_socket):
    try:
        message = client_socket.recv(2048).decode('utf-8')
        if not message:
            return False
        message = str(message)
        return message
    except:
        return False

def fixString(local_string):
    # processes
    return local_string

def main():
    try:
        openSockets()
        clients = {}
        print(f'Listening for connections on {IP}:{PORT[0]} and {PORT[1]}...')
        client_count = 0
        while True:
            read_sockets, _, exception_sockets = select.select(sockets_list, [], sockets_list)
            for notified_socket in read_sockets:
                if notified_socket == sockets_list[0] or notified_socket == sockets_list[1]:
                    client_socket, client_address = sockets_list[client_count].accept()
                    client_count = (client_count + 1) % 2
                    sockets_list.append(client_socket)
                    clients[client_socket] = client_socket
                    print('Accepted new connection from: {}'.format(*client_address))
                else:
                    message = receiveMessage(notified_socket)
                    if message is False:
                        continue
                    message = fixString(message)
                    for client_socket in clients:
                        if client_socket != notified_socket:
                            if message != "N/A":
                                client_socket.send(bytes(message, "utf-8"))
            for notified_socket in exception_sockets:
                sockets_list.remove(notified_socket)
                del clients[notified_socket]
            time.sleep(1)
    except socket.timeout:
        for i in sockets_list:
            i.close()
        os.remove(PIDFILE)
        sys.exit()
    except Exception as e:
        for i in sockets_list:
            i.close()
        err_details = str('Error in line {}'.format(sys.exc_info()[-1].tb_lineno), type(e).__name__, e)
        os.remove(PIDFILE)
        print("Exception: {}".format(err_details))
        sys.exit()

if __name__ == "__main__":
    if runCheck():
        main()
    else:
        pass
How might I set it up so that the python script will delete the directories it creates in the /tmp/ directory, or better, to not create them in the first place? Any help would be greatly appreciated.
As it turns out, it is PyInstaller that was generating these files. The documentation states that PyInstaller creates this _MEI directory when an executable built in single-file mode runs, and it is supposed to delete it on exit as well, but for some reason it didn't.
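If the process is killed before the PyInstaller bootloader gets a chance to clean up, those extraction directories are left behind. A possible mitigation, sketched below under the assumption that the script runs as a one-file PyInstaller bundle on Linux, is to remove stale _MEI* directories at startup while keeping the same five-minute age guard the bash script used; PyInstaller's --runtime-tmpdir option can also point the extraction somewhere easier to manage.

import glob
import os
import shutil
import sys
import time

def clean_stale_mei(tmp="/tmp", max_age=300):
    # sys._MEIPASS is only set when running from a PyInstaller bundle and
    # points at the directory the *current* run was extracted into.
    current = getattr(sys, "_MEIPASS", None)
    for path in glob.glob(os.path.join(tmp, "_MEI*")):
        if current and os.path.realpath(path) == os.path.realpath(current):
            continue  # never delete the directory we are running from
        if time.time() - os.path.getmtime(path) < max_age:
            continue  # mirror the bash script: only remove directories older than 5 minutes
        shutil.rmtree(path, ignore_errors=True)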

Program keeps running if TK window closed too fast

Python GUI with Tk that a user adds servers to, and it displays port and resource information about each server. Each server line runs in its own thread and loops with a myQueue.put(executor.submit(lambda: <function here>)). I can load up 15 servers and then close the window. Sometimes python.exe closes and the IDE shows the application end with exit code 0. Sometimes I close the window and the IDE and Task Manager show that the program is still running. After a while the PyCharm console prints "main thread is not in main loop" and nothing else happens after that. I thought using a queue with the threads would keep this from happening, but something is going wrong.
Workflow for the code below: the user adds server info in a popup window > that is run through the creation function > the information is passed off to the threadmaker function, which watches an indicator and reruns the SSH session to query info when the previous query finishes.
import concurrent.futures
import queue
import time
import paramiko
from tkinter import Tk

main = Tk()
myQueue = queue.Queue()
executor = concurrent.futures.ThreadPoolExecutor(max_workers=16)

def creation(server, nickname, user, passw):
    # Create labels for server, nickname, user, password here and place them on the main window
    myQueue.put(executor.submit(lambda: threadmaker(server, nickname, user, passw)))

def threadmaker(server, nickname, user, passw):
    # this function loops until the indicator is 0 and then runs the refresh function
    global workinglist
    if 'normal' == main.state():
        if workinglist[server + "counter"] == "1":
            time.sleep(3)
            myQueue.put(executor.submit(threadmaker(server, nickname, user, passw)))
        if workinglist[server + "counter"] == "0":
            myQueue.put(executor.submit(refresh(server, nickname, user, passw)))
            time.sleep(3)
            myQueue.put(executor.submit(threadmaker(server, nickname, user, passw)))
    if 'normal' != main.state():
        print(main.state())
        pass

def refresh(server, nickname, user, passw):
    global workinglist
    workinglist[server + "counter"] = "1"
    if 'normal' == main.state():
        if 'normal' == main.state():
            try:
                ssh = paramiko.SSHClient()
                ssh.load_system_host_keys()
                ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
                ssh.connect(str(server), username=str(user), password=str(passw), timeout=10, allow_agent=False, look_for_keys=False)
                stdin, stdout, stderr = ssh.exec_command("DF -H")
                type(stdin)
                test2 = stdout.readlines()
                stdin.flush()
                stdin.close()
                ssh.close()
                # << do something with the test2 value >>
            except Exception as E:
                print(E)
        if 'normal' == main.state():
            try:
                pass  # << another ssh query >>
            except Exception as E:
                pass
    workinglist[server + "counter"] = "0"

main.mainloop()
Am I handling the threads or the queue incorrectly?
I've added print(threading.currentThread().getName(), 'Starting') to the beginning of the refresh function, and the number of running threads never gets above the number of servers added plus one for the main thread. So if I'm handling all the threads with my thread pool, what is hanging up? I assume something in the SSH attempt.
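One thing worth double-checking (an observation about the code above, not a confirmed fix): executor.submit(refresh(server, nickname, user, passw)) calls refresh immediately in the current thread and submits its return value, whereas executor.submit(refresh, server, nickname, user, passw), or the lambda form used in creation, is what actually hands the call to the pool. A tiny sketch of the difference:

import concurrent.futures

executor = concurrent.futures.ThreadPoolExecutor(max_workers=2)

def refresh(server):
    return "refreshed " + server

# executor.submit(refresh("srv1")) would run refresh() right here, in the
# calling thread, and submit its return value (a string) to the pool.

# This hands the call itself to a worker thread, as intended:
future = executor.submit(refresh, "srv1")
print(future.result())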
