proc.stderr.readlines() hangs with specific svn path - python-3.x

Python 3.6.5
This is my code for listing a directory in SVN. It works quite well for most directories, but with one particular path the line error = proc.stderr.readlines() hangs forever (100% repeatable). Any ideas why, or a workaround? The same command works fine from the terminal.
class MyRemoteSvnClient(object):
    def __init__(self, url):
        self.url = url

    def list(self, rel_path=None, retries=5):
        url = self.url if self.url.endswith('/') else self.url + '/'
        if rel_path:
            url = '{}{}'.format(url, rel_path)
        # print(url)
        retries = retries + 1
        for i in range(1, retries):
            proc = Popen(['svn', 'ls', url], shell=True, stdout=PIPE,
                         stderr=PIPE, universal_newlines=True)
            error = proc.stderr.readlines()
            if i == retries - 1:
                raise SvnException(error)
            if error:
                logger.warning('svn error occurred, retrying {}/{}'.format(i, retries - 1))
                sleep(1)
                continue
            while True:
                output = proc.stdout.readline().strip()
                if proc.poll() is not None:
                    break
                if output:
                    yield output
            break
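
A likely explanation for the hang, independent of the specific path: readlines() on stderr only returns once svn closes stderr, but if svn has already filled the stdout pipe buffer it blocks on writing and never exits, so both sides wait forever. A minimal sketch of the usual way around that, assuming svn is on PATH; communicate() drains both pipes concurrently, and SvnException is replaced by a plain RuntimeError here:

# Hedged sketch: communicate() reads stdout and stderr together, so the child
# can never stall on a full pipe while we wait on the other stream.
from subprocess import Popen, PIPE

def svn_ls(url):
    # Argument list passed directly, so shell=True is not needed here.
    proc = Popen(['svn', 'ls', url], stdout=PIPE, stderr=PIPE,
                 universal_newlines=True)
    out, err = proc.communicate()      # drains both pipes to EOF
    if proc.returncode != 0:
        raise RuntimeError(err)        # stand-in for the question's SvnException
    return [line.strip() for line in out.splitlines() if line.strip()]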

Related

Aws Lambda "Runtime.HandlerNotFound" python

I am new to Python and AWS Lambda. I am trying to run this script from a Lambda function, but I am getting the error:
Runtime.HandlerNotFound
The script works fine if I run it from the EC2 instance, but when I run the same script from AWS Lambda it throws this error.
I would be really thankful if someone could guide me on what I did wrong.
Thank you
import boto3
import requests
import time

AWS_Access_Key_ID =
AWS_Secret_Access_Key =

DELAY_TIME = 10  # 10 Seconds
region = 'us-east-2'

# instances = ['']
instances = {
    'instance id': 'http://link',
    'instance id': 'http://link'
}

ec2 = None
try:
    ec2 = boto3.client('ec2', aws_access_key_id=AWS_Access_Key_ID, aws_secret_access_key=AWS_Secret_Access_Key, region_name=region)
    # ec2 = boto3.resource('ec2',aws_access_key_id=AWS_Access_Key_ID, aws_secret_access_key=AWS_Secret_Access_Key, region_name=region)
except Exception as e:
    print(e)
    print("AWS CREDS ERROR, Exiting...")
    exit()

def startInstances(instancesIds):
    if(type(instancesIds) != list):
        instancesIds = [instancesIds]
    try:
        response = ec2.start_instances(InstanceIds=instancesIds, DryRun=False)
        print(response)
        print("Instances Started")
    except ClientError as e:
        print(e)
        print("Instances Failed to Start")

def stopInstances(instancesIds):
    if(type(instancesIds) != list):
        instancesIds = [instancesIds]
    try:
        response = ec2.stop_instances(InstanceIds=instancesIds, DryRun=False)
        print(response)
        print("Instances Stopped")
    except ClientError as e:
        print(e)
        print("Instances Failed to Stop")

def check():
    for x in instances:
        retry = 0
        live = False
        print("Checking Webiste " + instances[x])
        while(retry < 5):
            try:
                r = requests.get(instances[x], verify=True)
                if(r.status_code == 200):
                    live = True
                    break
            except:
                print("Not Live, retry time " + str(retry + 1))
                print("Delaying request for " + str(DELAY_TIME) + " seconds...")
                retry += 1
                time.sleep(DELAY_TIME)
        if(live):
            print("Website is live")
            # call function to start the ec2 instance
            startInstances(x)
        else:
            # call function to stop the ec2 instance
            print('Website is dead')
            stopInstances(x)
        print("")

def main():
    check()

if __name__ == '__main__':
    main()
https://docs.aws.amazon.com/lambda/latest/dg/python-handler.html You need to specify the name of the handler function, which is the function that AWS Lambda will call, and then implement that function in your Python script.
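Applied to the script in the question, a minimal sketch might look like the following, assuming the file is saved as lambda_function.py and the function's Handler setting is lambda_function.lambda_handler (the return value is only illustrative):

def lambda_handler(event, context):
    # Entry point that AWS Lambda invokes; it simply runs the existing check().
    check()
    return {'statusCode': 200, 'body': 'check finished'}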
I had a similar problem recently. I was able to define a lambda handler function in my Python code that solved the problem. I got the guidance from this post.
In short, add this code (adjust the naming accordingly):
import botocore
import boto3

def lambda_handler(event, context):
    s3 = boto3.resource('s3')
    bucket = s3.Bucket('bucketname')
    exists = True
    try:
        s3.meta.client.head_bucket(Bucket='bucketname')
    except botocore.exceptions.ClientError as e:
        # If a client error is thrown, then check that it was a 404 error.
        # If it was a 404 error, then the bucket does not exist.
        error_code = int(e.response['Error']['Code'])
        if error_code == 404:
            exists = False

Trying to make asyncio working with telnetlib

I'm having a hard time trying to make asyncio work with telnetlib to interrogate some hardware.
I think I clearly don't understand how asyncio works, and I'm completely lost in all of this. It's really unclear.
My basic (synchronous) version works well, but interrogating the complete list of equipment currently takes 6 hours, and a large part of the equipment does not respond because it is unreachable.
Since asyncio makes it possible to parallelize the connections instead of waiting for each timeout to trigger, I would like to transform my code into proper asynchronous code, but without success.
Here is what I tried:
import telnetlib
import time
import datetime
import asyncio
from env.fonctions import *
from env.variables import *
first_cmds = ['term length 0', \
              'show run', \
              'exit']

#create lists to iterate through
hosts = ['router-1', 'router-2', 'router-3', 'router-4', 'router-5']

async def main(hosts, user_rw_hw, password_rw_hw, first_cmds):
    class ContinueI(Exception):
        pass
    continue_i = ContinueI()
    for host in hosts:
        print(f'{host} | Trying to connect...')
        try:
            tn = await async_establish_telnet_connexion(user_rw_hw, password_rw_hw, host, 23, 0.5, True)
        except:
            continue
        print(f'{host} | Checking if equipment is not Nexus')
        tn.write('show version'.encode('ascii') + b"\n")
        sh_ver = await async_read_telnet_output(tn)
        if 'Nexus' in sh_ver or 'NX-OS' in sh_ver or 'nexus' in sh_ver:
            print(f'{host} | Equipment is Nexus, closing connection...')
            tn.write('exit'.encode('ascii') + b"\n")
            continue
        tn.write(''.encode('ascii') + b"\n")
        try:
            for cmd in first_cmds:
                tn.write(cmd.encode('ascii') + b"\n")
                if not 'exit' in cmd:
                    response = await async_read_telnet_output(tn)
                    if '\r\n% Invalid' in response:
                        print(f'{host} | Command "{cmd}" not recognized')
                        raise continue_i
            else:
                print(f'{host} | Commands are accepted')
        except ContinueI:
            tn.write(b"exit\n")
            tn.write(b"exit\n")
            print(f'{host} | Logout for command not recognized')
            continue

if __name__ == "__main__":
    try:
        loop = asyncio.get_event_loop()
        loop.set_debug(1)
        loop.run_until_complete(main(hosts, user_rw_hw, password_rw_hw, first_cmds))
    except Exception as e:
        pass
    finally:
        loop.close()
and the functions:
async def async_read_telnet_output(tn, timeout=2, timestep=0.1):
    timer = 0
    data = b''
    while timer <= timeout:
        new_datas = tn.read_very_eager()
        if len(new_datas) != 0:
            timer = 0
            data += new_datas
        await asyncio.wait(timestep)
        timer += timestep
    return data.decode('utf-8')

async def async_establish_telnet_connexion(user_rw_hw, password_rw_hw, host, port=23, timeout=1, debug=False):
    try:
        tn = telnetlib.Telnet(host, port)  # Here I don't know how to make it awaitable; if I put await before it the IDE says this method is not awaitable. Even if I put an awaitable like asyncio.sleep here the behaviour is the same, so this is not the only problem.
    except:
        if debug == True:
            print(f"{host} | Telnet not responding.")
        raise Exception
    if debug == True:
        print(f"{host} | Telnet is responding.")
    response = loop.create_task(async_read_telnet_output(tn, 15))
    if not 'Username:' in response and not 'login' in response:
        if debug == True:
            print(f"{host} | Don't see Username asked by equipment.")
        raise Exception
    else:
        tn.write(user_rw_hw.encode('ascii') + b"\n")
        if debug == True:
            print(f"{host} | Username entered.")
    try:
        await tn.read_until(b"Password: ", timeout)
    except:
        if debug == True:
            print(f"{host} | Don't see Password asked by equipment.")
        raise Exception
    finally:
        tn.write(password_rw_hw.encode('ascii') + b"\n")
    response = await async_read_telnet_output(tn, 10)
    if '% Authentication failed' in response or 'Rejected' in response:
        if debug == True:
            print(f"{host} | Connection failed bad credentials.")
        raise Exception
    if debug == True:
        print(f"{host} | Connection succeed waiting for commands.")
    return tn
If someone can see where I'm going wrong I would be grateful; I've been stuck on this for a week... I've been reading books and YouTube tutorials, but nothing has helped.
Thank you in advance!
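
As a side note: telnetlib itself is blocking, so another common way to overlap many sessions is to leave the blocking code untouched and fan it out over threads from asyncio; slow or unreachable hosts then no longer run one after another. A rough sketch under that assumption, where probe() is a hypothetical stand-in for the real login/command logic:

import asyncio
import telnetlib

def probe(host):
    # Placeholder for the real blocking telnetlib session (login, commands, ...).
    tn = telnetlib.Telnet(host, 23, timeout=5)
    tn.write(b"show version\n")
    banner = tn.read_until(b">", timeout=5)
    tn.close()
    return host, banner.decode('ascii', errors='replace')

async def run_all(hosts, max_parallel=50):
    loop = asyncio.get_event_loop()
    sem = asyncio.Semaphore(max_parallel)       # cap simultaneous sessions

    async def one(host):
        async with sem:
            # run the blocking probe in the default thread pool
            return await loop.run_in_executor(None, probe, host)

    return await asyncio.gather(*(one(h) for h in hosts), return_exceptions=True)

results = asyncio.get_event_loop().run_until_complete(run_all(['router-1', 'router-2']))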
For the ones landing here... I found
https://pypi.org/project/asynctelnet/
A quick example:
client:
import anyio, asynctelnet

async def shell(tcp):
    async with asynctelnet.TelnetClient(tcp, client=True) as stream:
        while True:
            # read stream until '?' mark is found
            outp = await stream.receive(1024)
            if not outp:
                # End of File
                break
            elif '?' in outp:
                # reply all questions with 'y'.
                await stream.send('y')
            # display all server output
            print(outp, flush=True)
    # EOF
    print()

async def main():
    async with await anyio.connect_tcp('localhost', 56023) as client:
        await shell(client)

anyio.run(main)
Server:
import anyio, asynctelnet

async def shell(tcp):
    async with asynctelnet.TelnetServer(tcp) as stream:
        # this will fail if no charset has been negotiated
        await stream.send('\r\nWould you like to play a game? ')
        inp = await stream.receive(1)
        if inp:
            await stream.echo(inp)
            await stream.send('\r\nThey say the only way to win '
                              'is to not play at all.\r\n')

async def main():
    listener = await anyio.create_tcp_listener(local_port=56023)
    await listener.serve(shell)

anyio.run(main)
Still, this lib has some bugs, so the current state is "be prepared to code some bug workarounds if you want to use it".

List returning 0 when I use extend (multiprocessing pool)

I'm trying to write a proxy checker with a multiprocessing pool. I have variables meant to save the proxies that work and the ones that don't, but both just come back as 0. I'm on Python 3.5, Debian 9.6, and the file has 200 lines (one proxy per line).
#!usr/bin/env python3
from multiprocessing import Pool
import requests
import time
import sys

if (sys.version_info > (3, 0)):
    pass
else:
    print("This program was written for python 3")
    exit()

class ProxyChecker():
    def __init__(self, proxy_list_file):
        self.proxy_list = proxy_list_file
        self.working = []
        self.not_working = []
        self.time_elapsed = 0

    def start_pool_to_check_proxies(self):
        start_time = time.time()
        with Pool(processes=200) as p:
            p.map(self.check_proxies, self.proxy_list)
        self.time_elapsed = time.time() - start_time
        print("Number of working proxies = " + str(len(self.working)))
        print("Number of proxies that don't work = " \
              + str(len(self.not_working)))
        print("Number of proxies that have been checked = " + \
              str(len(self.proxy_list)))
        print("Time elapsed while cheking " + str(len(self.proxy_list) \
              + self.time_elapsed))

    def check_proxies(self, proxy):
        try:
            response = requests.get(
                'http://google.com',
                proxies={'http': 'http://' + proxy},
                timeout=25
            )
            print('Checking ' + proxy + " ...")
            self.working.extend(str(proxy))
        except Exception as e:
            print("Something went wrong")
            self.not_working.extend(str(proxy))
        """else:
            if response.status_code == 200:
                self.working.extend(proxy)
                print(self.working)
            else:
                self.not_working.extend(proxy)"""

def main():
    try:
        with open("proxies.txt", 'r') as f:
            proxies = f.read().split('\n')
    except IOError:
        print('Error opening the file')
        print('Check the name of the file')
    else:
        # with open("proxies.txt", 'a') as f:
        #     f.write("*************Working Proxies*************")
        checker = ProxyChecker(proxies)
        checker.start_pool_to_check_proxies()

if __name__ == '__main__':
    main()
As I said, the idea is to save in a list how many proxies work (and which ones), but both lists just come back empty, while proxy_list contains the proxies correctly.
If anyone could help me I would be very pleased.
Happy new year!!
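
A side note on why both counts come back as 0: each Pool worker runs in its own process, so the self.working and self.not_working lists it appends to belong to that worker's copy of the object, not to the parent. One possible way around this, sketched with a hypothetical check_one() helper, is to return each verdict from the worker and let Pool.map collect the results in the parent process:

from multiprocessing import Pool
import requests

def check_one(proxy):
    # Runs in a worker process; returns the proxy together with its verdict.
    try:
        requests.get('http://google.com',
                     proxies={'http': 'http://' + proxy}, timeout=25)
        return proxy, True
    except Exception:
        return proxy, False

def check_all(proxies):
    with Pool(processes=20) as p:
        results = p.map(check_one, proxies)   # list of (proxy, ok) pairs, back in the parent
    working = [proxy for proxy, ok in results if ok]
    not_working = [proxy for proxy, ok in results if not ok]
    return working, not_working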

How to find why thread is suspended when using multiprocessing or bypass that?

I use feedparser to get RSS feeds from some sites; my core code is like this:
def parseworker(procnum, result_queue, return_dict, source_link):
    try:
        data = feedparser.parse(source_link)
        return_dict[procnum] = data
    except Exception as e:
        print(str(e))
    result_queue.put(source_link + 'grabbed')

def infoworker(procnum, timeout, result_queue, source_name, source_link):
    text = 'recheck ' + source_name + ': ' + '...'
    progress = ''
    for x in range(timeout):
        progress += '.'
        sys.stdout.write('\r' + text + progress)
        sys.stdout.flush()
        time.sleep(1)
    result_queue.put('time out')

def parsecaller(link, timeout, timestocheck):
    return_dict = multiprocessing.Manager().dict()
    result_queue = multiprocessing.Queue()
    counter = 1
    jobs = []
    result = []
    while not (counter > timestocheck):
        p1 = multiprocessing.Process(target=infoworker, args=(11, timeout, result_queue, source_name, link))
        p2 = multiprocessing.Process(target=parseworker, args=(22, result_queue, return_dict, link))
        jobs.append(p1)
        jobs.append(p2)
        p1.start()
        p2.start()
        result_queue.get()
        p1.terminate()
        p2.terminate()
        p1.join()
        p2.join()
        result = return_dict.values()
        if not result or result[0].bozo:
            print(' bad - no data', flush=True)
            result = -1
        else:
            print(' ok ', flush=True)
            result = result[0]
            break
        counter += 1
    if result == -1:
        raise bot_exceptions.ParserExceptionData()
    elif result == -2:
        raise bot_exceptions.ParserExceptionConnection()
    else:
        return result

if __name__ == '__main__':
    multiprocessing.freeze_support()
    multiprocessing.set_start_method('spawn')
    try:
        data = parsecaller(source_link, timeout=wait_time, timestocheck=check_times)
    except Exception as e:
        print(str(e))
        continue
It works well, but after some random amount of time it goes into a suspended state and does nothing, like an infinite boot loop. It may suspend after 4 hours or after 3 days; it's random.
I tried to solve that problem with multiprocessing: use a main process with a timer, like infoworker. When infoworker stops, it puts "result" into the queue, which unblocks result_queue.get() in parsecaller, which then terminates both processes. But it does not work. Today, after 11 hours, my code was suspended in multiprocessing's managers.py:
def serve_forever(self):
    '''
    Run the server forever
    '''
    self.stop_event = threading.Event()
    process.current_process()._manager_server = self
    try:
        accepter = threading.Thread(target=self.accepter)
        accepter.daemon = True
        accepter.start()
        try:
            while not self.stop_event.is_set():
                self.stop_event.wait(1)
        except (KeyboardInterrupt, SystemExit):
            pass
    finally:
        if sys.stdout != sys.__stdout__: # what about stderr?
            util.debug('resetting stdout, stderr')
            sys.stdout = sys.__stdout__
            sys.stderr = sys.__stderr__
        sys.exit(0)
The whole time it was stuck in:
while not self.stop_event.is_set():
    self.stop_event.wait(1)
I think that either the GIL is somehow not letting any other threads run in the processes, or feedparser goes into a loop. And of course it gets suspended with random RSS sources.
My environment:
macOS 10.12.6 (the same situation also occurred on Win 7 and Win 10)
Python 3.7.0 (it also occurred on 3.6.2 and 3.6.5)
PyCharm 2017.2.2
My questions:
How to understand why it gets suspended (what to do, any recipe)?
How to bypass that state (what to do, any recipe)?
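
Two hedged suggestions rather than a definite answer: Python's faulthandler module can dump every thread's stack on a signal or after a delay, which usually shows where a silently stuck process is sitting, and giving result_queue.get() a timeout keeps parsecaller from blocking forever if neither worker ever puts anything. A rough sketch of both ideas (get_with_deadline is a hypothetical helper, not part of the original code):

import faulthandler
import signal
import queue

# Dump all thread stacks when the process receives SIGUSR1 (POSIX only):
# run `kill -USR1 <pid>` while the program looks frozen.
faulthandler.register(signal.SIGUSR1, all_threads=True)

# Or dump automatically if the program is still alive after 10 minutes.
faulthandler.dump_traceback_later(600, exit=False)

def get_with_deadline(result_queue, deadline):
    # Bounded replacement for the blocking result_queue.get() in parsecaller.
    try:
        return result_queue.get(timeout=deadline)
    except queue.Empty:
        return None   # caller can then terminate p1/p2 and retry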

cx_freeze using sys.stdout.flush() and multiprocessing

I am using Python 3.4.2 with cx_Freeze 4.3.4 (all 64-bit).
The program I have created works fine under Python, but when frozen it starts giving me problems with
sys.stdout.flush()
AttributeError: 'NoneType' object has no attribute 'flush'
Using methods recommended on here, I have managed to reduce the problem to a traceback message which flashes on the screen for a few seconds before disappearing. How can I resolve this issue?
Windows Error Screen shot
The stdout.flush() is only called when it meets the BREAK command in the multiprocessing section of the code.
Any suggestions to either suppress/redirect the error to my log file, or to help resolve the source of the problem, would be greatly appreciated.
Karl
class vDiags(tk.Tk):
    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        tk.Tk.wm_title(self, "Diagnostics")
        # do stuff ............
        start_job()

def pinger(job_q, mdic, ping, result_q):
    devnull = open(os.devnull, 'w')
    logger.info("Starting Pinger")
    while True:
        ip = job_q.get()
        if ip is None:
            logger.info("No IP address, finishing early")
            break
        test_result = {}
        try:
            if is_valid_ipv4_address(ip) is True:
                # do more stuff.........
def start_job():
    logger.info("Starting start_Job")
    pool_size = Variables.poll_size
    logger.info("Pool size %s" % pool_size)
    jobs = multiprocessing.Queue()
    logger.info("Jobs %s" % jobs)
    results = multiprocessing.Queue()
    logger.info("results %s" % results)
    manager = multiprocessing.Manager()
    logger.info("manager %s" % manager)
    manager_test_dict = manager.dict()
    logger.info("manager_test_dict %s" % manager_test_dict)
    for key, val in Variables.test_dic.items():
        manager_test_dict[key] = val
    pool = [multiprocessing.Process(target=pinger, args=(jobs, manager_test_dict, Variables.ping, results))
            for i in range(pool_size)
            ]
    for p in pool:
        logger.info("p in pool %s" % p)
        p.start()
    for i in Variables.source_ip:
        logger.info("Source IP:> %s" % i)
        jobs.put(i)
    for p in pool:
        logger.info("p in pool (jobs) %s" % p)
        jobs.put(None)
    for p in pool:
        logger.info("p in pool (join) %s" % p)
        p.join()
    logger.info("Move Results to new Variable")
    logger.info(results.qsize())
    while not results.empty():
        Variables.test_result_raw = updatetree(Variables.test_result_raw, results.get())
    logger.info("Finished start_Job")

class fakestd(object):
    def write(self, string):
        logger.info("write %s" % string)
        pass

    def flush(self):
        logger.info("Flush %s " % self)
        pass

if __name__ == '__main__':
    # ********** Main App **********
    sys.stdout = fakestd()
    sys.stderr = fakestd()
    multiprocessing.freeze_support()
    logger.info("tkinter Installed Version %s" % tk.TkVersion)
    app = vDiags()
    app.geometry("1280x720")
    app.mainloop()
    # ********** Main App **********
I am hitting this issue right now (I just migrated to multiprocessing from threading).
It appears to be a questionable bug in the multiprocessing module and the multiprocessing.freeze_support() call.
https://bugs.python.org/issue20607
There are reports it is still present in py35, but that stands a chance of being fixed at the Python source level.
From a py34 point of view, the ONE file I needed to update was:
C:\Python34\Lib\multiprocessing\process.py
diff -u process_orig.py process.py
--- process_orig.py     2016-12-12 12:42:01.568186800 +0000
+++ process.py  2016-12-12 12:37:28.971929900 +0000
@@ -270,8 +270,14 @@
                 traceback.print_exc()
         finally:
             util.info('process exiting with exitcode %d' % exitcode)
-            sys.stdout.flush()
-            sys.stderr.flush()
+# ---------- JRB modify
+            #sys.stdout.flush()   # jrb edit to fix cx_freeze
+            #sys.stderr.flush()   # jrb edit to fix cx_freeze
+            if sys.stdout is not None:
+                sys.stdout.flush()
+            if sys.stderr is not None:
+                sys.stderr.flush()
+# ---------- JRB modify
         return exitcode
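
A possible alternative to patching the installed process.py, untested here: install dummy streams at module level rather than under the __main__ guard. Multiprocessing's spawned children re-import the main module (with the guard skipped), so they would also end up with flushable objects instead of None:

import sys

class NullStream(object):
    """Minimal stand-in stream so frozen child processes can call flush()."""
    def write(self, text):
        pass
    def flush(self):
        pass

# Module level, NOT inside `if __name__ == '__main__':`, so multiprocessing's
# re-import of the main module in child processes runs it too.
if sys.stdout is None:
    sys.stdout = NullStream()
if sys.stderr is None:
    sys.stderr = NullStream()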
