I am using Python 3.4.2 with cx_Freeze 4.3.4 (all 64-bit).
The program I have created works fine under Python, but when frozen it starts giving me problems with
sys.stdout.flush()
AttributeError: 'NoneType' object has no attribute 'flush'
Using methods recommended on here, I have managed to reduce the problem to a traceback message which flashes on the screen for a few seconds before disappearing. How can I resolve this issue?
Windows error screenshot
sys.stdout.flush() is only called when the code hits the break statement in the multiprocessing section of the code.
Any suggestions to either suppress/redirect the error to my log file, or to help resolve the source of the problem, would be greatly appreciated.
Karl
class vDiags(tk.Tk):
    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        tk.Tk.wm_title(self, "Diagnostics")
        # do stuff ............
        start_job()
def pinger(job_q, mdic, ping, result_q):
    devnull = open(os.devnull, 'w')
    logger.info("Starting Pinger")
    while True:
        ip = job_q.get()
        if ip is None:
            logger.info("No IP address, finishing early")
            break
        test_result = {}
        try:
            if is_valid_ipv4_address(ip) is True:
                # do more stuff.........
def start_job():
    logger.info("Starting start_Job")
    pool_size = Variables.poll_size
    logger.info("Pool size %s" % pool_size)
    jobs = multiprocessing.Queue()
    logger.info("Jobs %s" % jobs)
    results = multiprocessing.Queue()
    logger.info("results %s" % results)
    manager = multiprocessing.Manager()
    logger.info("manager %s" % manager)
    manager_test_dict = manager.dict()
    logger.info("manager_test_dict %s" % manager_test_dict)
    for key, val in Variables.test_dic.items():
        manager_test_dict[key] = val
    pool = [multiprocessing.Process(target=pinger, args=(jobs, manager_test_dict, Variables.ping, results))
            for i in range(pool_size)]
    for p in pool:
        logger.info("p in pool %s" % p)
        p.start()
    for i in Variables.source_ip:
        logger.info("Source IP:> %s" % i)
        jobs.put(i)
    for p in pool:
        logger.info("p in pool (jobs) %s" % p)
        jobs.put(None)
    for p in pool:
        logger.info("p in pool (join) %s" % p)
        p.join()
    logger.info("Move Results to new Variable")
    logger.info(results.qsize())
    while not results.empty():
        Variables.test_result_raw = updatetree(Variables.test_result_raw, results.get())
    logger.info("Finished start_Job")
class fakestd(object):
    def write(self, string):
        logger.info("write %s" % string)

    def flush(self):
        logger.info("Flush %s" % self)
if __name__ == '__main__':
    # ********** Main App **********
    sys.stdout = fakestd()
    sys.stderr = fakestd()
    multiprocessing.freeze_support()
    logger.info("tkinter Installed Version %s" % tk.TkVersion)
    app = vDiags()
    app.geometry("1280x720")
    app.mainloop()
    # ********** Main App **********
I am hitting this issue right now (I just migrated to multiprocessing from threading).
It appears to be a bug in the multiprocessing module around the multiprocessing.freeze_support() call:
https://bugs.python.org/issue20607
There are reports it is still present in py35, but there it stands a chance of being fixed at the Python source level.
From a py34 point of view, the ONE file I needed to update was:
C:\Python34\Lib\multiprocessing\process.py
diff -u process_orig.py process.py
--- process_orig.py     2016-12-12 12:42:01.568186800 +0000
+++ process.py  2016-12-12 12:37:28.971929900 +0000
@@ -270,8 +270,14 @@
             traceback.print_exc()
         finally:
             util.info('process exiting with exitcode %d' % exitcode)
-            sys.stdout.flush()
-            sys.stderr.flush()
+# ---------- JRB modify
+            #sys.stdout.flush()    # jrb edit to fix cx_freeze
+            #sys.stderr.flush()    # jrb edit to fix cx_freeze
+            if sys.stdout is not None:
+                sys.stdout.flush()
+            if sys.stderr is not None:
+                sys.stderr.flush()
+# ---------- JRB modify
         return exitcode
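If you would rather not patch the installed process.py, a workaround in the same spirit (a minimal sketch, assuming the None standard streams are the trigger) is to give sys.stdout and sys.stderr harmless replacements at module level in your main script:

import os
import sys

# In a frozen GUI app sys.stdout/sys.stderr can be None, which is what
# makes multiprocessing's flush() calls fail. Placing this at module
# level (outside the __main__ guard) means spawned children, which
# re-import the main module, run it too.
if sys.stdout is None:
    sys.stdout = open(os.devnull, 'w')
if sys.stderr is None:
    sys.stderr = open(os.devnull, 'w')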
Related
I have a function with two parameters:
reqs = [1223, 1456, 1243, 20455]
url = "pass a url"

def crawl(i, url):
    print("%s is %s" % (i, url))
I want to run the above function using multiprocessing.
from multiprocessing import Pool

if __name__ == '__main__':
    p = Pool(5)
    print(p.map([crawl(i, url) for i in reqs]))
The above code is not working for me. Can anyone please help me with this?
----- ADDING NEW CODE ---------
from multiprocessing import Pool

reqs = [1223, 1456, 1243, 20455]
url = "pass a url"

def crawl(combined_args):
    print("%s is %s" % (combined_args[0], combined_args[1]))

def main():
    p = Pool(5)
    print(p.map(crawl, [(i, url) for i in reqs]))

if __name__ == '__main__':
    main()
When I try to execute the above code, I get the error below.
According to the multiprocessing.Pool.map documentation, this is the function signature:
map(func, iterable[, chunksize])
You are trying to pass map a single list comprehension (which calls crawl immediately) instead of (func, iterable).
Please refer to the following example of multiprocessing.Pool (source):
import time
from multiprocessing import Pool

work = (["A", 5], ["B", 2], ["C", 1], ["D", 3])

def work_log(work_data):
    print(" Process %s waiting %s seconds" % (work_data[0], work_data[1]))
    time.sleep(int(work_data[1]))
    print(" Process %s Finished." % work_data[0])

def pool_handler():
    p = Pool(2)
    p.map(work_log, work)

if __name__ == '__main__':
    pool_handler()
Please note that it passes one argument to the work_log function, and inside the function it uses the index to get to the relevant fields.
Referring to your example:
from multiprocessing import Pool

reqs = [1223, 1456, 1243, 20455]
url = "pass a url"

def crawl(combined_args):
    print("%s is %s" % (combined_args[0], combined_args[1]))

def main():
    p = Pool(5)
    print(p.map(crawl, [(i, url) for i in reqs]))

if __name__ == '__main__':
    main()
Results with:
1223 is pass a url
1456 is pass a url
1243 is pass a url
20455 is pass a url
[None, None, None, None] # This is the output of the map function
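As a side note (not part of the original answer): if you would rather keep crawl with two separate parameters, Pool.starmap (available since Python 3.3) unpacks each argument tuple for you:

from multiprocessing import Pool

reqs = [1223, 1456, 1243, 20455]
url = "pass a url"

def crawl(i, url):
    print("%s is %s" % (i, url))

def main():
    # starmap calls crawl(i, url) for each tuple in the iterable
    with Pool(5) as p:
        p.starmap(crawl, [(i, url) for i in reqs])

if __name__ == '__main__':
    main()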
Issue resolved. The crawl function should be in a separate module, like below:
crawler.py
def crawl(combined_args):
    print("%s is %s" % (combined_args[0], combined_args[1]))
run.py
from multiprocessing import Pool
import crawler

# reqs and url as defined earlier in the question
reqs = [1223, 1456, 1243, 20455]
url = "pass a url"

def main():
    p = Pool(5)
    print(p.map(crawler.crawl, [(i, url) for i in reqs]))

if __name__ == '__main__':
    main()
Then the output will be like below:
Output:
1223 is pass a url
1456 is pass a url
1243 is pass a url
20455 is pass a url
[None, None, None, None] # This is the output of the map function
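A likely explanation (my assumption, not stated in the thread): with the spawn start method, worker processes locate the function passed to map by importing it under its qualified name, so a function defined in an interactive session or in a __main__ that cannot be re-imported fails to pickle. Moving crawl into an importable module such as crawler.py gives the workers a stable import path.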
Could someone explain to me why my code doesn't print the expected output [Thread1 36] from the thread? I am currently using Python 3.7.0 on macOS Catalina 10.15.2.
Here is my code:
import timeit, _thread

def expmod1(a, n, m):
    return (a**n) % m

# avg_time is a higher order function
# argument is the different variations of the expmod(135, 202, 53) function
def avg_time(thread_name, expmod_function):
    print("Start")
    result = expmod_function(135, 202, 53)
    print(thread_name + " " + str(result), flush=True)
    return result

# analysis of all three functions based on average timecost using a constant function as defined by avg_time
def analysis1():
    try:
        _thread.start_new_thread(avg_time, ("Thread1", expmod1))
    except:
        print("Unable to start thread")

def main():
    analysis1()

if __name__ == "__main__":
    main()
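For what it's worth (my reading, not an answer from the original thread): threads started with _thread die as soon as the main thread exits, and here main() returns immediately after start_new_thread, so the process usually ends before the worker gets to print. A minimal sketch using the higher-level threading module, reusing avg_time and expmod1 from the question:

import threading

def analysis1():
    # threading.Thread instead of _thread: join() lets the main thread
    # wait for the worker instead of exiting (and killing it) early.
    t = threading.Thread(target=avg_time, args=("Thread1", expmod1))
    t.start()
    t.join()  # block until the worker has printed its result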
I'm trying to build a proxy checker with a multiprocessing pool. I have two variables for saving the proxies that work and the ones that don't, but both just return 0 entries. I'm on Python 3.5, Debian 9.6, and the file has 200 lines (one proxy per line).
#!/usr/bin/env python3
from multiprocessing import Pool
import requests
import time
import sys

if sys.version_info > (3, 0):
    pass
else:
    print("This program was written for python 3")
    exit()

class ProxyChecker():
    def __init__(self, proxy_list_file):
        self.proxy_list = proxy_list_file
        self.working = []
        self.not_working = []
        self.time_elapsed = 0

    def start_pool_to_check_proxies(self):
        start_time = time.time()
        with Pool(processes=200) as p:
            p.map(self.check_proxies, self.proxy_list)
        self.time_elapsed = time.time() - start_time
        print("Number of working proxies = " + str(len(self.working)))
        print("Number of proxies that don't work = "
              + str(len(self.not_working)))
        print("Number of proxies that have been checked = "
              + str(len(self.proxy_list)))
        print("Time elapsed while checking " + str(len(self.proxy_list))
              + " proxies: " + str(self.time_elapsed))

    def check_proxies(self, proxy):
        try:
            response = requests.get(
                'http://google.com',
                proxies={'http': 'http://' + proxy},
                timeout=25
            )
            print('Checking ' + proxy + " ...")
            self.working.extend(str(proxy))
        except Exception as e:
            print("Something went wrong")
            self.not_working.extend(str(proxy))
        """else:
            if response.status_code == 200:
                self.working.extend(proxy)
                print(self.working)
            else:
                self.not_working.extend(proxy)"""

def main():
    try:
        with open("proxies.txt", 'r') as f:
            proxies = f.read().split('\n')
    except IOError:
        print('Error opening the file')
        print('Check the name of the file')
    else:
        # with open("proxies.txt", 'a') as f:
        #     f.write("*************Working Proxies*************")
        checker = ProxyChecker(proxies)
        checker.start_pool_to_check_proxies()

if __name__ == '__main__':
    main()
As I said, the idea is to save in a list which proxies work (and which ones don't), but both lists come back empty, even though proxy_list holds the proxies correctly.
If anyone could help me I would be very grateful.
Happy new year!!
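For context (my diagnosis, not part of the original post): each Pool worker runs in its own process with its own copy of the ProxyChecker instance, so self.working.extend(...) in a child never updates the lists in the parent, and they stay empty. Collecting results through the return value of map avoids shared state entirely; a minimal sketch (check_proxy and check_all are hypothetical names):

from multiprocessing import Pool

import requests

def check_proxy(proxy):
    # Return (proxy, ok) instead of mutating instance attributes,
    # since child processes cannot update the parent's lists.
    try:
        requests.get('http://google.com',
                     proxies={'http': 'http://' + proxy},
                     timeout=25)
        return proxy, True
    except Exception:
        return proxy, False

def check_all(proxies):
    with Pool(processes=20) as p:
        results = p.map(check_proxy, proxies)
    working = [proxy for proxy, ok in results if ok]
    not_working = [proxy for proxy, ok in results if not ok]
    return working, not_working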
I am making a Python script that initially created threads and used them to brute force port 22 on my local machine using a wordlist, as part of an infosec project.
I had issues when there were too many threads, and I wanted to kill them off elegantly and exit the program, so I started to look at multiprocessing instead, based on this post's answer by user cfi.
The problem I have is that when I run the program I get the error below.
python3 ssh_brute.py
[*] Pass not found
Traceback (most recent call last):
  File "/Users/richardcurteis/anaconda3/lib/python3.7/multiprocessing/queues.py", line 236, in _feed
    obj = _ForkingPickler.dumps(obj)
  File "/Users/richardcurteis/anaconda3/lib/python3.7/multiprocessing/reduction.py", line 51, in dumps
    cls(buf, protocol).dump(obj)
  File "/Users/richardcurteis/anaconda3/lib/python3.7/multiprocessing/process.py", line 330, in __reduce__
    'Pickling an AuthenticationString object is '
TypeError: Pickling an AuthenticationString object is disallowed for security reasons
I assume I am doing something wrong with the multiprocessing API, but I am not sure what exactly. I have looked at the docs and I believe I am more or less on track.
What am I missing?
Code:
import paramiko
from multiprocessing import Queue, Process

TARGET_IP = 'localhost'
USERNAME = 'richardcurteis'
WORDLIST = 'test2.txt'
MAX_THREADS = 10

processes = []
found = []
q = Queue()

def ssh_connect(target_ip, username, password):
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy)
    try:
        ssh.connect(target_ip, username=username, password=password)
        found.append(password)
        q.put(password)
    except paramiko.ssh_exception.AuthenticationException:
        print("[*] Failed: ", password)
        return False
    finally:
        ssh.close()
    return True

def close_threads(abort=False):
    for p in processes:
        p.join()
    if abort:
        for x in processes:
            x.terminate()
    processes.clear()

def main():
    with open(WORDLIST) as input_handle:
        process_count = 0
        for line in input_handle:
            try:
                password = line.rstrip()
                p = Process(target=ssh_connect, args=[TARGET_IP, USERNAME, password])
                processes.append(p)
                p.start()
                q.put(p)
                process_count += 1
                if not q.empty():
                    break
                if process_count >= MAX_THREADS:
                    close_threads()
                    process_count = 0
            except KeyboardInterrupt:
                print("[!] Interrupted by user")
                break
            except (ConnectionResetError, paramiko.ssh_exception.SSHException):
                print("[X] Connection reset by target. Reduce thread count")
                break
    close_threads()
    if len(found) > 0:
        for c in found:
            print("[!] Found: ", c)
    else:
        print("[*] Pass not found")

if __name__ == '__main__':
    main()
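For context (my diagnosis, not stated in the post): the traceback ends in Process.__reduce__, which fires when a Process object itself is pickled. That happens at q.put(p): multiprocessing.Queue pickles everything put on it, and a Process carries an AuthenticationString that refuses to be pickled. Putting only plain data on the queue (ssh_connect already calls q.put(password) on success) avoids it. A sketch of the relevant fragment of main()'s loop without q.put(p):

# Only picklable plain data should go on a multiprocessing.Queue,
# so start the worker but do NOT put the Process object on the queue.
p = Process(target=ssh_connect, args=[TARGET_IP, USERNAME, password])
processes.append(p)
p.start()
process_count += 1

# Workers call q.put(password) on success, so a non-empty queue
# means a hit was found.
if not q.empty():
    break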
Python 3.6.5
This is my code for listing a directory in SVN. It works quite well for most directories, but with one path the line error = proc.stderr.readlines() hangs forever (100% repeatable). Any ideas why, or a workaround? From the terminal the command works.
class MyRemoteSvnClient(object):
    def __init__(self, url):
        self.url = url

    def list(self, rel_path=None, retries=5):
        url = self.url if self.url.endswith('/') else self.url + '/'
        if rel_path:
            url = '{}{}'.format(url, rel_path)
        # print(url)
        retries = retries + 1
        for i in range(1, retries):
            proc = Popen(['svn', 'ls', url], shell=True, stdout=PIPE,
                         stderr=PIPE, universal_newlines=True)
            error = proc.stderr.readlines()
            if i == retries - 1:
                raise SvnException(error)
            if error:
                logger.warning('svn error occurred, retrying {}/{}'.format(i, retries - 1))
                sleep(1)
                continue
            while True:
                output = proc.stdout.readline().strip()
                if proc.poll() is not None:
                    break
                if output:
                    yield output
            break
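A likely cause (my assumption, the post doesn't confirm it): readlines() blocks until svn closes stderr, and if svn fills the stdout pipe buffer first, parent and child deadlock, which is why only one (presumably large) directory triggers it. communicate() drains both pipes concurrently and avoids this. Note also that shell=True combined with a list of arguments is usually unintended on POSIX. A minimal sketch, reusing SvnException from the original code:

from subprocess import Popen, PIPE

def svn_ls(url):
    # No shell=True: pass the argument list straight to svn.
    proc = Popen(['svn', 'ls', url], stdout=PIPE, stderr=PIPE,
                 universal_newlines=True)
    # communicate() reads stdout and stderr together, so neither
    # pipe buffer can fill up and block the child process.
    out, err = proc.communicate()
    if proc.returncode != 0:
        raise SvnException(err)
    return [line.strip() for line in out.splitlines() if line.strip()]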