Python Django LineBot Error 54 'Connection reset by peer'

I'm trying to create a LINE bot with Python Django. I want it to receive some messages and then scrape a website. Since the website has a form, I send a POST request to submit it.
Although I scrape the data successfully, an error message shows up in Python. It seems the LINE bot occupies the POST method in Django and I then send another POST request on top of it, but I'm not sure my understanding is correct. I can't find any solution for this. Could someone show me how to fix it?
Exception happened during processing of request from ('127.0.0.1', 50246)
Traceback (most recent call last):
File "/usr/local/anaconda3/lib/python3.8/socketserver.py", line 650, in process_request_thread
self.finish_request(request, client_address)
File "/usr/local/anaconda3/lib/python3.8/socketserver.py", line 360, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/usr/local/anaconda3/lib/python3.8/socketserver.py", line 720, in __init__
self.handle()
File "/usr/local/anaconda3/lib/python3.8/site-packages/django/core/servers/basehttp.py", line 174, in handle
self.handle_one_request()
File "/usr/local/anaconda3/lib/python3.8/site-packages/django/core/servers/basehttp.py", line 182, in handle_one_request
self.raw_requestline = self.rfile.readline(65537)
File "/usr/local/anaconda3/lib/python3.8/socket.py", line 669, in readinto
return self._sock.recv_into(b)
ConnectionResetError: [Errno 54] Connection reset by peer
Below is my code. I receive a keyword, send a POST request to the website, and finally reply to the user:
@csrf_exempt
def callback(request):
    if request.method == 'POST':
        signature = request.META['HTTP_X_LINE_SIGNATURE']
        body = request.body.decode('utf-8')
        content = "None"
        try:
            events = parser.parse(body, signature)
        except InvalidSignatureError:
            return HttpResponseForbidden()
        except LineBotApiError:
            return HttpResponseBadRequest()
        for event in events:
            if isinstance(event, MessageEvent):
                msg = event.message.text.strip()
                if msg.startswith('!'):
                    msg = msg.replace('!', '')
                    if msg == 'temp':
                        content = "HELP"
                    elif msg.startswith(' '):
                        content = 'Command not found'
                    elif ' ' in msg:
                        info = msg.split(' ')
                        if len(info) > 2:
                            content = 'Too many arguments'
                        else:
                            ID = info[1]
                            temp = temperature.TempReport(ID)
                            content = temp.scrape()
                            #content = 'Test'
                    else:
                        content = 'Unknown command'
                    line_bot_api.reply_message(
                        event.reply_token,
                        TextSendMessage(text=content)
                    )
                    print('submit success')
                else:
                    line_bot_api.reply_message(
                        event.reply_token,
                        TextSendMessage(text=msg)
                    )
        return HttpResponse()
    else:
        return HttpResponseBadRequest()
Here is the scraping code:
def scrape(self):
    if not self.postToServer():
        return f'ID:{self.ID} submit temperature fail!'
    return f'ID:{self.ID} submit temperature successfully'

def postToServer(self):
    self.session.post(self.url_in, data=self.payload)
    sleep(0.1)
    resp = self.session.get(self.url_out)
    sleep(0.1)
    soup = BeautifulSoup(resp.text, features='lxml')
    result = soup.find(class_='title-text').text.strip()
    return 0 if 'completed' not in result else 1  # truthy return means the submission went through
The POST request works fine, and the error happens when I return the HttpResponse in the callback function. I don't know what the issue is here...
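In case it helps to see the scraping flow spelled out, here is a slightly more defensive variant of postToServer. It is only a sketch reusing the attributes shown above (self.session, self.url_in, self.url_out, self.payload), not a change to the webhook logic:
def postToServer(self):
    # Submit the form first and bail out early if the server rejected it.
    post_resp = self.session.post(self.url_in, data=self.payload)
    if not post_resp.ok:
        return 0  # falsy: scrape() will report a failed submission
    # Then fetch the page that shows whether the submission completed.
    resp = self.session.get(self.url_out)
    soup = BeautifulSoup(resp.text, features='lxml')
    title = soup.find(class_='title-text')
    result = title.text.strip() if title else ''
    return 1 if 'completed' in result else 0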

Related

Reddit and Twitter bot in Python using PRAW

I am a beginner in Python and am trying to make a bot that automatically tweets anything posted on a subreddit I have created.
I took help from some online tutorials, which gave the following code:
import praw
import json
import requests
import tweepy
import time

access_token = '************************************'
access_token_secret = '************************************'
consumer_key = '************************************'
consumer_secret = '************************************'

def strip_title(title):
    if len(title) == 94:
        return title
    else:
        return title[:93] + "..."

def tweet_creator(subreddit_info):
    post_dict = {}
    post_ids = []
    print("[bot] Getting posts from Reddit")
    for submission in subreddit_info.get_hot(limit=20):
        post_dict[strip_title(submission.title)] = submission.url
        post_ids.append(submission.id)
    print("[bot] Generating short link using goo.gl")
    mini_post_dict = {}
    for post in post_dict:
        post_title = post
        post_link = post_dict[post]
        short_link = shorten(post_link)
        mini_post_dict[post_title] = short_link
    return mini_post_dict, post_ids

def setup_connection_reddit(subreddit):
    print("[bot] setting up connection with Reddit")
    r = praw.Reddit(' %s' % (subreddit))
    subreddit = r.get_subreddit(subreddit)
    return subreddit

def shorten(url):
    headers = {'content-type': 'application/json'}
    payload = {"longUrl": url}
    url = "https://www.googleapis.com/urlshortener/v1/url"
    r = requests.post(url, data=json.dumps(payload), headers=headers)
    link = json.loads(r.text)['id']
    return link

def duplicate_check(id):
    found = 0
    with open('posted_posts.txt', 'r') as file:
        for line in file:
            if id in line:
                found = 1
    return found

def add_id_to_file(id):
    with open('posted_posts.txt', 'a') as file:
        file.write(str(id) + "\n")

def main():
    subreddit = setup_connection_reddit('*Name of the subreddit*')
    post_dict, post_ids = tweet_creator(subreddit)
    tweeter(post_dict, post_ids)

def tweeter(post_dict, post_ids):
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    for post, post_id in zip(post_dict, post_ids):
        found = duplicate_check(post_id)
        if found == 0:
            print("[bot] Posting this link on twitter")
            print(post + " " + post_dict[post] + " #Python #reddit #bot")
            api.update_status(post + " " + post_dict[post] + " #Python #reddit #bot")
            add_id_to_file(post_id)
            time.sleep(30)
        else:
            print("[bot] Already posted")

if __name__ == '__main__':
    main()
The code seems fine in PyCharm, however I get the following error when I try to run it directly from the folder via the Terminal with the command below (reddit_bot2.py is my file name):
python3 reddit_bot2.py
When I try to run the code I get the following error:
mahesh#Maheshs-MacBook-Air Atoms % python3 reddit_bot2.py
[bot] setting up connection with Reddit
Traceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/configparser.py", line 846, in items
d.update(self._sections[section])
KeyError: '**Name of the subreddit to fetch posts from**'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/mahesh/Python_Bot/Atoms/reddit_bot2.py", line 82, in <module>
main()
File "/Users/mahesh/Python_Bot/Atoms/reddit_bot2.py", line 62, in main
subreddit = setup_connection_reddit('Bot167')
File "/Users/mahesh/Python_Bot/Atoms/reddit_bot2.py", line 36, in setup_connection_reddit
r = praw.Reddit(' %s' %(subreddit))
File "/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/praw/reddit.py", line 227, in __init__
self.config = Config(
File "/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/praw/config.py", line 85, in __init__
self.custom = dict(Config.CONFIG.items(site_name), **settings)
File "/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/configparser.py", line 849, in items
raise NoSectionError(section)
configparser.NoSectionError: No section: ' Bot167'
You provided the name of a praw.ini configuration which does not exist.
For help with creating a Reddit instance, visit
https://praw.readthedocs.io/en/latest/code_overview/reddit_instance.html
For help on configuring PRAW, visit
https://praw.readthedocs.io/en/latest/getting_started/configuration.html
Any help in this regard would be highly appreciated.
Thanks :)
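For context, the NoSectionError above is PRAW treating ' Bot167' as the name of a praw.ini site section that does not exist. In PRAW 4 and later, a Reddit instance is normally built either from such a section or from explicit keyword credentials; the sketch below shows the keyword form with obviously placeholder values, not the tutorial's original setup:
import praw

# Hedged sketch: placeholder credentials from your own Reddit app settings.
reddit = praw.Reddit(
    client_id="YOUR_CLIENT_ID",
    client_secret="YOUR_CLIENT_SECRET",
    user_agent="reddit-to-twitter bot by u/YOUR_USERNAME",
)
subreddit = reddit.subreddit("NameOfYourSubreddit")
for submission in subreddit.hot(limit=20):
    print(submission.title, submission.url)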

OS Error while doing requests for images with multiple threads in python

I'm making a program that gets info about games from a website, including images. Since I'm trying to download the info for every game on that site, using a single thread on a 1 Mbps connection would be very painful, so I decided to spawn one thread for each letter of the alphabet that a game's title can start with (games can be filtered that way). Inside the function that downloads the image for a given game, while I have more than one thread running, sooner or later an error is raised; then, inside the except block that handles it, another exception is raised, and so on, over and over. This quickly brings the threads to an end. The strange thing is that when I'm left with only a single thread, that thread carries on perfectly well without any trouble.
Question:
How can I solve this, and why is it happening?
Deduction:
I think that when multiple threads reach the requests.get line inside the download_image function (the function where the problem must lie), it may fail because of the simultaneous requests... that is as far as I can guess.
I really don't have the least idea how to solve this, so I would appreciate any help. Thanks in advance.
I removed all the functions that have nothing to do with the problem described above.
I spawn the threads at the end of the program, and each thread's target function is get_all_games_from_letter.
CODE
from bs4 import BeautifulSoup
from string import ascii_lowercase
from datetime import date
from vandal_constants import *
from PIL import Image
from requests.exceptions import ConnectionError
from exceptions import NoTitleException
from validator_collection import url as url_check
from rawgpy import RAWG
from io import BytesIO
import traceback
import requests
import threading
import sqlite3
import concurrent.futures
### GLOBALS #####
FROM_RAWG = False
INSERT_SQL = ''
# CONSTANTS ########
rawg = RAWG('A Collector')
#################
def download_image(tag=None, game=None, rawg_game=None):
    if tag:
        return sqlite3.Binary(requests.get(url).content) if (url := tag['data-src']) else None
    elif game:
        global FROM_RAWG
        img_tag = game.select_one(IMG_TAG_SELECTOR)
        if img_tag and img_tag.get('data-src', None):
            try:
                if url_check(img_tag['data-src']):
                    return sqlite3.Binary(requests.get(img_tag['data-src']).content)
                print(f"{img_tag['data-src']} is NOT a valid url")
            except ConnectionError:
                try:
                    print('Error While downloading from "Vandal.elespannol.com" website:')
                    traceback.print_exc()
                except Exception:
                    print('Another Exception Ocurred')
                    traceback.print_exc()
            except OSError:
                print('Error en el Handshake parece')
                traceback.print_exc()
        FROM_RAWG = True
        if rawg_game and getattr(rawg_game, 'background_image', None):
            try:
                print('Continue to download from RAWG')
                return sqlite3.Binary(requests.get(rawg_game.background_image).content)
            except ConnectionError:
                print('Error While downloading from RAWG:')
                traceback.print_exc()
    return None
def prepare_game_record(game, db_games_set):
    global INSERT_SQL
    title = getattr(game.select_one(TITLE_TAG_SELECTOR), 'text', None)
    if not title:
        raise NoTitleException()
    if title in db_games_set:
        print(f'Already Have {title} in database')
        return None
    description = game.select_one(DESCRIPTION_TAG_SELECTOR)
    rawg_game = None
    try:
        rawg_game = rawg.search(title)[0]
    except Exception as err:
        print('No rawg')
        traceback.print_exc()
    game_data = {
        'nombre': title,
        'descripcion': description.text if description else rawg_game.description if rawg_game else '',
        'genero': genres if (genres := translate_genres(game.select_one(GENRES_TAG_SELECTOR).contents[1].strip().split(' / '))) else '',
        'fondo': resize_image(img) if (img := download_image(game=game, rawg_game=rawg_game)) and not FROM_RAWG else img,
        'year': None,
    }
    if not INSERT_SQL:
        INSERT_SQL = construct_sql_insert(**game_data)
    if hasattr(rawg_game, 'released'):
        game_data['year'] = date.fromisoformat(rawg_game.released).year
    return tuple(game_data.values())
def get_all_games_from_letter(letter):
    global FROM_RAWG
    counter = 36
    hashes_set = set()
    with sqlite3.connect('/media/l0new0lf/LocalStorage/data.db') as connection:
        cursor = connection.cursor()
        cursor.execute(f'SELECT nombre FROM juegos where nombre like "{letter.upper()}%"')
        db_games_set = []
        for row in cursor:
            db_games_set.append(row[0])
        db_games_set = set(db_games_set)
        while True:
            try:
                prepared_games = []
                rq = requests.get(
                    f'https://vandal.elespanol.com/juegos/13/pc/letra/{letter}/inicio/{counter}')
                if rq:
                    print('Request GET: from ' +
                          f'https://vandal.elespanol.com/juegos/13/pc/letra/{letter}/inicio/{counter}' + ' Got Workable HTML !')
                else:
                    print('Request GET: from ' +
                          f'https://vandal.elespanol.com/juegos/13/pc/letra/{letter}/inicio/{counter}' + ' Not Working !!, getting next page!')
                    continue
                if rq.status_code == 301 or rq.status_code == 302 or rq.status_code == 303 or rq.status_code == 304:
                    print(f'No more games in letter {letter}\n**REDIRECTING TO **')
                    break
                counter += 1
                soup = BeautifulSoup(rq.content, 'lxml')
                main_table = soup.select_one(GAME_SEARCH_RESULTS_TABLE_SELECTOR)
                if hash(main_table.get_text()) not in hashes_set:
                    hashes_set.add(hash(main_table.get_text()))
                else:
                    print('Repeated page ! I\'m done with this letter.')
                    break
                game_tables = main_table.find_all(
                    'table', {'class': GAME_TABLES_CLASS})
                print('entering game_tables loop')
                for game in game_tables:
                    FROM_RAWG = False
                    try:
                        game_record = prepare_game_record(game, db_games_set)
                    except NoTitleException:
                        print('There is no title for this game, DISCARDING!')
                        continue
                    except Exception as err:
                        print('Unknown ERROR in prepare_games_record function')
                        traceback.print_exc()
                        continue
                    if not game_record:
                        continue
                    prepared_games.append(game_record)
                    print('Game successfully prepared !')
                if prepared_games:
                    print(f'Thread, Writing to Database')
                    try:
                        cursor.executemany(INSERT_SQL, prepared_games)
                        connection.commit()
                    except Exception as err:
                        print(err)
                print('done')
            except Exception as err:
                print('TRULY UNEXPECTED EXCEPTION')
                print(err)
                traceback.print_exc()
                continue

# get_all_games_from_letter('c')  # using a single thread? no trouble at all!!
with concurrent.futures.ThreadPoolExecutor(len(ascii_lowercase)) as executor:
    for letter in ascii_lowercase:
        executor.submit(get_all_games_from_letter, letter)
Error Stack Trace:
Note: this is only part of the error output; the rest is more of the same.
Game successfully prepared !
Error While downloading from "Vandal.elespannol.com" website:
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/urllib3/connectionpool.py", line 665, in urlopen
httplib_response = self._make_request(
File "/usr/lib/python3/dist-packages/urllib3/connectionpool.py", line 376, in _make_request
self._validate_conn(conn)
File "/usr/lib/python3/dist-packages/urllib3/connectionpool.py", line 996, in _validate_conn
conn.connect()
File "/usr/lib/python3/dist-packages/urllib3/connection.py", line 366, in connect
self.sock = ssl_wrap_socket(
File "/usr/lib/python3/dist-packages/urllib3/util/ssl_.py", line 370, in ssl_wrap_socket
return context.wrap_socket(sock, server_hostname=server_hostname)
File "/usr/lib/python3.8/ssl.py", line 500, in wrap_socket
return self.sslsocket_class._create(
File "/usr/lib/python3.8/ssl.py", line 1040, in _create
self.do_handshake()
File "/usr/lib/python3.8/ssl.py", line 1309, in do_handshake
self._sslobj.do_handshake()
OSError: [Errno 0] Error
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/requests/adapters.py", line 439, in send
resp = conn.urlopen(
File "/usr/lib/python3/dist-packages/urllib3/connectionpool.py", line 719, in urlopen
retries = retries.increment(
File "/usr/lib/python3/dist-packages/urllib3/util/retry.py", line 400, in increment
raise six.reraise(type(error), error, _stacktrace)
File "/usr/lib/python3/dist-packages/six.py", line 702, in reraise
raise value.with_traceback(tb)
File "/usr/lib/python3/dist-packages/urllib3/connectionpool.py", line 665, in urlopen
httplib_response = self._make_request(
File "/usr/lib/python3/dist-packages/urllib3/connectionpool.py", line 376, in _make_request
self._validate_conn(conn)
File "/usr/lib/python3/dist-packages/urllib3/connectionpool.py", line 996, in _validate_conn
conn.connect()
File "/usr/lib/python3/dist-packages/urllib3/connection.py", line 366, in connect
self.sock = ssl_wrap_socket(
File "/usr/lib/python3/dist-packages/urllib3/util/ssl_.py", line 370, in ssl_wrap_socket
return context.wrap_socket(sock, server_hostname=server_hostname)
File "/usr/lib/python3.8/ssl.py", line 500, in wrap_socket
return self.sslsocket_class._create(
File "/usr/lib/python3.8/ssl.py", line 1040, in _create
self.do_handshake()
File "/usr/lib/python3.8/ssl.py", line 1309, in do_handshake
self._sslobj.do_handshake()
urllib3.exceptions.ProtocolError: ('Connection aborted.', OSError(0, 'Error'))
To solve the problem, all one needs is to add a global lock so that when each thread tries to request.get an image, it first has to check whether some other thread is already downloading one. That is, downloading an image is restricted to a single thread at a time:
#######GLOBALS####
lock = threading.Lock()  # Add this to the global variables
##################

def download_image(tag=None, game=None, rawg_game=None):
    if tag:
        return sqlite3.Binary(requests.get(url).content) if (url := tag['data-src']) else None
    elif game:
        global FROM_RAWG
        img_tag = game.select_one(IMG_TAG_SELECTOR)
        if img_tag and img_tag.get('data-src', None):
            try:
                if url_check(img_tag['data-src']):
                    lock.acquire()  # acquire the lock for downloading (other threads must wait until the one that acquired it finishes)
                    temp = sqlite3.Binary(requests.get(img_tag['data-src']).content)
                    lock.release()  # release the lock when done receiving the HttpResponse
                    return temp
                print(f"{img_tag['data-src']} is NOT a valid url")
            except ConnectionError:
                try:
                    print('Error While downloading from "Vandal.elespannol.com" website:')
                    traceback.print_exc()
                except Exception:
                    print('Another Exception Ocurred')
                    traceback.print_exc()
            except OSError:
                print('Error en el Handshake parece')
                traceback.print_exc()
        FROM_RAWG = True
        if rawg_game and getattr(rawg_game, 'background_image', None):
            try:
                print('Continue to download from RAWG')
                lock.acquire()  # acquire the lock for downloading
                temp = sqlite3.Binary(requests.get(rawg_game.background_image).content)
                lock.release()  # release the lock when done
                return temp
            except ConnectionError:
                print('Error While downloading from RAWG:')
                traceback.print_exc()
    return None
And done, no more trouble with downloading images in multiple threads... but still, I don't actually know why I need to make sure that only one request.get runs at a time across all threads; I thought the OS handled this kind of thing with queues or something.
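A small side note on the lock pattern above: a manual lock.acquire()/lock.release() pair leaves the lock held forever if requests.get raises in between. A hedged alternative (a sketch only; fetch_image_bytes is just an illustrative helper name) uses the same lock as a context manager so it is always released:
def fetch_image_bytes(url):
    # Same serialization as above, but the lock is released even if the request fails.
    with lock:
        return sqlite3.Binary(requests.get(url).content)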

'NoneType' object has no attribute 'user'

global twitter_user_info
twitter_user_info = []

def get_user_info(twitter_user):
    """
    An example of using the query_user_info method
    :param twitter_user: the twitter user to capture user data
    :return: twitter_user_data: returns a dictionary of twitter user data
    """
    user_info = query_user_info(user=twitter_user)
    twitter_user_data = {}
    twitter_user_data["user"] = user_info.user
    twitter_user_data["fullname"] = user_info.full_name
    twitter_user_data["location"] = user_info.location
    twitter_user_data["blog"] = user_info.blog
    twitter_user_data["date_joined"] = user_info.date_joined
    twitter_user_data["id"] = user_info.id
    twitter_user_data["num_tweets"] = user_info.tweets
    twitter_user_data["following"] = user_info.following
    twitter_user_data["followers"] = user_info.followers
    twitter_user_data["likes"] = user_info.likes
    twitter_user_data["lists"] = user_info.lists
    return twitter_user_data

absd = []

def main():
    start = time.time()
    csv = pd.read_csv('operationbandar_users.csv')
    users = csv['username']
    pool = Pool(4)
    for user in pool.map(get_user_info, users):
        twitter_user_info.append(user)
    cols = ['id', 'fullname', 'date_joined', 'location', 'blog', 'num_tweets', 'following', 'followers', 'likes', 'lists']
    data_frame = pd.DataFrame(twitter_user_info, index=absd, columns=cols)
    data_frame.index.name = "Users"
    data_frame.sort_values(by="followers", ascending=False, inplace=True, kind='quicksort', na_position='last')
    elapsed = time.time() - start
    print(f"Elapsed time: {elapsed}")
    display(data_frame)
This code is returning the following error:
multiprocessing.pool.RemoteTraceback:
"""
Traceback (most recent call last):
File "/home/nrjkumar/anaconda3/envs/Scraping/lib/python3.7/multiprocessing/pool.py", line 121, in worker
result = (True, func(*args, *kwds))
File "/home/nrjkumar/anaconda3/envs/Scraping/lib/python3.7/multiprocessing/pool.py", line 44, in mapstar
return list(map(*args))
File "examples/get_twitter_user_data_1.py", line 43, in get_user_info
twitter_user_data["user"] = user_info.user
AttributeError: 'NoneType' object has no attribute 'user'
"""
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "examples/get_twitter_user_data_1.py", line 89, in
main()
File "examples/get_twitter_user_data_1.py", line 66, in main
for user in pool.map(get_user_info,users):
File "/home/nrjkumar/anaconda3/envs/Scraping/lib/python3.7/multiprocessing/pool.py", line 268, in map
return self._map_async(func, iterable, mapstar, chunksize).get()
File "/home/nrjkumar/anaconda3/envs/Scraping/lib/python3.7/multiprocessing/pool.py", line 657, in get
raise self._value
AttributeError: 'NoneType' object has no attribute 'user'
I have searched here for similar cases involving pool.map and passing a dictionary as a parameter, but couldn't find the issue. I am a newbie to Python. Can anybody help?
Here is query_user_info():
def query_user_info(user):
    """
    Returns the scraped user data from a twitter user page.
    :param user: the twitter user to web scrape its twitter page info
    """
    try:
        user_info = query_user_page(INIT_URL_USER.format(u=user))
        if user_info:
            logger.info("Got user information from username {}".format(user))
            return user_info
    except KeyboardInterrupt:
        logger.info("Program interrupted by user. Returning user information gathered so far...")
    except BaseException:
        logger.exception("An unknown error occurred! Returning user information gathered so far...")
    logger.info("Got user information from username {}".format(user))
    return user_info
And here is query_user_page():
def query_user_page(url, retry=10, timeout=60):
    """
    Returns the scraped user data from a twitter user page.
    :param url: The URL to get the twitter user info from (url contains the user page)
    :param retry: Number of retries if something goes wrong.
    :return: Returns the scraped user data from a twitter user page.
    """
    print("reached url:", url)
    try:
        proxy = next(proxy_pool)
        logger.info('Using proxy {}'.format(proxy))
        response = requests.get(url, headers=HEADER, proxies={"http": proxy})
        html = response.text or ''
        user_info = User.from_html(html)
        if not user_info:
            return None
        return user_info
    except requests.exceptions.HTTPError as e:
        logger.exception('HTTPError {} while requesting "{}"'.format(e, url))
    except requests.exceptions.ConnectionError as e:
        logger.exception('ConnectionError {} while requesting "{}"'.format(e, url))
    except requests.exceptions.Timeout as e:
        logger.exception('TimeOut {} while requesting "{}"'.format(e, url))
    if retry > 0:
        logger.info('Retrying... (Attempts left: {})'.format(retry))
        return query_user_page(url, retry - 1)
    logger.error('Giving up.')
    return None
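One thing worth spelling out from the code above: query_user_page returns None once its retries are exhausted, query_user_info then hands that None back, and user_info.user in get_user_info is exactly where the AttributeError fires. A minimal guard, shown here only as a sketch of the idea rather than a drop-in fix, would skip users for which nothing came back:
def get_user_info(twitter_user):
    # Sketch: same idea as the original, but tolerate a None result from query_user_info.
    user_info = query_user_info(user=twitter_user)
    if user_info is None:
        return None  # the caller can filter these entries out before building the DataFrame
    return {
        "user": user_info.user,
        "fullname": user_info.full_name,
        # ... the remaining fields exactly as in the original function
    }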

Python - Multiprocessing Pool map returning can't pickle error

I have the following code, which creates a TestRail client and executes TestRail's GET_SUITES API call.
I have a function that calls the GET_SUITES API, and I pass the TestRail client and test_rail_project_id as params.
I am trying to use multiprocessing to run this over my list of projects to speed things up, and I am getting a "can't pickle" error.
My code:
from itertools import product
from multiprocessing import Pool

def get_suites(client, project_id):
    try:
        path = 'get_suites/{projectid}'.format(projectid=project_id)
        test_rail_response = client.send_get(path)
        return test_rail_response
    except Exception as e:
        raise Exception(str(e))

if __name__ == "__main__":
    testRailClient = APIClient(TESTRAIL_URL)
    pool = Pool(2)
    all_project_ids = [100, 200, 300]
    data = pool.starmap(get_suites, product([testRailClient], all_project_ids))
Error stack:
Traceback (most recent call last):
File "main.py", line 57, in <module>
data = pool.starmap(testrailapi.get_suites, product([testRailClient], all_project_ids))
File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/multiprocessing/pool.py", line 274, in starmap
return self._map_async(func, iterable, starmapstar, chunksize).get()
File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/multiprocessing/pool.py", line 644, in get
raise self._value
File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/multiprocessing/pool.py", line 424, in _handle_tasks
put(task)
File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/multiprocessing/connection.py", line 206, in send
self._send_bytes(_ForkingPickler.dumps(obj))
File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/multiprocessing/reduction.py", line 51, in dumps
cls(buf, protocol).dump(obj)
TypeError: can't pickle SSLContext objects
Any suggestions, please?
Thank you.
PS: I am using Python 3.6.
UPDATE:
As suggested, I tried removing the API client as a parameter and it worked, but I am getting the same error when I have get_suites as a method. Please see my updated code below:
class TestRailExecution:
    def __init__(self, url, username, password):
        self.url = url
        self.username = username
        self.password = password
        self.client = APIClient(self.url)
        self.client.user = username
        self.client.password = password

    def get_suites(self, project_id):
        try:
            path = 'get_suites/{projectid}'.format(projectid=project_id)
            test_rail_response = self.client.send_get(path)
            return test_rail_response
        except Exception as e:
            raise Exception(str(e))

if __name__ == "__main__":
    testRailClient = TestRailExecution(TESTRAIL_URL, user, password)
    pool = Pool(2)
    data = pool.map(get_suites, [100, 200, 300])
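For reference, the workaround hinted at by the UPDATE ("removing the API client as a parameter") usually means passing only picklable data, such as the project ids, and constructing the client inside the worker process. A rough sketch under that assumption (get_suites_for_project is an illustrative name, and this skips the username/password setup shown above):
from itertools import repeat
from multiprocessing import Pool

def get_suites_for_project(args):
    url, project_id = args
    # Build the client inside the worker, so nothing unpicklable crosses the Pool boundary.
    client = APIClient(url)
    path = 'get_suites/{projectid}'.format(projectid=project_id)
    return client.send_get(path)

if __name__ == "__main__":
    all_project_ids = [100, 200, 300]
    with Pool(2) as pool:
        data = pool.map(get_suites_for_project, zip(repeat(TESTRAIL_URL), all_project_ids))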

ClientDisconnectedError from aiohttp

I have a websocket server built with aiohttp.
I keep getting this exception in the server error stream.
Task exception was never retrieved
future: <Task finished coro=<read() done, defined at /usr/local/lib/python3.4/dist-packages/aiohttp/streams.py:576> exception=ClientDisconnectedError()>
Traceback (most recent call last):
File "/usr/lib/python3.4/asyncio/tasks.py", line 234, in _step
result = coro.throw(exc)
File "/usr/local/lib/python3.4/dist-packages/aiohttp/streams.py", line 578, in read
result = yield from super().read()
File "/usr/local/lib/python3.4/dist-packages/aiohttp/streams.py", line 433, in read
yield from self._waiter
File "/usr/lib/python3.4/asyncio/futures.py", line 386, in __iter__
yield self # This tells Task to wait for completion.
File "/usr/lib/python3.4/asyncio/tasks.py", line 287, in _wakeup
value = future.result()
File "/usr/lib/python3.4/asyncio/futures.py", line 275, in result
raise self._exception
aiohttp.errors.ClientDisconnectedError
The client shows a message as:
Unclosed client session
client_session: <aiohttp.client.ClientSession object at 0x7f67ec0f0588>
The code in the handler is:
@asyncio.coroutine
def sync(self, request):
    ws = web.WebSocketResponse()
    yield from ws.prepare(request)
    # while True:
    msg = yield from ws.receive()
    if msg.tp == aiohttp.MsgType.text:
        payload = msg.data
        pypayload = json.loads(payload)
        result = {'result': {}}
        for store in pypayload:
            try:
                sync_obj = yield from asyncio.async(self.prepare(store))
            except (IndexError, TypeError, ValidationError) as exc:
                yield from asyncio.async(self.handle_internal_error(exc, store))
            else:
                try:
                    sync_result, request_type = yield from asyncio.async(self.send_data(sync_obj))
                except DuplicateMappingsFound as exc:
                    yield from asyncio.async(self.handle_internal_error(exc, store))
                else:
                    if sync_result.status == 200 and request_type == 'post':
                        yield from asyncio.async(self.process_data(sync_result))
                    elif sync_result.status >= 400:
                        yield from asyncio.async(self.handle_error(sync_result, sync_obj))
                    result['result'].update(
                        {store['store_id']: sync_result.status}
                    )
                    yield from asyncio.async(sync_result.release())
        ws.send_str(json.dumps(result))
    elif msg.tp == aiohttp.MsgType.error:
        print('ws connection closed with exception {0}'.format(ws.exception()))
    yield from asyncio.async(ws.close())
    print('websocket connection closed')
    return ws
The client code is:
@asyncio.coroutine
def sync_store():
    resp = yield from aiohttp.get('http://localhost/stores/search')
    stores = yield from resp.json()
    total_page = stores['page']['total_page']
    page = stores['page']['current_page']
    total_resp = []
    ws_sockets = []
    while True:
        for page in range(page, total_page):
            session = aiohttp.ClientSession()
            ws = yield from asyncio.async(session.ws_connect('ws://localhost:8765/stores'))
            ws_sockets.append(ws)
            ws.send_str(json.dumps(stores['data']))
            resp = yield from asyncio.async(ws.receive())
            total_resp.append(resp.data)
            # print(resp)
            stores_resp = yield from asyncio.async(aiohttp.post('http://localhost/stores/search',
                                                                data=json.dumps({'page': page + 1}),
                                                                headers={'content-type': 'application/json'}
                                                                ))
            stores = yield from asyncio.async(stores_resp.json())
        while ws_sockets:
            session = ws_sockets.pop(0)
            msg = yield from session.receive()
            if not (msg.tp == aiohttp.MsgType.close or msg.tp == aiohttp.MsgType.closed):
                ws_sockets.append(session)
            else:
                print(ws_sockets)
                break
    print(total_resp)
What could be the problem here?
I also tried enabling debug mode, but that doesn't seem to give any useful output either.
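As a side observation on the "Unclosed client session" message: the client above creates a new aiohttp.ClientSession on every page iteration and never closes any of them. A hedged sketch of the opposite pattern, one session that is explicitly closed, written in the same old-style coroutine syntax as the code above (sync_store_once and its payload argument are illustrative; in recent aiohttp versions close() is a coroutine that must be awaited):
@asyncio.coroutine
def sync_store_once(payload):
    # Sketch: a single session, explicitly closed, instead of one ClientSession per page.
    session = aiohttp.ClientSession()
    try:
        ws = yield from session.ws_connect('ws://localhost:8765/stores')
        ws.send_str(json.dumps(payload))
        msg = yield from ws.receive()
        yield from ws.close()
        return msg.data
    finally:
        session.close()  # avoids the "Unclosed client session" warning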

Resources