python-trio: AttributeError: sendall - python-3.5

I'm just trying to run the echo-client-low-level.py from the python-trio docs:
# echo-client-low-level.py

import sys
import trio

# arbitrary, but:
# - must be in between 1024 and 65535
# - can't be in use by some other program on your computer
# - must match what we set in our echo server
PORT = 12345
# How much memory to spend (at most) on each call to recv. Pretty arbitrary,
# but shouldn't be too big or too small.
BUFSIZE = 16384

async def sender(client_sock):
    print("sender: started!")
    while True:
        data = b"async can sometimes be confusing, but I believe in you!"
        print("sender: sending {!r}".format(data))
        await client_sock.sendall(data)
        await trio.sleep(1)

async def receiver(client_sock):
    print("receiver: started!")
    while True:
        data = await client_sock.recv(BUFSIZE)
        print("receiver: got data {!r}".format(data))
        if not data:
            print("receiver: connection closed")
            sys.exit()

async def parent():
    print("parent: connecting to 127.0.0.1:{}".format(PORT))
    with trio.socket.socket() as client_sock:
        await client_sock.connect(("127.0.0.1", PORT))
        async with trio.open_nursery() as nursery:
            print("parent: spawning sender...")
            nursery.start_soon(sender, client_sock)
            print("parent: spawning receiver...")
            nursery.start_soon(receiver, client_sock)

trio.run(parent)
However, their example produces a nasty AttributeError: sendall error:
$ python echo-client-low-level.py
parent: connecting to 127.0.0.1:12345
parent: spawning sender...
parent: spawning receiver...
sender: started!
sender: sending b'async can sometimes be confusing, but I believe in you!'
receiver: started!
Traceback (most recent call last):
File "echo-client-low-level.py", line 43, in <module>
trio.run(parent)
File "/usr/lib/python3.6/site-packages/trio/_core/_run.py", line 1225, in run
return result.unwrap()
File "/usr/lib/python3.6/site-packages/trio/_core/_result.py", line 119, in unwrap
raise self.error
File "/usr/lib/python3.6/site-packages/trio/_core/_run.py", line 1334, in run_impl
msg = task.coro.send(next_send)
File "/usr/lib/python3.6/site-packages/trio/_core/_run.py", line 923, in init
self.entry_queue.spawn()
File "/usr/lib/python3.6/site-packages/trio/_util.py", line 109, in __aexit__
await self._agen.asend(None)
File "/usr/lib/python3.6/site-packages/async_generator/_impl.py", line 274, in asend
return await self._do_it(self._it.send, value)
File "/usr/lib/python3.6/site-packages/async_generator/_impl.py", line 290, in _do_it
return await ANextIter(self._it, start_fn, *args)
File "/usr/lib/python3.6/site-packages/async_generator/_impl.py", line 202, in send
return self._invoke(self._it.send, value)
File "/usr/lib/python3.6/site-packages/async_generator/_impl.py", line 209, in _invoke
result = fn(*args)
File "/usr/lib/python3.6/site-packages/trio/_core/_run.py", line 318, in open_nursery
await nursery._nested_child_finished(nested_child_exc)
File "/usr/lib/python3.6/contextlib.py", line 99, in __exit__
self.gen.throw(type, value, traceback)
File "/usr/lib/python3.6/site-packages/trio/_core/_run.py", line 203, in open_cancel_scope
yield scope
File "/usr/lib/python3.6/site-packages/trio/_core/_multierror.py", line 144, in __exit__
raise filtered_exc
File "/usr/lib/python3.6/site-packages/trio/_core/_run.py", line 203, in open_cancel_scope
yield scope
File "/usr/lib/python3.6/site-packages/trio/_core/_run.py", line 318, in open_nursery
await nursery._nested_child_finished(nested_child_exc)
File "/usr/lib/python3.6/site-packages/trio/_core/_run.py", line 427, in _nested_child_finished
raise MultiError(self._pending_excs)
File "/usr/lib/python3.6/site-packages/trio/_core/_run.py", line 1334, in run_impl
msg = task.coro.send(next_send)
File "echo-client-low-level.py", line 41, in parent
nursery.start_soon(receiver, client_sock)
File "/usr/lib/python3.6/site-packages/trio/_util.py", line 109, in __aexit__
await self._agen.asend(None)
File "/usr/lib/python3.6/site-packages/async_generator/_impl.py", line 274, in asend
return await self._do_it(self._it.send, value)
File "/usr/lib/python3.6/site-packages/async_generator/_impl.py", line 290, in _do_it
return await ANextIter(self._it, start_fn, *args)
File "/usr/lib/python3.6/site-packages/async_generator/_impl.py", line 202, in send
return self._invoke(self._it.send, value)
File "/usr/lib/python3.6/site-packages/async_generator/_impl.py", line 209, in _invoke
result = fn(*args)
File "/usr/lib/python3.6/site-packages/trio/_core/_run.py", line 318, in open_nursery
await nursery._nested_child_finished(nested_child_exc)
File "/usr/lib/python3.6/contextlib.py", line 99, in __exit__
self.gen.throw(type, value, traceback)
File "/usr/lib/python3.6/site-packages/trio/_core/_run.py", line 203, in open_cancel_scope
yield scope
File "/usr/lib/python3.6/site-packages/trio/_core/_multierror.py", line 144, in __exit__
raise filtered_exc
File "/usr/lib/python3.6/site-packages/trio/_core/_run.py", line 203, in open_cancel_scope
yield scope
File "/usr/lib/python3.6/site-packages/trio/_core/_run.py", line 318, in open_nursery
await nursery._nested_child_finished(nested_child_exc)
File "/usr/lib/python3.6/site-packages/trio/_core/_run.py", line 427, in _nested_child_finished
raise MultiError(self._pending_excs)
File "/usr/lib/python3.6/site-packages/trio/_core/_run.py", line 1334, in run_impl
msg = task.coro.send(next_send)
File "echo-client-low-level.py", line 20, in sender
await client_sock.sendall(data)
File "/usr/lib/python3.6/site-packages/trio/_socket.py", line 426, in __getattr__
raise AttributeError(name)
AttributeError: sendall
After checking GitHub, it seems that sendall has been intentionally omitted.
I'm a little confused: did I miss something?
In [3]: trio.__version__
Out[3]: '0.3.0'

Whoops, that's a bug in the docs – thanks for the catch. There used to be a sendall method on sockets, but it had problems and was a weird feature half-way between the low-level and high-level layer, so it was removed in 0.3.0. But I missed updating the docs there.
I really need to rewrite that section to use the new high-level APIs! (Bug filed.) But for now here's a quick translation of the examples into the new (better, higher-level) API:
Client:
# echo-client.py

import sys
import trio

# arbitrary, but:
# - must be in between 1024 and 65535
# - can't be in use by some other program on your computer
# - must match what we set in our echo server
PORT = 12345
# How much memory to spend (at most) on each call to recv. Pretty arbitrary,
# but shouldn't be too big or too small.
BUFSIZE = 16384

async def sender(client_stream):
    print("sender: started!")
    while True:
        data = b"async can sometimes be confusing, but I believe in you!"
        print("sender: sending {!r}".format(data))
        await client_stream.send_all(data)
        await trio.sleep(1)

async def receiver(client_stream):
    print("receiver: started!")
    while True:
        data = await client_stream.receive_some(BUFSIZE)
        print("receiver: got data {!r}".format(data))
        if not data:
            print("receiver: connection closed")
            sys.exit()

async def parent():
    print("parent: connecting to 127.0.0.1:{}".format(PORT))
    client_stream = await trio.open_tcp_stream("127.0.0.1", PORT)
    async with client_stream:
        async with trio.open_nursery() as nursery:
            print("parent: spawning sender...")
            nursery.start_soon(sender, client_stream)
            print("parent: spawning receiver...")
            nursery.start_soon(receiver, client_stream)

trio.run(parent)
Server:
# echo-server.py

import trio
from itertools import count

# Port is arbitrary, but:
# - must be in between 1024 and 65535
# - can't be in use by some other program on your computer
# - must match what we set in our echo client
PORT = 12345
# How much memory to spend (at most) on each call to recv. Pretty arbitrary,
# but shouldn't be too big or too small.
BUFSIZE = 16384

CONNECTION_COUNTER = count()

async def echo_server(server_stream):
    # Assign each connection a unique number to make our logging easier to
    # understand
    ident = next(CONNECTION_COUNTER)
    print("echo_server {}: started".format(ident))
    try:
        while True:
            data = await server_stream.receive_some(BUFSIZE)
            print("echo_server {}: received data {!r}".format(ident, data))
            if not data:
                print("echo_server {}: connection closed".format(ident))
                return
            print("echo_server {}: sending data {!r}".format(ident, data))
            await server_stream.send_all(data)
    except Exception as exc:
        # Unhandled exceptions will propagate into our parent and take
        # down the whole program. If the exception is KeyboardInterrupt,
        # that's what we want, but otherwise maybe not...
        print("echo_server {}: crashed: {!r}".format(ident, exc))

async def main():
    await trio.serve_tcp(echo_server, PORT)

# We could also just write 'trio.run(serve_tcp, echo_server, PORT)', but real
# programs almost always end up doing other stuff too and then we'd have to go
# back and factor it out into a separate function anyway. So it's simplest to
# just make it a standalone function from the beginning.
trio.run(main)
They won't quite match the text, sorry! But hopefully they'll give you some chance at figuring out what's going on, until I can fix the docs for real.
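If you'd rather keep the original low-level socket version going for now, trio's sockets still expose an async send(), which (like the stdlib's) may transmit only part of the buffer, so a sendall-style loop has to be written by hand. A minimal sketch, just an illustration rather than anything from the docs:

async def sendall(sock, data):
    # send() may hand only part of the buffer to the kernel, so keep
    # looping until everything has been sent.
    remaining = memoryview(data)
    while remaining:
        sent = await sock.send(remaining)
        remaining = remaining[sent:]

and then in sender() call 'await sendall(client_sock, data)' instead of 'await client_sock.sendall(data)'.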

Related

Django/PostgreSQL parallel concurrent incrementing field value with transaction causes OperationalError

I have a model whose records are related to a "bunch". Each record in one bunch should have its own unique version (assigned in order of creation in the DB).
To me, the best way to do this is with transactions, but I ran into a problem with parallel execution of the transaction blocks. When I remove the transaction.atomic() block everything runs, but the versions are not updated after execution.
I wrote a bit of code to test concurrently incrementing the version of records in the database:
import random
from multiprocessing import Process
from time import sleep

def _save_instance(instance):
    time = random.randint(1, 50)
    sleep(time / 1000)
    instance.text = str(time)
    instance.save()

def _parallel():
    instances = MyModel.objects.all()
    # clear version
    print('-- clear old numbers -- ')
    instances.update(version=None)
    processes = []
    for instance in instances:
        p = Process(target=_save_instance, args=(instance,))
        processes.append(p)
    print('-- launching -- ')
    for p in processes:
        p.start()
    for p in processes:
        p.join()
    sleep(1)
    ...
    # assertions to check if versions are correct in one bunch
    print('parallel Ok!')
The save() method on MyModel is defined like this:
...
def save(self, *args, **kwargs) -> None:
    with transaction.atomic():
        if not self.number and self.banch_id:
            max_number = MyModel.objects.filter(
                banch_id=self.banch_id
            ).aggregate(max_number=models.Max('version'))['max_number']
            self.version = max_number + 1 if max_number else 1
        super().save(*args, **kwargs)
When I run my test code on a random number of records (30-300), I get an error:
django.db.utils.OperationalError: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
After that, every process hangs and I can only stop the script with KeyboardInterrupt.
Full process stack trace:
Process Process-14:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/django/db/backends/utils.py", line 86, in _execute
return self.cursor.execute(sql, params)
psycopg2.OperationalError: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/multiprocessing/process.py", line 258, in _bootstrap
self.run()
File "/usr/local/lib/python3.6/multiprocessing/process.py", line 93, in run
self._target(*self._args, **self._kwargs)
File "/app/scripts/test_concurrent_saving.py", line 17, in _save_instance
instance.save()
File "/app/apps/incident/models.py", line 385, in save
).aggregate(max_number=models.Max('version'))['max_number']
File "/usr/local/lib/python3.6/site-packages/django/db/models/query.py", line 384, in aggregate
return query.get_aggregation(self.db, kwargs)
File "/usr/local/lib/python3.6/site-packages/django/db/models/sql/query.py", line 503, in get_aggregation
result = compiler.execute_sql(SINGLE)
File "/usr/local/lib/python3.6/site-packages/django/db/models/sql/compiler.py", line 1152, in execute_sql
cursor.execute(sql, params)
File "/usr/local/lib/python3.6/site-packages/django/db/backends/utils.py", line 100, in execute
return super().execute(sql, params)
File "/usr/local/lib/python3.6/site-packages/raven/contrib/django/client.py", line 123, in execute
return real_execute(self, sql, params)
File "/usr/local/lib/python3.6/site-packages/django/db/backends/utils.py", line 68, in execute
return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
File "/usr/local/lib/python3.6/site-packages/django/db/backends/utils.py", line 77, in _execute_with_wrappers
return executor(sql, params, many, context)
File "/usr/local/lib/python3.6/site-packages/django/db/backends/utils.py", line 86, in _execute
return self.cursor.execute(sql, params)
File "/usr/local/lib/python3.6/site-packages/django/db/utils.py", line 90, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "/usr/local/lib/python3.6/site-packages/django/db/backends/utils.py", line 86, in _execute
return self.cursor.execute(sql, params)
django.db.utils.OperationalError: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
What is the reason for this behavior?
I will be grateful for any help or advice!
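One thing worth checking, though the traceback alone doesn't prove it: each multiprocessing.Process forked here inherits the parent's already-open PostgreSQL connection, and several processes talking over the same socket is a classic way to get "server closed the connection unexpectedly". A sketch of forcing every child onto a fresh connection, assuming the processes are forked from this test script:

from multiprocessing import Process
from django.db import connections

def _parallel():
    instances = list(MyModel.objects.all())   # materialize before closing connections
    MyModel.objects.update(version=None)
    # Close the parent's connections so each forked child opens its own
    # connection instead of reusing the inherited socket.
    connections.close_all()
    processes = [Process(target=_save_instance, args=(i,)) for i in instances]
    for p in processes:
        p.start()
    for p in processes:
        p.join()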

Random connection errors with aio_pika after 2 days of running

I have an asyncio script which connects to RabbitMQ with the aio_pika library every 40 seconds, checks whether there are any messages, prints them out, and repeats this forever. However, usually after 2 or so days of running, I start receiving endless connection exception errors which can only be resolved by restarting the script. Perhaps there is some obvious mistake in the logic of my asyncio script which I am missing?
#!/usr/bin/python3
import time
import async_timeout
import asyncio
import aio_pika

async def got_message(message: aio_pika.IncomingMessage):
    with message.process():
        print(message.body.decode())

async def main(loop):
    try:
        with async_timeout.timeout(10):
            connection = await aio_pika.connect_robust(
                host='#',
                virtualhost='#',
                login='#',
                password='#',
                port=5671,
                loop=loop,
                ssl=True
            )
            channel = await connection.channel()
            await channel.set_qos(prefetch_count=100)
            queue_name = 'mm_message'
            queue = await channel.declare_queue(auto_delete=False, name=queue_name)
            routing_key = 'mm_msg'
            await queue.bind("amq.topic", routing_key)
            que_len = queue.declaration_result.message_count
            if que_len > 0:
                await queue.consume(got_message)
    except:
        print("connection problems..")

if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    while True:
        time.sleep(40)
        loop.run_until_complete(main(loop))
This is the error I endlessly receive after some time:
Traceback (most recent call last):
File "/usr/lib/python3.5/asyncio/events.py", line 125, in _run
self._callback(*self._args)
File "/usr/local/lib/python3.5/dist-packages/aio_pika/pika/adapters/base_connection.py", line 364, in _handle_events
self._handle_read()
File "/usr/local/lib/python3.5/dist-packages/aio_pika/pika/adapters/base_connection.py", line 415, in _handle_read
self._on_data_available(data)
File "/usr/local/lib/python3.5/dist-packages/aio_pika/pika/connection.py", line 1347, in _on_data_available
self._process_frame(frame_value)
File "/usr/local/lib/python3.5/dist-packages/aio_pika/pika/connection.py", line 1414, in _process_frame
if self._process_callbacks(frame_value):
File "/usr/local/lib/python3.5/dist-packages/aio_pika/pika/connection.py", line 1384, in _process_callbacks
frame_value) # Args
File "/usr/local/lib/python3.5/dist-packages/aio_pika/pika/callback.py", line 60, in wrapper
return function(*tuple(args), **kwargs)
File "/usr/local/lib/python3.5/dist-packages/aio_pika/pika/callback.py", line 92, in wrapper
return function(*args, **kwargs)
File "/usr/local/lib/python3.5/dist-packages/aio_pika/pika/callback.py", line 236, in process
callback(*args, **keywords)
File "/usr/local/lib/python3.5/dist-packages/aio_pika/pika/connection.py", line 1332, in _on_connection_tune
self._send_connection_open()
File "/usr/local/lib/python3.5/dist-packages/aio_pika/pika/connection.py", line 1517, in _send_connection_open
self._on_connection_open, [spec.Connection.OpenOk])
File "/usr/local/lib/python3.5/dist-packages/aio_pika/pika/connection.py", line 1501, in _rpc
self._send_method(channel_number, method_frame)
File "/usr/local/lib/python3.5/dist-packages/aio_pika/pika/connection.py", line 1569, in _send_method
self._send_frame(frame.Method(channel_number, method_frame))
File "/usr/local/lib/python3.5/dist-packages/aio_pika/pika/connection.py", line 1548, in _send_frame
raise exceptions.ConnectionClosed
aio_pika.pika.exceptions.ConnectionClosed
except:
    print("connection problems..")
This will catch service exceptions like KeyboardInterrupt and SystemExit. You should never do that if you're not going to re-raise the exception. At the very least you should write:
except Exception:
    print("connection problems..")
However, in the context of asyncio, the snippet above will break the cancellation mechanism. To avoid that, as explained here, you should write:
try:
    await operation
except asyncio.CancelledError:
    raise
except Exception:
    log.log('an error has occurred')
No less important is to understand that a connection should not only be opened, but also closed (regardless of what happened between opening and closing). To achieve that, people usually use context managers (and in asyncio, asynchronous context managers).
aio_pika doesn't seem to be an exception. As the example shows, you should use async with when dealing with a connection:
connection = await aio_pika.connect_robust(
    "amqp://guest:guest#127.0.0.1/", loop=loop
)

async with connection:
    # ...
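Putting those pieces together, the main() from the question might be reshaped roughly like this; it's only a sketch, reusing the exact calls and placeholder credentials from the original snippet:

async def main(loop):
    try:
        with async_timeout.timeout(10):
            connection = await aio_pika.connect_robust(
                host='#', virtualhost='#', login='#', password='#',
                port=5671, loop=loop, ssl=True
            )
        # The connection is closed automatically when this block exits.
        async with connection:
            channel = await connection.channel()
            await channel.set_qos(prefetch_count=100)
            queue = await channel.declare_queue(auto_delete=False, name='mm_message')
            await queue.bind("amq.topic", 'mm_msg')
            if queue.declaration_result.message_count > 0:
                await queue.consume(got_message)
    except asyncio.CancelledError:
        raise                     # let cancellation propagate
    except Exception:
        print("connection problems..")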

Python threading causing issues with google api

I'm running through a list of locations and trying to find places along my route. This is my first attempt at threading, so any tips would be appreciated! When I run this it works fine for the first few iterations, but then I start getting a KeyError, and the API response says the route is not found (even though it should be). If I search along a shorter route, everything runs fine. When I extend the route past a couple of hours of drive time I start getting these errors. Is it possible that I'm overloading it, or does my code look off?
import datetime
import time
import pandas as pd
from threading import Thread
import threading
import requests

start_input = input("start: ")
end_input = input("end: ")
out_way = input("out of the way: ")
out_way_secs = int(out_way) * 60

thread_local = threading.local()

def get_session():
    # One requests.Session per thread, created lazily
    if not getattr(thread_local, "session", None):
        thread_local.session = requests.Session()
    return thread_local.session

def get_routes(url, start, end, waypoint, idx):
    session = get_session()
    with session.get(url, params={'origins': f'{start}|{waypoint}',
                                  'destinations': f'{start}|{end}',
                                  'key': '# key'}) as response:
        route = response.json()
        if route['rows'][1]['elements'][0]['status'] != 'OK':
            results[idx] = {'# info'}
        else:
            nonstop_route = route['rows'][0]['elements'][1]['duration']['value']
            leg1 = route['rows'][1]['elements'][0]['duration']['value']
            leg2 = route['rows'][1]['elements'][1]['duration']['value']
            time_added = (leg1 + leg2) - nonstop_route
            time_added_mins = str(datetime.timedelta(seconds=(leg1 + leg2) - nonstop_route))
            more_time = time_added_mins.split(':')
            added_time_str = str(f'{more_time[0]}:{more_time[1]}:{more_time[2]} away!')
            if time_added < allowable_time:
                results[idx] = {# info to return}
        return results[idx]

if __name__ == "__main__":
    start_time = time.time()
    output_df = pd.DataFrame(columns=['Location', 'Added Time', 'Notes'])
    # coords, stops, and allowable_time are defined elsewhere in the full script
    threads = [None] * coords[0]
    results = [None] * coords[0]
    for i in range(len(threads)):
        threads[i] = Thread(target=get_routes,
                            args=('https://maps.googleapis.com/maps/api/distancematrix/json',
                                  start_input, end_input, stops[i], i))
        threads[i].start()
    for i in range(len(threads)):
        threads[i].join()
    for x in range(len(results)):
        output_df = output_df.append(results[x], ignore_index=True)
    output_df = output_df.sort_values(['Added Time'], ascending=True)
    output_df.to_csv('output.csv', index=False)
There are 3 errors it can hit; the first one pops up by itself and the last 2 come together. The code is the same each time I run it, so I'm not sure why I'm getting different errors.
This is the most common error, and it comes by itself (the routing duration works fine when run individually):
Exception in thread Thread-171:
Traceback (most recent call last):
File "C:\Python37-32\lib\threading.py", line 917, in _bootstrap_inner
self.run()
File "C:\Python37-32\lib\threading.py", line 865, in run
self._target(*self._args, **self._kwargs)
File "C:program.py", line 46, in get_routes
nonstop_route = route['rows'][0]['elements'][1]['duration']['value']
KeyError: 'duration'
The two below I get together and are less common:
Exception in thread Thread-436:
Traceback (most recent call last):
File "C:\Python37-32\lib\threading.py", line 917, in _bootstrap_inner
self.run()
File "C:\Python37-32\lib\threading.py", line 865, in run
self._target(*self._args, **self._kwargs)
File "C:/program.py", line 40, in get_routes
route = response.json()
File "C:\requests\models.py", line 897, in json
return complexjson.loads(self.text, **kwargs)
File "C:\Python37-32\lib\json\__init__.py", line 348, in loads
return _default_decoder.decode(s)
File "C:\Python37-32\lib\json\decoder.py", line 337, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "C:\Python37-32\lib\json\decoder.py", line 355, in raw_decode
raise JSONDecodeError("Expecting value", s, err.value) from None
json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
second error:
Exception in thread Thread-196:
Traceback (most recent call last):
File "C:\site-packages\urllib3\response.py", line 360, in _error_catcher
yield
File "C:\urllib3\response.py", line 442, in read
data = self._fp.read(amt)
File "C:\Python37-32\lib\http\client.py", line 447, in read
n = self.readinto(b)
File "C:\Python37-32\lib\http\client.py", line 491, in readinto
n = self.fp.readinto(b)
File "C:\Python37-32\lib\socket.py", line 589, in readinto
return self._sock.recv_into(b)
File "C:\Python37-32\lib\ssl.py", line 1052, in recv_into
return self.read(nbytes, buffer)
File "C:\Python37-32\lib\ssl.py", line 911, in read
return self._sslobj.read(len, buffer)
ConnectionAbortedError: [WinError 10053] An established connection was aborted by the software in your host machine
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\site-packages\requests\models.py", line 750, in generate
for chunk in self.raw.stream(chunk_size, decode_content=True):
File "C:\site-packages\urllib3\response.py", line 494, in stream
data = self.read(amt=amt, decode_content=decode_content)
File "C:\site-packages\urllib3\response.py", line 459, in read
raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
File "C:\Python37-32\lib\contextlib.py", line 130, in __exit__
self.gen.throw(type, value, traceback)
File "C:\site-packages\urllib3\response.py", line 378, in _error_catcher
raise ProtocolError('Connection broken: %r' % e, e)
urllib3.exceptions.ProtocolError: ("Connection broken: ConnectionAbortedError(10053, 'An established connection was aborted by the software in your host machine', None, 10053, None)", ConnectionAbortedError(10053, 'An established connection was aborted by the software in your host machine', None, 10053, None))
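One pattern worth noting in the script above, offered only as an observation: a Thread is started for every stop, so hundreds of Distance Matrix requests can be in flight at once, and overload tends to show up exactly as the errors quoted here (responses missing 'duration', empty or non-JSON bodies, aborted connections). A bounded pool is the usual way to cap concurrency; here is a sketch using the standard library with the same get_routes function (stops comes from the elided part of the script, and max_workers is an arbitrary choice):

from concurrent.futures import ThreadPoolExecutor

URL = 'https://maps.googleapis.com/maps/api/distancematrix/json'

with ThreadPoolExecutor(max_workers=10) as pool:   # at most 10 requests in flight
    futures = [
        pool.submit(get_routes, URL, start_input, end_input, stops[i], i)
        for i in range(len(stops))
    ]
    for f in futures:
        f.result()   # re-raises any exception from the worker thread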

Aiohttp error "Connect call failed" for multiple async requests to localhost

I am new to aiohttp and was just testing some of its functionality. So I created simple client and server scripts to see how many requests the async code could handle, but I'm running into an error.
The server script is as follows:
from aiohttp import web

# Initialize counter
counter = 1

# View index function
async def index(request):
    # Increments counter
    global counter
    counter += 1
    # Return data
    data = {'test': counter}
    return web.json_response(data)

# Creating application
app = web.Application()

# Registering route
app.router.add_get('/', index)

# Starting server
web.run_app(app, host='127.0.0.1', port=8080)
The client script is as follows:
import asyncio, time, aiohttp
from aiohttp import ClientSession

# Initialize counter
COUNT = 0
requests = 2

async def fetch(url, session):
    async with session.get(url) as response:
        return await response.read()

async def run(r):
    url = "http://localhost:8080/"
    tasks = []
    global COUNT
    # Fetch all responses within one Client session,
    # keep connection alive for all requests.
    async with ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session:
        for i in range(r):
            task = asyncio.ensure_future(fetch(url, session))
            tasks.append(task)
            COUNT += 1
        responses = await asyncio.gather(*tasks)
        # you now have all response bodies in this variable
        print(responses)

# Start timing the process
start = time.time()
loop = asyncio.get_event_loop()
future = asyncio.ensure_future(run(requests))
loop.run_until_complete(future)

# Process complete output
print("The final counter value is: " + str(COUNT))
print("The total time elapsed is: " + str(time.time() - start))
The output error message is as follows when I try to run the client script:
Traceback (most recent call last):
File "C:\ProgramData\Anaconda3\lib\site-packages\aiohttp\connector.py", line 796, in _wrap_create_connection
return (yield from self._loop.create_connection(*args, **kwargs))
File "C:\ProgramData\Anaconda3\lib\asyncio\base_events.py", line 776, in create_connection
raise exceptions[0]
File "C:\ProgramData\Anaconda3\lib\asyncio\base_events.py", line 763, in create_connection
yield from self.sock_connect(sock, address)
File "C:\ProgramData\Anaconda3\lib\asyncio\selector_events.py", line 451, in sock_connect
return (yield from fut)
File "C:\ProgramData\Anaconda3\lib\asyncio\selector_events.py", line 481, in _sock_connect_cb
raise OSError(err, 'Connect call failed %s' % (address,))
ConnectionRefusedError: [Errno 10061] Connect call failed ('::1', 8080)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "async.py", line 34, in <module>
loop.run_until_complete(future)
File "C:\ProgramData\Anaconda3\lib\asyncio\base_events.py", line 466, in run_until_complete
return future.result()
File "async.py", line 25, in run
responses = await asyncio.gather(*tasks)
File "async.py", line 9, in fetch
async with session.get(url) as response:
File "C:\ProgramData\Anaconda3\lib\site-packages\aiohttp\client.py", line 690, in __aenter__
self._resp = yield from self._coro
File "C:\ProgramData\Anaconda3\lib\site-packages\aiohttp\client.py", line 267, in _request
conn = yield from self._connector.connect(req)
File "C:\ProgramData\Anaconda3\lib\site-packages\aiohttp\connector.py", line 402, in connect
proto = yield from self._create_connection(req)
File "C:\ProgramData\Anaconda3\lib\site-packages\aiohttp\connector.py", line 748, in _create_connection
_, proto = yield from self._create_direct_connection(req)
File "C:\ProgramData\Anaconda3\lib\site-packages\aiohttp\connector.py", line 859, in _create_direct_connection
raise last_exc
File "C:\ProgramData\Anaconda3\lib\site-packages\aiohttp\connector.py", line 831, in _create_direct_connection
req=req, client_error=client_error)
File "C:\ProgramData\Anaconda3\lib\site-packages\aiohttp\connector.py", line 803, in _wrap_create_connection
raise client_error(req.connection_key, exc) from exc
aiohttp.client_exceptions.ClientConnectorError: Cannot connect to host localhost:8080 ssl:False [Connect call failed ('::1', 8080)]
I am running:
Python 3.6.1
Anaconda 4.4.0 (64-bit)
Windows 10
aiohttp 2.3.6
The client script works if I am only making 1 request (which isn't very useful), or if I make requests to example.com, but not to localhost. What am I missing here?
Replace localhost with 127.0.0.1 in the client code.
It looks like localhost is resolved to ::1, but the server is listening on 127.0.0.1 only.
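Concretely, that is a one-line change in run(); the getaddrinfo check below is just a quick way to confirm the diagnosis on your machine:

import socket

# See what 'localhost' resolves to here; an IPv6 entry ('::1', ...) explains
# why the connection attempt goes to ::1 instead of 127.0.0.1.
print(socket.getaddrinfo('localhost', 8080))

# The fix: point the client at the IPv4 loopback address explicitly.
url = "http://127.0.0.1:8080/"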

sending chunks of data using webob

I tried to write a simple server-client program using webob:
1. The client sends data using 'Transfer-Encoding: chunked'.
2. The received data is then printed on the server side.
The server (server.py) receives the data correctly. However, I get a bunch of error messages from webob.
Could anyone kindly tell me what happened, or give a simple guideline for writing such a simple program (sending chunks) with webob?
Thanks!
The code and error are below:
server.py
from webob import Request
from webob import Response

class ChunkApp(object):
    def __init__(self):
        pass

    def __call__(self, environ, start_response):
        req = Request(environ)
        for buf in self.chunkreadable(req.body_file, 65535):
            print buf
        resp = Response('chunk received')
        return resp(environ, start_response)

    def chunkreadable(self, iter, chunk_size=65536):
        return self.chunkiter(iter, chunk_size) if \
            hasattr(iter, 'read') else iter

    def chunkiter(self, fp, chunk_size=65536):
        while True:
            chunk = fp.read(chunk_size)
            if chunk:
                yield chunk
            else:
                break

if __name__ == '__main__':
    app = ChunkApp()
    from wsgiref.simple_server import make_server
    httpd = make_server('localhost', 8080, app)
    print 'Serving on http://localhost:%s' % '8080'
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        print '^C'
client.py
from httplib import HTTPConnection

conn = HTTPConnection("localhost:8080")
conn.putrequest('PUT', "/")

body = "1234567890"
conn.putheader('Transfer-Encoding', 'chunked')
conn.endheaders()

for chunk in body:
    #print '%x\r\n%s\r\n' % (len(chunk), chunk)
    conn.send('%x\r\n%s\r\n' % (len(chunk), chunk))
conn.send('0\r\n\r\n')
Error
Traceback (most recent call last):
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/wsgiref/handlers.py", line 86, in run
self.finish_response()
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/wsgiref/handlers.py", line 127, in finish_response
self.write(data)
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/wsgiref/handlers.py", line 210, in write
self.send_headers()
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/wsgiref/handlers.py", line 268, in send_headers
self.send_preamble()
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/wsgiref/handlers.py", line 192, in send_preamble
'Date: %s\r\n' % format_date_time(time.time())
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/socket.py", line 324, in write
self.flush()
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/socket.py", line 303, in flush
self._sock.sendall(view[write_offset:write_offset+buffer_size])
error: [Errno 32] Broken pipe
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 52575)
Traceback (most recent call last):
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/SocketServer.py", line 284, in _handle_request_noblock
self.process_request(request, client_address)
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/SocketServer.py", line 310, in process_request
self.finish_request(request, client_address)
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/SocketServer.py", line 323, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/SocketServer.py", line 641, in __init__
self.finish()
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/SocketServer.py", line 694, in finish
self.wfile.flush()
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/socket.py", line 303, in flush
self._sock.sendall(view[write_offset:write_offset+buffer_size])
error: [Errno 32] Broken pipe
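For what it's worth, the traceback shows the server dying with a broken pipe while writing its response, which usually means the client has already closed the connection. client.py above sends the final zero-length chunk and then exits without ever reading the reply, so the 'chunk received' response has nowhere to go. A small sketch of a tweak (an assumption about the cause, not a verified fix): read the response before closing.

# at the end of client.py, after conn.send('0\r\n\r\n'):
resp = conn.getresponse()          # wait for and read the server's reply
print resp.status, resp.read()
conn.close()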
