Client request gets stuck when server is down - python-3.x

I have a client that does a simple request to a server. It usually works fine, but if the server is down or simply doesn't exist, the request gets stuck and never returns.
This is my code:
import requests
from requests.exceptions import Timeout

class Client:
    def __init__(self):
        # do things for initialization
        pass

    def do_request(self):
        request_url = 'http://fhksdjhfksdhfk.com'
        try:
            response = requests.post(request_url, timeout=5)
            print('Response received from registration:', response)  # <-- never reaches this statement
            if response.status_code != 200:
                print('Request error at', request_url, 'error:', response.reason)
            else:
                print('Request finished successfully')
        except Timeout:
            print('Request timeout')
Any clue?
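For what it's worth, here is a sketch of the same call written so it fails fast instead of hanging: a (connect, read) timeout pair and an explicit handler for connection failures, since a non-existent host raises ConnectionError rather than Timeout. The URL and function name are the ones from the question; whether this actually removes the hang depends on what the server is doing.

import requests
from requests.exceptions import ConnectionError, Timeout

def do_request():
    request_url = 'http://fhksdjhfksdhfk.com'
    try:
        # (connect timeout, read timeout) in seconds
        response = requests.post(request_url, timeout=(3, 5))
    except Timeout:
        print('Request timeout')
    except ConnectionError as err:
        # raised when the host does not resolve or refuses the connection
        print('Connection failed:', err)
    else:
        print('Response received:', response.status_code)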

Related

Access vault credentials using API

I am trying to access Vault credentials using the Python API. I am doing something wrong, as I always get "access denied". I have used the following code:
def fetch():
    url1 = "http://127.0.0.1:8200/v1/auth/gcp/config"
    payload1 = {}
    headers2 = {
        'X-VAULT-TOKEN': 's.DsqQCKCY1JMhSe1k8A5rIyku'
    }
    try:
        response1 = requests.request("GET", url1, headers=headers2, data=payload1)
        return response1.text
    except Exception as err1:
        return str(err1)

    url = "http://127.0.0.1:8200/v1/myengine/data/myspringapplication/staging"
    payload = {}
    headers = {
        'X-Vault-Token': 'myroot'
    }
    try:
        response = requests.request("GET", url, headers=headers, data=payload)
        return response.text
    except Exception as err:
        return str(err)
When you are sending a request that contains a payload, you should use the POST method; I would write it like this. If that doesn't work, the payload is empty anyway, so maybe GET is the right method after all; try removing data=payload.
import requests

payload = {}
# https://www.vaultproject.io/api/auth/gcp
url = "http://127.0.0.1:8200/v1/sys/auth"

ses = requests.session()
ses.headers.update({'X-VAULT-TOKEN': 's.Ezr0s63KQhW72knZHCIkjMHh'})

try:
    r = ses.get(url, timeout=60)
    if r.status_code == 200:
        print(r.json())
    else:
        print("bad status code", r.status_code)
except requests.exceptions.Timeout:
    pass
Edit: I changed the code to use a default URL for testing, and it doesn't need a payload, so a plain GET request is fine; this returns data for me.
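If the goal is the secret path from the original question, the same session pattern can be pointed at that endpoint. A sketch, assuming a KV version 2 secrets engine named myengine (which is what the /data/ segment in the URL suggests) and the 'myroot' token from the question:

import requests

ses = requests.session()
ses.headers.update({'X-Vault-Token': 'myroot'})

url = "http://127.0.0.1:8200/v1/myengine/data/myspringapplication/staging"
try:
    r = ses.get(url, timeout=60)
    if r.status_code == 200:
        # KV v2 nests the secret under data -> data
        print(r.json()["data"]["data"])
    else:
        print("bad status code", r.status_code, r.text)
except requests.exceptions.Timeout:
    pass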

Django: Unable to run async requests.get on google. Exception Response can't be used in 'await' expression

I am unable to understand how async works. I send a simple GET request to Google through a proxy to check the validity of the proxy in an async method. I get the error:
'''object Response can't be used in 'await' expression'''
Method to get proxies. Code for getting the list of proxies is copied from a tutorial:
def get_proxies(self, number_of_proxies=10):
    """Returns max 10 free https proxies by scraping
    free-proxy website.
    #arg number_of_proxies to be returned"""
    try:
        if number_of_proxies > 10:
            number_of_proxies = 10
        url = 'https://abc-list.net/'
        response = requests.get(url)
        response_text = response.text
        parser = fromstring(response_text)
        proxies = set()
        for i in parser.xpath('//tbody/tr'):
            if len(proxies) >= number_of_proxies:
                break
            if i.xpath('.//td[7][contains(text(),"yes")]'):
                # Grabbing IP and corresponding PORT
                proxy = ":".join([i.xpath('.//td[1]/text()')[0],
                                  i.xpath('.//td[2]/text()')[0]])
                proxies.add(proxy)
        return proxies
    except Exception as e:
        print('Exception while abc list from url: ', e)
        return None
Method to check the validity of proxy:
async def is_valid_proxy(self, proxy):
    """Check the validity of a proxy by sending
    get request to google using the given proxy."""
    try:
        response = await requests.get("http://8.8.4.4",
                                       proxies={"http": proxy, "https": proxy},
                                       timeout=10)
        if await response.status_code == requests.codes.ok:
            print('got a valid proxy')
            return True
    except Exception as e:
        print('Invalid proxy. Exception: ', e)
        return False
Method to get the valid proxies:
async def get_valid_proxies(self, number_of_proxies=10):
    proxies = self.get_proxies(number_of_proxies)
    print(len(proxies), proxies)
    valid_proxies = []
    valid_proxies = await asyncio.gather(*[proxy for proxy in proxies if await self.is_valid_proxy(proxy)])
    return valid_proxies
And a call to the above method:
proxies = asyncio.run(get_valid_proxies())
Now the best solution for me would be to check the validity of a proxy in def get_proxies(self, number_of_proxies=10): before adding it to the proxies set, but I have no clue how to achieve that in an async way. Therefore I tried the workaround above, but that is not working either. The method works without async, but I call it many times and it is very slow, which is why I would like to make it async.
Thank you
After changing the above code to use aiohttp, it still throws an exception and doesn't look asynchronous: the requests seem to be sent one after another, and it is as slow as before.
New is_valid_proxy:
async def is_valid_proxy(self, proxy):
    try:
        async with aiohttp.ClientSession() as session:
            session.proxies = {"http": proxy, "https": proxy}
            async with session.get('http://8.8.4.4',
                                   timeout=10) as response:
                status_code = await response.status_code
                # response = await requests.get("https://www.google.com/", proxies={"http": proxy, "https": proxy}, timeout=10)
                # if await response.status_code == requests.codes.ok:
                if status_code == requests.codes.ok:
                    print('got a valid proxy')
                    return True
    except Exception as e:
        print('Invalid proxy. Exception: ', e)
        return False
It won't even display the error or exception. Here is the message:
Invalid proxy. Exception:
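For what it's worth, aiohttp has a different interface from requests: there is no proxies dict on the session, the proxy is passed per request as proxy=..., the status lives in the plain attribute response.status (not an awaitable status_code), and the checks only run concurrently if the un-awaited coroutines are handed to asyncio.gather. A rough sketch under those assumptions:

import asyncio
import aiohttp

async def is_valid_proxy(proxy):
    """Return True if a GET through the given 'ip:port' proxy succeeds."""
    try:
        timeout = aiohttp.ClientTimeout(total=10)
        async with aiohttp.ClientSession(timeout=timeout) as session:
            # aiohttp takes a single proxy URL per request
            async with session.get("http://8.8.4.4", proxy=f"http://{proxy}") as response:
                return response.status == 200  # .status, not .status_code
    except Exception as e:
        print('Invalid proxy. Exception:', e)
        return False

async def get_valid_proxies(proxies):
    # start all checks at once, then keep the proxies whose check passed
    results = await asyncio.gather(*(is_valid_proxy(p) for p in proxies))
    return [p for p, ok in zip(proxies, results) if ok]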

Handling POST requests with http.server module

I'm using Python 3.7.4 and the http.server module to receive POST requests that contain a file from an ERP.
Everything works fine (the file gets received and written correctly), except that the ERP gets a connection timeout error.
It's the first time I've used http.server, which looks pretty simple, but I'm surely missing something.
See the code below; isn't "self.send_response(200)" enough?
On the ERP vendor's website they provide an example in PHP for receiving the data:
if (move_uploaded_file($_FILES['file']['tmp_name'], "items.xml")) {
    echo "OK";
} else {
    echo "Error";
}
So the ERP expects "OK" after a successful connection/transfer.
Here is my Python code:
from http.server import BaseHTTPRequestHandler, HTTPServer
import time
import cgi

class Test_Server(BaseHTTPRequestHandler):
    def do_POST(self):
        print("POST request received")
        self.send_response(200)
        form = cgi.FieldStorage(fp=self.rfile,
                                headers=self.headers,
                                environ={'REQUEST_METHOD': 'POST',
                                         'CONTENT_TYPE': self.headers['Content-Type']})
        f = open("data/test-orig.xml", "wb+")
        f.write(form['file'].value)
        f.close()

httpd = HTTPServer((hostName, hostPort), Test_Server)
print(time.asctime(), "Server Starts - %s:%s" % (hostName, hostPort))
try:
    httpd.serve_forever()
except KeyboardInterrupt:
    pass
httpd.server_close()
print(time.asctime(), "Server Stops - %s:%s" % (hostName, hostPort))
Best regards,
cid
Managed to do it easily with Flask:
from flask import Flask
from flask import request

app = Flask(__name__)

@app.route('/post-data', methods=['POST'])
def test_server():
    data = request.files['file']
    data.save('data/test.xml')
    return "OK"

if __name__ == '__main__':
    app.run(host='0.0.0.0')
Solved!
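For the record, the original http.server handler probably caused the ERP's timeout because the response is never finished: after self.send_response(200) it still needs end_headers() and a body written to self.wfile before the client sees its "OK". A minimal sketch of the do_POST from the question with those lines added:

def do_POST(self):
    print("POST request received")
    form = cgi.FieldStorage(fp=self.rfile,
                            headers=self.headers,
                            environ={'REQUEST_METHOD': 'POST',
                                     'CONTENT_TYPE': self.headers['Content-Type']})
    with open("data/test-orig.xml", "wb") as f:
        f.write(form['file'].value)
    # complete the HTTP response so the ERP is not left waiting
    self.send_response(200)
    self.send_header('Content-Type', 'text/plain')
    self.end_headers()
    self.wfile.write(b"OK")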

Message: Unsupported method ('POST'). Error 501 Python

I am trying to learn about HTTP servers in a Udacity online course. The thing is that the following code triggers the error Message: Unsupported method ('POST'). Error 501:
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import parse_qs

class MessageHandler(BaseHTTPRequestHandler):
    def do_POST(self):
        # 1. How long was the message?
        length = int(self.headers.get('Content-length', 0))

        # 2. Read the correct amount of data from the request.
        data = self.rfile.read(length).decode()

        # 3. Extract the "message" field from the request data.
        message = parse_qs(data)["message"][0]

        # Send the "message" field back as the response.
        self.send_response(200)
        self.send_header('Content-type', 'text/plain; charset=utf-8')
        self.end_headers()
        self.wfile.write(message.encode())

if __name__ == '__main__':
    server_address = ('', 8000)
    httpd = HTTPServer(server_address, MessageHandler)
    httpd.serve_forever()
Which Python version are you using? Your code is correct. I tested it just now and it sends the response.
The only modification I made is:
#message = parse_qs(data)["message"][0]
message = 'hello'
Client code:
import requests
res = requests.post('http://localhost:8000/abc', data = {'key':'value'})
print(res)
The client gets a 200 response.
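As a side note, http.server replies "501 Unsupported method ('POST')" only when the handler class it was given has no do_POST attribute, so when that error does appear with code like this it is usually an indentation slip that leaves do_POST at module level instead of inside the class, roughly:

# do_POST accidentally dedented out of the class: the server can't find it
class MessageHandler(BaseHTTPRequestHandler):
    pass

def do_POST(self):
    ...  # never called; every POST gets "501 Unsupported method ('POST')"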

How to capture API failure while using oauthlib.oauth2 fetch_token

The Python3 fetch_token method in this library does not check the response status before consuming the response. If the API call it makes fails, then the response will be invalid and the script crashes. Is there something I can set so that an exception will be raised on a non-success response before the library can read the response?
import requests
from requests.auth import HTTPBasicAuth
from requests_oauthlib import OAuth2Session
from oauthlib.oauth2 import BackendApplicationClient
from oauthlib.oauth2 import OAuth2Error

AUTH_TOKEN_URL = "https://httpstat.us/500"  # For testing
AUTH = HTTPBasicAuth("anID", "aSecret")
CLIENT = BackendApplicationClient(client_id="anID")
SCOPES = "retailer.orders.write"
MAX_API_RETRIES = 4

class MyApp:
    def __init__(self):
        """Initialize ... and obtain initial auth token for request"""
        self.client = OAuth2Session(client=CLIENT)
        self.client.headers.update(
            {
                "Content-Type": "application/json"
            }
        )
        self.__authenticate()

    def __authenticate(self):
        """Obtain auth token."""
        server_errors = 0
        # This needs more work. fetch_token is not raising errors but failing
        # instead.
        while True:
            try:
                self.token = self.client.fetch_token(
                    token_url=AUTH_TOKEN_URL, auth=AUTH, scope=SCOPES
                )
                break
            except (OAuth2Error, requests.exceptions.RequestException) as e:
                server_errors = MyApp.__process_retry(
                    server_errors, e, None, MAX_API_RETRIES
                )

    @staticmethod
    def __process_retry(errors, exception, resp, max_retries):
        # Log and process retries
        # ...
        return errors + 1

MyApp()  # Try it out
You can add a "compliance hook" that will be passed the Response object from requests before the library attempts to parse it, like so:
def raise_on_error(response):
    response.raise_for_status()
    return response

self.client.register_compliance_hook('access_token_response', raise_on_error)
Depending on exactly when you may get errors, you might want to do this with 'refresh_token_response' and/or 'protected_request' as well. See the docstring for the register_compliance_hook method for more info.
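Note that response.raise_for_status() raises requests.exceptions.HTTPError, which is a subclass of RequestException, so the except clause already present in __authenticate will catch it and route it through the retry logic. A sketch of registering the hook when the session is created, before the first fetch_token call:

def __init__(self):
    """Initialize ... and obtain initial auth token for request"""
    self.client = OAuth2Session(client=CLIENT)
    self.client.headers.update({"Content-Type": "application/json"})
    # install the hook before __authenticate() calls fetch_token
    self.client.register_compliance_hook("access_token_response", raise_on_error)
    self.__authenticate()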
