Message: Unsupported method ('POST'). Error 501 Python - python-3.x

I am trying to learn some HTTP server basics in a Udacity online course. The problem is that the following code triggers the error Message: Unsupported method ('POST'). Error 501:
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import parse_qs

class MessageHandler(BaseHTTPRequestHandler):
    def do_POST(self):
        # 1. How long was the message?
        length = int(self.headers.get('Content-length', 0))
        # 2. Read the correct amount of data from the request.
        data = self.rfile.read(length).decode()
        # 3. Extract the "message" field from the request data.
        message = parse_qs(data)["message"][0]
        # Send the "message" field back as the response.
        self.send_response(200)
        self.send_header('Content-type', 'text/plain; charset=utf-8')
        self.end_headers()
        self.wfile.write(message.encode())

if __name__ == '__main__':
    server_address = ('', 8000)
    httpd = HTTPServer(server_address, MessageHandler)
    httpd.serve_forever()

Which Python version are you using? Your code is correct. I tested it just now and it sends the response.
The only modification I've made is:
#message = parse_qs(data)["message"][0]
message = 'hello'
Client code:
import requests
res = requests.post('http://localhost:8000/abc', data = {'key':'value'})
print(res)
The client gets a 200 response.
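For what it's worth, the handler also works with the original parse_qs line left in, as long as the client actually sends a message form field. A minimal sketch of such a request (URL and field name taken from the question's code):

import requests

res = requests.post('http://localhost:8000/', data={'message': 'Hello HTTP!'})
print(res.status_code, res.text)  # expected output: 200 Hello HTTP!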

Related

Slack API request, limiting to 1 request per DAG failure (Airflow)

Hello jr data engineer here!
For some strange reason my task_fail_slack_alert module is triggering the Slack API request a ridiculous number of times, which then shows up in our Slack channel that many times and is really annoying. My module should only run and post to the Slack channel once per failed task.
What am I missing?
import os
import json
import requests
from airflow.models import Variable

def get_channel_name():
    channel = '#airflow_alerts_local'
    env = Variable.get('env', None)
    if env == 'prod':
        channel = '#airflow_alerts'
    elif env == 'dev':
        channel = '#airflow_alerts_dev'
    return channel

def task_fail_slack_alert(context):
    webhook_url = os.environ.get('SLACK_URL')
    slack_data = {
        'channel': get_channel_name(),
        'text':
            """ :red_circle: Task Failed.
            *Task*: {task}
            *Dag*: {dag}
            *Execution Time*: {exec_date}
            *Log Url*: {log_url}
            """.format(
                task=context.get('task_instance').task_id,
                dag=context.get('task_instance').dag_id,
                ti=context.get('task_instance'),
                exec_date=context.get('execution_date'),
                log_url=context.get('task_instance').log_url,
            )}
    response = requests.post(webhook_url, data=json.dumps(slack_data),
                             headers={'Content-Type': 'application/json'})
    if response.status_code != 200:
        raise ValueError('Request to slack returned an error %s, the response is:\n%s'
                         % (response.status_code, response.text))
    task_fail_slack_alert(context)
This is how it appears in the default arguments for each DAG:
default_args = {
    'on_failure_callback': task_fail_slack_alert,
}
The code you provided is recursive:
def task_fail_slack_alert(context):
    ......
    task_fail_slack_alert(context)
Remove the recursion as it's not needed.
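To make that concrete, a minimal sketch of the corrected callback (reusing the imports and get_channel_name from the question, with a shortened Slack message):

def task_fail_slack_alert(context):
    webhook_url = os.environ.get('SLACK_URL')
    slack_data = {
        'channel': get_channel_name(),
        'text': ':red_circle: Task Failed. *Task*: {task} *Dag*: {dag}'.format(
            task=context.get('task_instance').task_id,
            dag=context.get('task_instance').dag_id,
        ),
    }
    response = requests.post(webhook_url, data=json.dumps(slack_data),
                             headers={'Content-Type': 'application/json'})
    if response.status_code != 200:
        raise ValueError('Request to slack returned an error %s, the response is:\n%s'
                         % (response.status_code, response.text))
    # No recursive call here: Airflow invokes on_failure_callback once per failed task instance.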

Client request get stuck when server is down

I have a client that does a simple request to a server. It usually works fine, but if the server is down or simply doesn't exist, the request gets stuck and does nothing else.
This is my code:
import requests
from requests.exceptions import Timeout

class Client:
    def __init__(self):
        # do things for initialization
        pass

    def do_request(self):
        request_url = 'http://fhksdjhfksdhfk.com'
        try:
            response = requests.post(request_url, timeout=5)
            print('Response received from registration:', response)  # <-- never reaches this statement
            if response.status_code != 200:
                print('Request error at', request_url, 'error:', response.reason)
            else:
                print('Request finished successfully')
        except Timeout:
            print('Request timeout')
Any clue?
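One possible explanation (not stated in the original thread): requests.exceptions.Timeout only covers connect/read timeouts. A server that is down or a host that does not resolve raises ConnectionError instead, which this except clause does not handle. A sketch that catches the whole requests exception family, keeping the same URL and timeout:

import requests
from requests.exceptions import RequestException

def do_request():
    request_url = 'http://fhksdjhfksdhfk.com'
    try:
        # timeout=5 applies separately to connecting and to reading the response
        response = requests.post(request_url, timeout=5)
    except RequestException as exc:  # Timeout, ConnectionError, etc.
        print('Request failed:', exc)
        return
    if response.status_code != 200:
        print('Request error at', request_url, 'error:', response.reason)
    else:
        print('Request finished successfully')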

How to receive an image from a POST request on Google Cloud Function with Python?

I'm struggling to reassemble an image sent via a POST request to a GCP Cloud Function.
I've looked at advice here about how to package a file with a POST request.
I would like the function to reconstruct the image from bytes for further processing, but every time I send the request I get 'Failure' back. Any help would be really appreciated!
### client_side.py
import requests

url = 'https://region-project.cloudfunctions.net/function'  # Generic GCP functions URL
files = {'file': ('my_image.jpg', open('my_image.jpg', 'rb').read(), 'application/octet-stream')}
r = requests.post(url, files=files)

### gcp_function.py
from io import BytesIO

def handler(request):
    try:
        incoming = request.files.get('file')
        bytes = BytesIO(incoming)
        image = open_image(bytes)
        message = 'Success'
    except:
        message = 'Failure'
    return message
Sorted it.
I needed the read method to convert the FileStorage object to bytes.
### gcp_function.py
from io import BytesIO
import logging

def handler(request):
    try:
        incoming = request.files['file'].read()
        bytes = BytesIO(incoming)
        image = open_image(bytes)
        message = 'Success'
    except Exception as e:
        message = 'Failure'
        logging.critical(str(e))
    return message
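For reference, a self-contained variant of the working handler, assuming Pillow's Image.open stands in for the open_image helper used above:

from io import BytesIO
import logging

from PIL import Image  # assumption: open_image wraps PIL/Pillow

def handler(request):
    try:
        incoming = request.files['file'].read()  # FileStorage -> bytes
        image = Image.open(BytesIO(incoming))    # reconstruct the image
        message = 'Success: {} {}x{}'.format(image.format, *image.size)
    except Exception as e:
        message = 'Failure'
        logging.critical(str(e))
    return message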

Handling POST requests with http.server module

I'm using Python 3.7.4 and the http.server module to receive POST requests that contain a file from an ERP.
Everything works fine (the file gets received and written correctly), except that the ERP gets a connection timeout error.
It's the first time I've used http.server, which looks pretty simple, but I'm surely missing something.
See the code below; isn't "self.send_response(200)" enough?
On the ERP vendor's website they provide an example in PHP for receiving the data:
if (move_uploaded_file($_FILES['file']['tmp_name'], "items.xml")){
    echo "OK";
} else {
    echo "Error";
}
So the ERP expects "OK" after a successful connection/transfer.
Here is my Python code:
from http.server import BaseHTTPRequestHandler, HTTPServer
import time
import cgi

class Test_Server(BaseHTTPRequestHandler):
    def do_POST(self):
        print("POST request received")
        self.send_response(200)
        form = cgi.FieldStorage(
            fp=self.rfile,
            headers=self.headers,
            environ={'REQUEST_METHOD': 'POST',
                     'CONTENT_TYPE': self.headers['Content-Type'],})
        f = open("data/test-orig.xml", "wb+")
        f.write(form['file'].value)
        f.close()

httpd = HTTPServer((hostName, hostPort), Test_Server)
print(time.asctime(), "Server Starts - %s:%s" % (hostName, hostPort))
try:
    httpd.serve_forever()
except KeyboardInterrupt:
    pass
httpd.server_close()
print(time.asctime(), "Server Stops - %s:%s" % (hostName, hostPort))
Best regards,
cid
Managed to do it easily with Flask:
from flask import Flask
from flask import request

app = Flask(__name__)

@app.route('/post-data', methods=['POST'])
def test_server():
    data = request.files['file']
    data.save('data/test.xml')
    return "OK"

if __name__ == '__main__':
    app.run(host='0.0.0.0')
Solved!
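Flask works, but for completeness: the timeout with http.server most likely happens because the original handler never finishes the response. self.send_response(200) only queues the status line; end_headers() and a body with the "OK" the ERP checks for are still needed. A sketch of the do_POST under that assumption:

import cgi
from http.server import BaseHTTPRequestHandler

class Test_Server(BaseHTTPRequestHandler):
    def do_POST(self):
        form = cgi.FieldStorage(
            fp=self.rfile,
            headers=self.headers,
            environ={'REQUEST_METHOD': 'POST',
                     'CONTENT_TYPE': self.headers['Content-Type']},
        )
        with open('data/test-orig.xml', 'wb') as f:
            f.write(form['file'].value)
        self.send_response(200)
        self.send_header('Content-Type', 'text/plain')
        self.end_headers()
        self.wfile.write(b'OK')  # the body the ERP checks for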

How to capture API failure while using oauthlib.oauth2 fetch_token

The Python3 fetch_token method in this library does not check the response status before consuming the response. If the API call it makes fails, then the response will be invalid and the script crashes. Is there something I can set so that an exception will be raised on a non-success response before the library can read the response?
import requests
from requests.auth import HTTPBasicAuth
from requests_oauthlib import OAuth2Session
from oauthlib.oauth2 import BackendApplicationClient
from oauthlib.oauth2 import OAuth2Error

AUTH_TOKEN_URL = "https://httpstat.us/500"  # For testing
AUTH = HTTPBasicAuth("anID", "aSecret")
CLIENT = BackendApplicationClient(client_id="anID")
SCOPES = "retailer.orders.write"
MAX_API_RETRIES = 4

class MyApp:
    def __init__(self):
        """Initialize ... and obtain initial auth token for request"""
        self.client = OAuth2Session(client=CLIENT)
        self.client.headers.update(
            {
                "Content-Type": "application/json"
            }
        )
        self.__authenticate()

    def __authenticate(self):
        """Obtain auth token."""
        server_errors = 0
        # This needs more work. fetch_token is not raising errors but failing
        # instead.
        while True:
            try:
                self.token = self.client.fetch_token(
                    token_url=AUTH_TOKEN_URL, auth=AUTH, scope=SCOPES
                )
                break
            except (OAuth2Error, requests.exceptions.RequestException) as e:
                server_errors = MyApp.__process_retry(
                    server_errors, e, None, MAX_API_RETRIES
                )

    @staticmethod
    def __process_retry(errors, exception, resp, max_retries):
        # Log and process retries
        # ...
        return errors + 1

MyApp()  # Try it out
You can add a "compliance hook" that will be passed the Response object from requests before the library attempts to parse it, like so:
def raise_on_error(response):
    response.raise_for_status()
    return response

self.client.register_compliance_hook('access_token_response', raise_on_error)
Depending on exactly when you may get errors, you might want to do this with 'refresh_token_response' and/or 'protected_request' as well. See the docstring for the register_compliance_hook method for more info.
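For example, the hook could be registered right after the session is created. raise_for_status() raises requests.exceptions.HTTPError, which is a RequestException, so the retry loop in __authenticate already catches it. A sketch reusing the names from the question:

class MyApp:
    def __init__(self):
        """Initialize ... and obtain initial auth token for request"""
        self.client = OAuth2Session(client=CLIENT)
        self.client.headers.update({"Content-Type": "application/json"})
        # Raise on non-success token responses before oauthlib tries to parse them.
        self.client.register_compliance_hook('access_token_response', raise_on_error)
        self.__authenticate()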
