I understand how to do this using the requests library:

import requests
import json

def start_requests(self):
    token = requests.get('https://support.hpe.com/hpesc/public/km/api/coveo/search/token').text
    headers = {
        ...
        'Authorization': f'Bearer {json.loads(token)["persistentSearchToken"]}',
        ...
    }
How can I do this using Scrapy?
At first I thought of doing this:
def start_requests(self):
    token = scrapy.Request(
        url='https://support.hpe.com/hpesc/public/km/api/coveo/search/token',
        callback=self.get_token
    )
    headers = {
        ...
        'Authorization': f'Bearer {json.loads(token)["persistentSearchToken"]}',
        ...
    }

def get_token(self, response):
    return response.text
But as it turned out, the "token" variable is not an object of the "Response" class; it is an object of the "Request" class.
Try this. Scrapy is asynchronous: constructing a Request does not fetch anything by itself, so the token can only be read inside the callback once the Response arrives. Request the token first, then build the authorized request(s) in the callback:

def start_requests(self):
    # Fetch the token first; the authorized headers can only be
    # built once this response arrives in the callback.
    yield scrapy.Request(
        url='https://support.hpe.com/hpesc/public/km/api/coveo/search/token',
        callback=self.get_token
    )

def get_token(self, response):
    token = json.loads(response.text)["persistentSearchToken"]
    headers = {
        ...
        'Authorization': f'Bearer {token}',
        ...
    }
    # Now yield the actual request(s) using the authorized headers.
    yield scrapy.Request(url=..., headers=headers, callback=self.parse)
I have

file1.py

def _request_token(self):
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    data = {
        'id': self.id,
    }
    response = requests.request(
        method="POST",
        url=self.url,
        headers=headers,
        data=data
    )
    self.token_json = response.json()
test_token.py

from unittest.mock import patch
import json

def test_request_token(test_token):
    with patch('mod1.file1.requests.request') as mock_request:
        mock_request.return_value = json.loads('{"response": "returned_data"}')
        res = test_token._request_token()
        assert res.token_json == {"response": "returned_data"}
conftest.py
#pytest.fixture(scope="session")
def test_token(test_container):
return test_container.token_mgr(id="abc")
#pytest.fixture(scope="session")
def test_container():
test_container = initialize(test_yaml_dir)
return test_container
I'm using dependency-injector, and this is the traceback I currently see:
AttributeError: 'dict' object has no attribute 'json'
Do I need to fully mock a response, using a different method?
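Yes, the mock needs to stand in for the Response object itself, not for the parsed dict, so that .json() is callable on it. A minimal sketch of one way to do this (assuming file1.py lives under mod1, as in the patch target); note also that _request_token returns None and sets self.token_json, so the assertion should be on the fixture object rather than on the return value:

from unittest.mock import patch, MagicMock

def test_request_token(test_token):
    with patch('mod1.file1.requests.request') as mock_request:
        # Mock the Response object: .json() must be a method that
        # returns the payload, not the payload itself.
        mock_response = MagicMock()
        mock_response.json.return_value = {"response": "returned_data"}
        mock_request.return_value = mock_response

        test_token._request_token()  # returns None; sets self.token_json
        assert test_token.token_json == {"response": "returned_data"}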
I am checking a bunch of website response statuses and exporting them to a CSV file. A couple of the websites raise DNSLookupError (or "NO WEBSITE FOUND"), and nothing gets stored in the CSV file for them. How can I store the DNSLookupError message in the CSV along with the URL?
def parse(self, response):
    yield {
        'URL': response.url,
        'Status': response.status
    }
You can use an errback function to catch DNS errors or any other type of error. See the sample usage below.
import scrapy
from twisted.internet.error import DNSLookupError

class TestSpider(scrapy.Spider):
    name = 'test'
    allowed_domains = ['example.com']

    def start_requests(self):
        yield scrapy.Request(url="http://example.com/error", errback=self.parse_error)

    def parse_error(self, failure):
        if failure.check(DNSLookupError):
            # this is the original request
            request = failure.request
            yield {
                'URL': request.url,
                'Status': failure.value
            }

    def parse(self, response):
        yield {
            'URL': response.url,
            'Status': response.status
        }
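Note that non-200 responses are also routed to the errback (wrapped in HttpError) rather than to parse, so if you want those status codes in the CSV as well, the errback can be extended. A hedged sketch, reusing the spider above:

from scrapy.spidermiddlewares.httperror import HttpError

def parse_error(self, failure):
    request = failure.request
    if failure.check(DNSLookupError):
        yield {'URL': request.url, 'Status': 'DNSLookupError'}
    elif failure.check(HttpError):
        # For HTTP errors the response object is attached to the failure.
        yield {'URL': request.url, 'Status': failure.value.response.status}
    else:
        # Fall back to the error's string form for anything else.
        yield {'URL': request.url, 'Status': str(failure.value)}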
I'm trying to work through this signed-request example script with no success.
I notice that both "http_method" and "params" in "def send_signed_request" are greyed out, indicating unused code.
I tried adding the params to the request as below, but that doesn't solve the unused-code issue; both still show as unused.

response = send_signed_request('POST', '/sapi/v1/margin/loan', {"asset": "", "isIsolated": "TRUE", "symbol": "", "amount": ""} )
print(response)

I'm just learning Python, so I'm probably missing some assumed knowledge; I've been reading a lot to no avail before posting.
I read somewhere that Binance is seeing lots of traders spending hours trying to solve signature authentication, as I am, so maybe this will help others in the same situation.
Thanks in advance to anyone who takes a look.
Just to clarify, the script didn't come from Binance; I'd have to dig for the link if anyone wants it.
import hmac
import time
import hashlib
import requests
from urllib.parse import urlencode

KEY = ''
SECRET = ''
BASE_URL = 'https://sapi.binance.com' # production base url
#BASE_URL = 'https://testnet.binancefuture.com' # testnet base url

''' ====== begin of functions, you don't need to touch ====== '''

def hashing(query_string):
    return hmac.new(SECRET.encode('utf-8'), query_string.encode('utf-8'), hashlib.sha256).hexdigest()

def get_timestamp():
    return int(time.time() * 1000)

def dispatch_request(http_method):
    session = requests.Session()
    session.headers.update({
        'Content-Type': 'application/json;charset=utf-8',
        'X-MBX-APIKEY': KEY
    })
    return {
        'GET': session.get,
        'DELETE': session.delete,
        'PUT': session.put,
        'POST': session.post,
    }.get(http_method, 'GET')

# used for sending request requires the signature
def send_signed_request(http_method, url_path, payload={}):
    query_string = urlencode(payload)
    url = BASE_URL + url_path + '?' + query_string + '&signature=' + hashing(query_string)
    params = {'url': url, 'params': {}}

# used for sending public data request
# def send_public_request(url_path, payload={}):
#     query_string = urlencode(payload, True)
#     url = BASE_URL + url_path
#     if query_string:
#         url = url + '?' + query_string
#     print("{}".format(url))
#     response = dispatch_request('GET')(url=url)
#     return response.json()

response = send_signed_request('POST', '/sapi/v1/margin/loan', params )
print(response)
Below is a working script if anyone has the same issue. The two parameters showed as unused because the original send_signed_request built the URL and params but never called dispatch_request(http_method) or returned a response; the version below does both, and also appends the required timestamp to the query string before signing.
import hmac
import time
import hashlib
import requests
from urllib.parse import urlencode

KEY = ''
SECRET = ''
BASE_URL = 'https://api.binance.com' # production base url
#BASE_URL = 'https://testnet.binancefuture.com' # testnet base url

''' ====== begin of functions, you don't need to touch ====== '''

def hashing(query_string):
    return hmac.new(SECRET.encode('utf-8'), query_string.encode('utf-8'), hashlib.sha256).hexdigest()

def get_timestamp():
    return int(time.time() * 1000)

def dispatch_request(http_method):
    session = requests.Session()
    session.headers.update({
        'Content-Type': 'application/json;charset=utf-8',
        'X-MBX-APIKEY': KEY
    })
    return {
        'GET': session.get,
        'DELETE': session.delete,
        'PUT': session.put,
        'POST': session.post,
    }.get(http_method, session.get)  # fall back to GET for unknown methods

# used for sending a request that requires the signature
def send_signed_request(http_method, url_path, payload={}):
    query_string = urlencode(payload)
    # replace single quote with double quote
    query_string = query_string.replace('%27', '%22')
    if query_string:
        query_string = "{}&timestamp={}".format(query_string, get_timestamp())
    else:
        query_string = 'timestamp={}'.format(get_timestamp())
    url = BASE_URL + url_path + '?' + query_string + '&signature=' + hashing(query_string)
    print("{} {}".format(http_method, url))
    params = {'url': url, 'params': {}}
    response = dispatch_request(http_method)(**params)
    print(response)
    return response.json()

# used for sending public data request
def send_public_request(url_path, payload={}):
    query_string = urlencode(payload, True)
    url = BASE_URL + url_path
    if query_string:
        url = url + '?' + query_string
    print("{}".format(url))
    response = dispatch_request('GET')(url=url)
    return response.json()

#response = send_signed_request('GET', '/api/v3/time')
#print(response)

response = send_signed_request('POST', '/sapi/v1/margin/loan', {"asset": "SHIB", "isIsolated": "TRUE", "symbol": "SHIBUSDT", "amount": "1000.00"} )
print(response)
I have to do a load test using locust.io for multiple APIs. I want to execute 10 requests for each and every API (suppose I have 10 APIs, then each API should get 10 requests), and the process should terminate automatically once all the requests are done.
Below is the code snippet I tried using a for loop, but it has not worked yet.
class UserBehavior(TaskSet):
    def __init__(self, parent):
        super().__init__(parent)
        self.camp_code = random.choice(t.camp_code)
        self.pgm_id = random.choice(t.pgm_id)

    @task
    def alloction_details(self):
        pgm_id = self.pgm_id
        camp_code = self.camp_code
        print("L21", camp_code)
        myheaders = {'Content-Type': 'application/json', 'Accept': 'application/json'}
        load = {
            "Code": str(camp_code),
            "Id": str(pgm_id)}
        # print("L43", users)
        for i in range(10):
            if i == 9:
                self.interrupt()
            else:
                with self.client.post('/ContentDetails', data=json.dumps(load), headers=myheaders, catch_response=True) as response:
                    print(response.status_code)
                    if response.status_code == 200:
                        response.success()
                        # res = response.json()
                        print("Response of allocation details ", response.text, '\n\n')
                    else:
                        response.failure('API failure')

    @task
    def get_content_details(self):
        myheaders = {'Content-Type': 'application/json', 'Accept': 'application/json'}
        pgm_id = self.pgm_id
        camp_code = self.camp_code
        for i in range(10):
            if i == 9:
                # sleep(1000)
                self.interrupt()
            else:
                with self.client.post('/ScheduleDetails?Code=' + str(camp_code), catch_response=True) as response:
                    print(response.status_code)
                    if response.status_code == 200:
                        response.success()
                        # res = response.json()
                        print("Response : ", response.text, '\n\n')
                    else:
                        response.failure('API failure')

class WebsiteUser(HttpUser):
    tasks = [UserBehavior]
    host = 'localhost'
    wait_time = between(1, 3)
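One thing to note: TaskSet.interrupt() only hands control back to the parent; it does not stop the test. A minimal sketch of one way to get exactly 10 requests per endpoint and then shut the run down automatically, assuming a single simulated user and keeping the endpoint paths from the snippet above (the payload values are placeholders):

import json
from locust import HttpUser, task, between
from locust.exception import StopUser

class WebsiteUser(HttpUser):
    host = 'http://localhost'
    wait_time = between(1, 3)

    @task
    def run_fixed_batch(self):
        myheaders = {'Content-Type': 'application/json', 'Accept': 'application/json'}
        load = {"Code": "camp_code", "Id": "pgm_id"}  # placeholder payload
        # Fire exactly 10 requests at each endpoint.
        for _ in range(10):
            self.client.post('/ContentDetails', data=json.dumps(load), headers=myheaders)
        for _ in range(10):
            self.client.post('/ScheduleDetails?Code=camp_code')
        # All batches are done: stop the whole run, then stop this user.
        self.environment.runner.quit()
        raise StopUser()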
I'm trying to get the emotions from a video.
Below is my code.
Whenever I run this code I get this error:
b'{"error":{"code":"BadArgument","message":"Failed to deserialize JSON request."}}' — any idea why?
import http.client, urllib.request, urllib.parse, urllib.error, base64, sys

headers = {
    'Content-Type': 'application/json',
    'Ocp-Apim-Subscription-Key': 'xxxxxxxxxxx',
}

params = urllib.parse.urlencode({
})

body = "{ 'url': 'http://www.dropbox.com/s/zfmaswf8s9c58om/blog2.mp4' }"

try:
    conn = http.client.HTTPSConnection('westus.api.cognitive.microsoft.com')
    conn.request("POST", "/emotion/v1.0/recognizeinvideo?%s" % params, "{body}", headers)
    response = conn.getresponse()
    data = response.read()
    print(data)
    conn.close()
except Exception as e:
    print(e.args)
You forgot to substitute the placeholder {body} with the real thing.
conn.request("POST", "/emotion/v1.0/recognizeinvideo?%s" % params, body, headers)
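Even with that fix, the body string uses single quotes, which is not valid JSON and will likely keep triggering the same deserialization error. A safer sketch is to build the payload with json.dumps:

import json

# json.dumps produces valid JSON (double-quoted keys/values),
# which the API's deserializer expects.
body = json.dumps({'url': 'http://www.dropbox.com/s/zfmaswf8s9c58om/blog2.mp4'})
conn.request("POST", "/emotion/v1.0/recognizeinvideo?%s" % params, body, headers)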