str(int(time.time())) gives error string indices must be integers in python 3 - python-3.x

I am new to Python. I created a pip package for my site's API, and in it I need to pass a nonce. I wrote the code like below:
payload = {}
headers = {}
if self.api_key is not None:
payload["api_key"] = self.api_key
if self.secret_key is not None:
payload["secret_key"] = self.secret_key
payload["request"] = method
payload["nonce"] = str(int(time.time()))
payload.update(kwargs)
headers["X-WCX-APIKEY"] = self.api_key
headers["X-WCX-PAYLOAD"] = base64.b64encode(json.dumps(payload).encode('utf-8'))
headers["X-WCX-SIGNATURE"] = 'SIGNATURE'
url = self.base_url.replace('API_CALL',method)
# update the parameters with the API key
session = requests.session()
response = session.post(url, data = payload, headers = headers)
When I run this code I get the error "string indices must be integers".
How can I fix it? Any help is appreciated.
Note: I have imported the "time" module.

Related

same API call on different URL's python/django

I'm trying to get all orders of multiple webshops with the same API call, but I'm not sure what the fastest/best way to accomplish this is. Every shop has its own KEY, SECRET & HOST, and I'm trying to figure out how to loop over the different shops and collect the results in the "orders" JSON dictionary in get_all_orders().
As you can see below i tried to loop through the API model which contains all API_KEY, API_Secret and API_Hosts. The API_prefix has to stay the same.
Any tips or suggestions ? Thanks in advance!
Here is a small example:
# API_Key = "<KEY>"
# API_Secret = "<SECRET>"
# API_Host = "<HOST>"
API_Prefix = "/api/rest/v1/"
def headers(method, uri, data):
for x in Api.objects.all():
upper_method = str.upper(method)
hash_string = x.API_Key + "|" + upper_method + "|" + uri + "|" + data
hash = hash_string
headers = {
"x-hash": hash,
"x-public": x.API_Key,
}
return headers
def get(path, data):
for x in Api.objects.all():
uri = API_Prefix + path
url = x.API_Host + uri
headers = headers("GET", uri, data)
results = requests.get(url, data=data, headers=headers)
return results
def get_all_orders(request):
res = get("/orders", "")
if res.status_code == 200:
orders = json.loads(res.text)
return render(request, "orders.html", {'orders': orders})

I am unable to obtain access token using OAUTH 2.0 SAML Assertion Bearer flow in python

I am trying to create a SAML Assertion and then post it to the salesforce
oauth endpoint to obtain the access token in response in my office sandbox environment.
The below code is based on the code in this repository: https://github.com/salesforceidentity/apex-saml-bearer-flow/blob/master/SAMLBearerAssertion.apex.txt — I have tried to convert
the code into python.
import datetime
import random
import hashlib
import binascii
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA256
from base64 import b64encode, b64decode,urlsafe_b64decode,urlsafe_b64encode
import requests
from xml.etree import ElementTree
class SFSAMLAssertion:
def __init__(self, subject, issuer, audience, action):
self.subject = subject
self.issuer = issuer
self.action = action
self.audience = audience
self.not_before = "2019-08-16T06:35:13.654Z"
self.not_on_or_after = "2024-08-14T06:35:13.654Z"
self.assertion_id = self.create_assertion_id()
self.encoded_key = ""
self.preCannonicalizedResponse = """<saml:Assertion xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" ID="ASSERTION_ID" IssueInstant="NOT_BEFORE" Version="2.0"><saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">ISSUER</saml:Issuer><saml:Subject><saml:NameID Format="urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified">SUBJECT</saml:NameID><saml:SubjectConfirmation Method="urn:oasis:names:tc:SAML:2.0:cm:bearer"><saml:SubjectConfirmationData NotOnOrAfter="NOT_ON_OR_AFTER" Recipient="RECIPIENT"></saml:SubjectConfirmationData></saml:SubjectConfirmation></saml:Subject><saml:Conditions NotBefore="NOT_BEFORE" NotOnOrAfter="NOT_ON_OR_AFTER"><saml:AudienceRestriction><saml:Audience>AUDIENCE</saml:Audience></saml:AudienceRestriction></saml:Conditions><saml:AuthnStatement AuthnInstant="NOT_BEFORE"><saml:AuthnContext><saml:AuthnContextClassRef>urn:oasis:names:tc:SAML:2.0:ac:classes:unspecified</saml:AuthnContextClassRef></saml:AuthnContext></saml:AuthnStatement></saml:Assertion>"""
self.preCannonicalizedSignedInfo = """<ds:SignedInfo xmlns:ds="http://www.w3.org/2000/09/xmldsig#"><ds:CanonicalizationMethod Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"></ds:CanonicalizationMethod><ds:SignatureMethod Algorithm="http://www.w3.org/2000/09/xmldsig#rsa-sha1"></ds:SignatureMethod><ds:Reference URI="#ASSERTION_ID"><ds:Transforms><ds:Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature"></ds:Transform><ds:Transform Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"></ds:Transform></ds:Transforms><ds:DigestMethod Algorithm="http://www.w3.org/2000/09/xmldsig#sha1"></ds:DigestMethod><ds:DigestValue>DIGEST</ds:DigestValue></ds:Reference></ds:SignedInfo>"""
self.signatureBlock = """<ds:Signature xmlns:ds="http://www.w3.org/2000/09/xmldsig#">SIGNED_INFO<ds:SignatureValue>SIGNATURE_VALUE</ds:SignatureValue></ds:Signature><saml:Subject>"""
def create_assertion_id(self):
num = random.randint(1, 999999)
hash = hashlib.sha256()
hash.update(bytes(num))
digest = hash.digest()
return binascii.hexlify(digest).decode('UTF-8')
def sign_signedinfo(self, data):
f = open('../etc/privateKey.pem', 'rb')
key = b64decode(self.encoded_key)
rsakey = RSA.importKey(f.read())
signer = PKCS1_v1_5.new(rsakey)
digest = SHA256.new()
digest.update(b64encode(data))
sign = signer.sign(digest)
return b64encode(sign)
def create_assertion(self):
self.preCannonicalizedResponse = self.preCannonicalizedResponse.replace('ASSERTION_ID',self.assertion_id)
self.preCannonicalizedResponse = self.preCannonicalizedResponse.replace('ISSUER', self.issuer)
self.preCannonicalizedResponse = self.preCannonicalizedResponse.replace('AUDIENCE', self.audience)
self.preCannonicalizedResponse = self.preCannonicalizedResponse.replace('RECIPIENT', self.action)
self.preCannonicalizedResponse = self.preCannonicalizedResponse.replace('SUBJECT', self.subject)
self.preCannonicalizedResponse = self.preCannonicalizedResponse.replace('NOT_BEFORE', self.not_before)
self.preCannonicalizedResponse = self.preCannonicalizedResponse.replace('NOT_ON_OR_AFTER', self.not_on_or_after)
# Prepare the digest
m = hashlib.sha256()
m.update(bytes(self.preCannonicalizedResponse, encoding='UTF-8'))
digest = b64encode(m.digest())
self.preCannonicalizedSignedInfo = self.preCannonicalizedSignedInfo.replace('ASSERTION_ID',self.assertion_id)
self.preCannonicalizedSignedInfo = self.preCannonicalizedSignedInfo.replace('DIGEST',digest.decode('UTF-8'))
# Prepare the signedinfo
input = bytes(self.preCannonicalizedSignedInfo,encoding='UTF-8')
signature = self.sign_signedinfo(input)
signature = b64encode(signature)
# Prepare the signature block
self.signatureBlock = self.signatureBlock.replace('SIGNED_INFO',self.preCannonicalizedSignedInfo)
self.signatureBlock = self.signatureBlock.replace('SIGNATURE_VALUE',signature.decode('UTF-8'))
self.preCannonicalizedResponse = self.preCannonicalizedResponse.replace('<saml:Subject>',self.signatureBlock)
return self.preCannonicalizedResponse
def get_base64urlencode_string(self):
data = self.create_assertion()
data = '<?xml version="1.0" encoding = "UTF-8"?>' + data
print("Assertion: "+data)
tree = ElementTree.ElementTree(ElementTree.fromstring(data))
root = tree.getroot()
output = urlsafe_b64encode(bytes(ElementTree.tostring(root, encoding='UTF-8',method = 'xml').decode(encoding='UTF-8'),encoding='UTF-8'))
return output
def postSAML(self):
url = "https://<sandbox_domain>.my.salesforce.com/services/oauth2/token"
assertion = self.get_base64urlencode_string()
print(assertion.decode('UTF-8'))
headers = {
"Content-Type" : "application/x-www-form-urlencoded"
}
params = {
"grant-type" : "urn:ietf:params:oauth:grant-type:saml2-bearer",
"assertion" : assertion.decode('UTF-8')
}
r = requests.post(url,params=params,headers=headers)
print(r.status_code)
print(r.content)
print(r.headers)
print(r.url)
Currently I am getting an error as "invalid assertion" using my python code. What is wrong in my code?
The code you copied from SAMLBearerAssertion.apex uses SHA-1 for message digest and RSA-SHA1 for signature whereas your code has SHA-256/RSA-SHA256. Change the relevant XML fragments to this:
<ds:DigestMethod Algorithm="http://www.w3.org/2001/04/xmlenc#sha256"></ds:DigestMethod>
<ds:SignatureMethod Algorithm="http://www.w3.org/2001/04/xmldsig-more#rsa-sha256"></ds:SignatureMethod>
For other values of Algorithm, see IANA assignments cheatsheet.

How to create a simple csv on the fly in python?

I have an endpoint that takes a csv file.
Now, I want to write a test that makes a post request with this file.
I am trying to generate this csv file on the fly (rather than manually create and store it)
I tried this:
def csv_fixture(rows, type):
headers = None
if type == "merchant_upload":
headers = MerchantCSV.ordered_columns()
elif type == "invoice_upload":
headers = InvoiceCSV.ordered_columns()
assert headers is not None
rows = [headers] + rows
with open("file.csv", "w+") as f:
writer = csv.writer(f)
writer.writerows(rows)
yield f
my_file = csv_fixture(merchants, type="merchant_upload")
request = rf.post("/invoice_admin/upload_organisations/",
{"onboarding_file": my_file})
My endpoint does something like this:
if filename not in request.FILES:
raise Exception("Upload Failed: No file submitted.")
file = TextIOWrapper(
request.FILES[filename].file, encoding=request.encoding)
headers = peek_first_row(file)
missing = required_cols - set(headers)
if missing:
raise Exception(f"Columns missing in csv: {str(missing)})")
return csv.DictReader(file)
My endpoint works if I manually upload the file. However, if I try doing it programmatically with the first snippet, I get an error:
def peek_first_row(file):
rows = csv.reader(file)
> headers = next(rows)
E StopIteration
app/invoice_admin/csv_parser.py:11: StopIteration
Please could someone guide me? I have looked at lots of tutorials, but I'm lost at this point.
This might help.
Ex:
def csv_fixture(rows, type):
headers = None
if type == "merchant_upload":
headers = MerchantCSV.ordered_columns()
elif type == "invoice_upload":
headers = InvoiceCSV.ordered_columns()
assert headers is not None
rows = [headers] + rows
with open("file.csv", "w+") as f:
writer = csv.writer(f)
writer.writerows(rows)
return open("file.csv", "rb")
my_file = csv_fixture(merchants, type="merchant_upload")
request = rf.post("/invoice_admin/upload_organisations/",
files={"onboarding_file": my_file})

KeyError: 'lattitude'

I'm currently trying to use an API to get the data in Buffalo and return it in from a JSON URL and them place it in the format of: longitude, Latitude, and Viodesc.
However, I believe I am running into difficulties when iterating because some records do not have latitude and longitude values, thus giving me a KeyError of 'lattitude'.
I'm not sure if this is the fault in my code as well as how to go about changing it
import json
from urllib import request
def get_ticket_data(string):
answer = []
urlData = string
webURL = request.urlopen(urlData)
data = webURL.read()
ans = json.loads(data.decode())
for x in ans:
arr = []
arr.append(x["lattitude"])
arr.append(x["longtitude"])
arr.append(x["viodesc"])
return answer.append(ans)
You can catch the Exception 'KeyError' which is raised when the particular key is not found. Handle the exception so that even if the key is missing you can move on to the next record without stopping the code.
Code Snippet:
import json
from urllib import request
def get_ticket_data(string):
answer = []
urlData = string
webURL = request.urlopen(urlData)
data = webURL.read()
ans = json.loads(data.decode())
for x in ans:
try:
arr = []
arr.append(x["lattit**strong text**ude"])
arr.append(x["longtitude"])
arr.append(x["viodesc"])
except KeyError:
continue
return answer.append(ans)
Hope it helps!
Another different attempt would be to check before appending:
import json
from urllib import request
def get_ticket_data(string):
answer = []
urlData = string
webURL = request.urlopen(urlData)
data = webURL.read()
ans = json.loads(data.decode())
for x in ans:
arr = []
arr.append(x["lattit**strong text**ude"]) if x["lattit**strong text**ude""] else pass
arr.append(x["longtitude"]) if x["longitude"] else pass
arr.append(x["viodesc"]) if x["viodesc"] else pass
return answer.append(ans)
Using the inline-if will let you append if the value exists other wise it would not append.
It will all depend on how you will treat the information latter. Another approach would be to fill it with "" in case there is no latitude. For this approach you could do:
import json
from urllib import request
def get_ticket_data(string):
answer = []
urlData = string
webURL = request.urlopen(urlData)
data = webURL.read()
ans = json.loads(data.decode())
for x in ans:
arr = []
arr.append(x["lattit**strong text**ude"]) if x["lattit**strong text**ude""] else ""
arr.append(x["longtitude"]) if x["longitude"] else ""
arr.append(x["viodesc"]) if x["viodesc"] else ""
return answer.append(ans)

urllib error: Too many requests

The below python program asks the user for two reddit usernames and compares their score.
import json
from urllib import request
def obtainKarma(users_data):
users_info = []
for user_data in users_data:
data = json.load(user_data)
posts = data["data"]["children"]
num_posts = len(posts)
scores = []
comments = []
for post_id in range(num_posts):
score = posts[post_id]["data"]["score"]
comment = posts[post_id]["num_comments"]
scores.append(score)
comments.append(comment)
users_info.append((scores,comments))
user_id = 0
for user_info in users_info:
user_id+=1
print("User"+str(user_id))
for user_attr in user_info:
print(user_attr)
def getUserInfo():
count = 2
users_data = []
while count:
count = count + 1
username = input("Please enter username:\n")
url = "https://reddit.com/user/"+username+".json"
try:
user_data = request.urlopen(url)
except:
print("No such user.\nRetry Please.\n")
count = count + 1
raise
users_data.append(user_data)
obtainKarma(users_data)
if __name__ == '__main__':
getUserInfo()
However, when I run the program and enter a username, I get an error:
raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 429: Too Many Requests
I tried looking for similar issues but none of them satisfied to solve this specific issue. Looking at the error, it would make sense to say that the URL includes an amount of data that exceeds a specific limit? But that still sounds absurd because it is not that much of a data.
Thanks.
The problem seems to be resolved when you supply a User-Agent with your request.
import json
from urllib import request
def obtainKarma(users_data):
users_info = []
for user_data in users_data:
data = json.loads(user_data) # I've changed 'json.load' to 'json.loads' because you want to parse a string, not a file
posts = data["data"]["children"]
num_posts = len(posts)
scores = []
comments = []
for post_id in range(num_posts):
score = posts[post_id]["data"]["score"]
comment = posts[post_id]["data"]["num_comments"] # I think you forgot '["data"]' here, so I added it
scores.append(score)
comments.append(comment)
users_info.append((scores,comments))
user_id = 0
for user_info in users_info:
user_id+=1
print("User"+str(user_id))
for user_attr in user_info:
print(user_attr)
def getUserInfo():
count = 2
users_data = []
while count:
count = count + 1
username = input("Please enter username:\n")
url = "https://reddit.com/user/"+username+".json"
user_data = None
try:
req = request.Request(url)
req.add_header('User-Agent', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)')
resp = request.urlopen(req)
user_data = resp.read().decode("utf-8")
except Exception as e:
print(e)
print("No such user.\nRetry Please.\n")
count = count + 1
raise # why raise? --> Program will end if user is not found
if user_data:
print(user_data)
users_data.append(user_data)
obtainKarma(users_data)
if __name__ == '__main__':
getUserInfo()
There were still other issues with your code:
You should not write json.load(user_data), because you are parsing a string. So I changed it to use json.loads(user_data).
The Python documentation for json.loads states:
Deserialize s (a str instance containing a JSON document) to a Python object using this conversion table.
And in the code comment = posts[post_id]["num_comments"], I think you forgot to index on 'data', so I changed it to comment = posts[post_id]["data"]["num_comments"]
And why are you raising the exception in the except-block? This will end the program, however it seems that you expect it not to, from looking at the following code:
print("No such user.\nRetry Please.\n")

Resources