I am currently working on a python program to query public github API url to get github user email address. The response from the python object is a huge list with a lot of dictionaries.
My code so far
import requests
import json
# username = ''
username = 'FamousBern'
base_url = 'https://api.github.com/users/{}/events/public'
url = base_url.format(username)
try:
res = requests.get(url)
r = json.loads(res.text)
# print(r) # List slicing
print(type(r)) # List that has alot dictionaries
for i in r:
if 'payload' in i:
print(i['payload'][6])
# matches = []
# for match in r:
# if 'author' in match:
# matches.append(match)
# print(matches)
# print(r[18:])
except Exception as e:
print(e)
# data = res.json()
# print(data)
# print(type(data))
# email = data['author']
# print(email)
By manually accessing this url in chrome browser i get the following
[
{
"id": "15069094667",
"type": "PushEvent",
"actor": {
"id": 32365949,
"login": "FamousBern",
"display_login": "FamousBern",
"gravatar_id": "",
"url": "https://api.github.com/users/FamousBern",
"avatar_url": "https://avatars.githubusercontent.com/u/32365949?"
},
"repo": {
"id": 332684394,
"name": "FamousBern/FamousBern",
"url": "https://api.github.com/repos/FamousBern/FamousBern"
},
"payload": {
"push_id": 6475329882,
"size": 1,
"distinct_size": 1,
"ref": "refs/heads/main",
"head": "f9c165226201c19fd6a6acd34f4ecb7a151f74b3",
"before": "8b1a9ac283ba41391fbf1168937e70c2c8590a79",
"commits": [
{
"sha": "f9c165226201c19fd6a6acd34f4ecb7a151f74b3",
"author": {
"email": "bernardberbell#gmail.com",
"name": "FamousBern"
},
"message": "Changed input functionality",
"distinct": true,
"url": "https://api.github.com/repos/FamousBern/FamousBern/commits/f9c165226201c19fd6a6acd34f4ecb7a151f74b3"
}
]
},
The json object is huge as well, i just sliced it. I am interested to get the email address in the author dictionary.
You're attempting to index into a dict() with i['payload'][6] which will raise an error.
My personal preferred way of checking for key membership in nested dicts is using the get method with a default of an empty dict.
import requests
import json
username = 'FamousBern'
base_url = 'https://api.github.com/users/{}/events/public'
url = base_url.format(username)
res = requests.get(url)
r = json.loads(res.text)
# for each dict in the list
for event in r:
# using .get() means you can chain .get()s for nested dicts
# and they won't fail even if the key doesn't exist
commits = event.get('payload', dict()).get('commits', list())
# also using .get() with an empty list default means
# you can always iterate over commits
for commit in commits:
# email = commit.get('author', dict()).get('email', None)
# is also an option if you're not sure if those keys will exist
email = commit['author']['email']
print(email)
Related
I want to generate 6 random numbers for pokemon api ID.
Put in list.
Then use the 6 numbers in url search.
The url doesn't recognise the list.
I need to convert the list to numbers. I'm not sure how to format them into the url.
import random
import requests
pokemon_ID = []
# pokemon_ID_add = str(pokemon_ID)[1:-1]
# pokemon_ID2 = str(pokemon_ID)[1:-1]
for i in range(0,6):
number = random.randint(1 ,151)
while i in pokemon_ID:
number = random.randint(1, 151)
pokemon_ID.append(number)
url = 'https://pokeapi.co/api/v2/pokemon/{}/'.format(pokemon_ID)
response = requests.get(url)
pokemon = response.json()
print(pokemon)
You can use loop to iterate over random IDs and store the result to a list:
import json
import random
import requests
url = "https://pokeapi.co/api/v2/pokemon/{}/"
random_pokemon_ids = [random.randint(1, 151) for i in range(6)]
result = []
for id_ in random_pokemon_ids:
pokemon = requests.get(url.format(id_)).json()
result.append(pokemon)
# pretty print the result:
print(json.dumps(result, indent=4))
Prints:
[
{
"abilities": [
{
"ability": {
"name": "rock-head",
"url": "https://pokeapi.co/api/v2/ability/69/"
},
"is_hidden": false,
"slot": 1
},
{
"ability": {
"name": "lightning-rod",
"url": "https://pokeapi.co/api/v2/ability/31/"
},
"is_hidden": false,
"slot": 2
},
{
"ability": {
"name": "battle-armor",
"url": "https://pokeapi.co/api/v2/ability/4/"
},
"is_hidden": true,
"slot": 3
}
],
"base_experience": 64,
"forms": [
{
"name": "cubone",
"url": "https://pokeapi.co/api/v2/pokemon-form/104/"
}
],
...
When i use presigned post to generate the url and other attributes, when i try to upload my image with server side encryption that is customer managed keys, this keys is created by me. In my case, I can upload with {"x-amz-server-side-encryption": "aws:kms"}. How can i upload customer managed key?
If, i want to upload image with Customer managed key, am i using the x-amz-server-side-encryption-customer-key and x-amz-server-side-encryption-customer-key-MD5?
here is my sample code:
import logging
import boto3
from botocore.exceptions import ClientError
s3_client = boto3.client("s3", config=Config(signature_version="s3v4"))
try:
bucket_name = "s3-bucket"
fields = {
"x-amz-server-side-encryption": "aws:kms",
# "x-amz-server-side-encryption-customer-algorithm": "AES256",
# "x-amz-server-side-encryption-customer-key": "<customer-managed-key>",
# "x-amz-server-side-encryption-customer-key-MD5": "<customer-managed-key>"
}
conditions = [
# 1Byte - 25MB
["content-length-range", 1, 26214400],
{"x-amz-server-side-encryption": "aws:kms"},
# {"x-amz-server-side-encryption-customer-algorithm": "AES256"},
# {"x-amz-server-side-encryption-customer-key": "<customer-managed-key>"},
# {"x-amz-server-side-encryption-customer-key-MD5": "<customer-managed-key>"}
]
file_name = "test.png"
response = s3_client.generate_presigned_post(bucket_name,
Key=file_name,
Fields=fields,
Conditions=conditions,
ExpiresIn=3000)
print(response)
except ClientError as e:
print(logging.error(e))
after i use "x-amz-server-side-encryption-aws-kms-key-id": "<KEY ID>", I got access d9
This is new sample code:
import logging
import boto3
from botocore.exceptions import ClientError
s3_client = boto3.client("s3", config=Config(signature_version="s3v4"))
try:
bucket_name = "s3-bucket"
fields = {
"x-amz-server-side-encryption": "aws:kms",
"x-amz-server-side-encryption-aws-kms-key-id": "<KEY ID>"
}
conditions = [
# 1Byte - 25MB
["content-length-range", 1, 26214400],
{"x-amz-server-side-encryption": "aws:kms"},
{"x-amz-server-side-encryption-aws-kms-key-id": "<KEY ID>"}
]
file_name = "test.png"
response = s3_client.generate_presigned_post(bucket_name,
Key=file_name,
Fields=fields,
Conditions=conditions,
ExpiresIn=300)
print(response)
except ClientError as e:
print(logging.error(e))
{
"code": 2000,
"messages": [],
"payload": {
"url": "https://s3-bucket.s3.amazonaws.com/",
"fields": {
"Content-Type": "image/png",
"x-amz-server-side-encryption": "aws:kms",
"x-amz-server-side-encryption-aws-kms-key-id": "12345678-01s1-abba-abcd-fb9f6e5bf13d",
"key": "kms005.png",
"x-amz-algorithm": "AWS4-HMAC-SHA256",
"x-amz-credential": "AKIAXHC4C5L2YWPYEWHO/20210223/us-east-1/s3/aws4_request",
"x-amz-date": "20210223T073640Z",
"policy": "eyJleHBpcmF0aW9uIjogIjIwMjEtMDItMjNUMDc6NDE6NDBaIiwgImNvbmRpdGlvbnMiOiBbWyJjb250ZW50LWxlbmd0aC1yYW5nZSIsIDEsIDI2MjE0NDAwXSwgeyJ4LWFtei1zZXJ2ZXItc2lkZS1lbmNyeXB0aW9uIjogImF3czprbXMifSwgeyJidWNrZXQiOiAiczMtYWRyaWFuLXRlc3QtYnVja2V0In0sIHsia2V5IjogImttczAwNS5wbmcifSwgeyJ4LWFtei1hbGdvcml0aG0iOiAiQVdTNC1ITUFDLVNIQTI1NiJ9LCB7IngtYW16LWNyZWRlbnRpYWwiOiAiQUtJQVhIQzRDNUwyWVdQWUVXSE8vMjAyMTAyMjMvdXMtZWFzdC0xL3MzL2F3czRfcmVxdWVzdCJ9LCB7IngtYW16LWRhdGUiOiAiMjAyMTAyMjNUMDczNjQwWiJ9XX0=",
"x-amz-signature": "e0c40e744d1989578517168341fa17a21c297ffa0e1be6c84e448dea373b7d16"
}
},
"request_id": "1234567890"
}"
Errors msg
Customer managed key, am i using the x-amz-server-side-encryption-customer-key and x-amz-server-side-encryption-customer-key-MD5?
There is no such header as x-amz-server-side-encryption-customer-key for SSE-KMS (its for SSE-C, see below). Instead, if you are going to use "x-amz-server-side-encryption": "aws:kms" and what to use your own CMK (not AWS Managed CMK) then you have to use:
x-amz-server-side-encryption-aws-kms-key-id - to specify the ID of the customer managed CMK used to protect the data
Header x-amz-server-side-encryption-customer-key-MD5 is for SSE-C (customer-provided keys), not for SSE-KMS.
In kms key policy must have a kms:Encrypt, kms:Decrypt, kms:ReEncrypt*, kms:GenerateDataKey* and kms:DescribeKey. After add the action into kms key policy it will upload successfully.
"Statement": [
"Action": [
"kms:Encrypt",
"kms:Decrypt",
"kms:ReEncrypt*",
"kms:GenerateDataKey*",
"kms:DescribeKey"
],
"Resource": "*"
}
]
I have 4 string variables like this:
a = 'a long string'
b = 'a longer string'
c = 'a path (with a single slash \)'
d = 'another path (with a single slash \)'
I am supposed to be adding this to a variable, which is a list of dictionaries. Something like this:
var = [
{
"op": "add",
"path": "/fields/System.Title",
"from": null,
"value": a
},
{
"op": "add",
"path": "/fields/System.Description",
"from": null,
"value": b
},
{
"op": "add",
"path": "/fields/System.AreaPath",
"from": null,
"value": c
},
{
"op": "add",
"path": "/fields/System.State",
"from": null,
"value": "New"
},
{
"op": "add",
"path": "/fields/System.IterationPath",
"from": null,
"value": d
}
]
FYI, var[3] does not take the variables I created. Only var[0], var[1], var[2] and var[4]
All this works fine. As you may have guessed by now, this is my payload for a POST operation that I am supposed to be sending (it's actually to create a work item in my Azure DevOps organization with the above parameters). Please note, the from in var only accepts null.
When I use POSTMAN to send the above request, it works (except I am not passing the variables in the body, but actually the hard coded values). When I do the same using Python, on VSCode, I am always thrown a 203, which is essentially an incorrect/incomprehensible payload. I am not able to get this working.
This is essentially the code (please assume the variables):
url = f'https://dev.azure.com/{organization}/{project}/_apis/wit/workitems/${workitemtype}?api-version=6.0'
header = {'Content-Type': 'application/json-patch+json', 'Authorization': f'Basic {PAT}'}
request = requests.post(url = url, json = var, headers = header)
I've tried everything I can think of:
request = requests.post(url = url, **data = json.dumps(var)**, headers = header),
request = requests.post(url = url, **data = json.loads(var)**, headers = header),
request = requests.post(url = url, **data = json.loads(str(var))**, headers = header) -> this because loads(var) was throwing TypeError: the JSON object must be str, bytes or bytearray, not list
I also tried to include the entire var variable as a docstring - but the problem with that is that I need to pass the variables (a, b, c and d) into it, and docstrings cannot accept them.
How may I overcome this?
I have a feeling that it's the null that's causing this issue, but I could be wrong.
I tested with your code, and i failed to pass the authorization if i defined the Authorization in the request headers. I fixed it by passing the PAT to the auth parameter in requests.post method. I made a little to your code. And it worked fine for me. See below:
import requests
if __name__ == '__main__':
a = 'a longer string'
b = 'a longer string'
c = 'project\area'
d = 'project\iteration'
var = [
{
"op": "add",
"path": "/fields/System.Title",
"from": None,
"value": a
},
{
"op": "add",
"path": "/fields/System.Description",
"from": None,
"value": b
},
{
"op": "add",
"path": "/fields/System.AreaPath",
"from": None,
"value": c
},
{
"op": "add",
"path": "/fields/System.State",
"from": None,
"value": "New"
},
{
"op": "add",
"path": "/fields/System.IterationPath",
"from": None,
"value": d
}
]
pat = "Personal access token"
url = 'https://dev.azure.com/{organization}/{project}/_apis/wit/workitems/$Task?api-version=6.1-preview.3'
header = {'Content-Type': 'application/json-patch+json'}
request = requests.post(url = url, json = var, headers = header, auth=("", pat))
However you can also check out azure-devops-python-api. See below example code to create a work item.
from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication
from azure.devops.v5_1.work_item_tracking.models import JsonPatchOperation
def _create_patch_operation(op, path, value):
patch_operation = JsonPatchOperation()
patch_operation.op = op
patch_operation.path = path
patch_operation.value = value
return patch_operation
def _create_work_item_field_patch_operation(op, field, value):
path = '/fields/{field}'.format(field=field)
return _create_patch_operation(op=op, path=path, value=value)
if __name__=='__main__':
a = 'a longer string'
b = 'a longer string'
c = 'project\area'
d = 'project\iteration'
# Fill in with your personal access token and org URL
personal_access_token = 'PAT'
organization_url = 'https://dev.azure.com/{org}/'
# Create a connection to the org
credentials = BasicAuthentication('', personal_access_token)
connection = Connection(base_url=organization_url, creds=credentials)
# Get a client
wit_client = connection.clients.get_work_item_tracking_client()
patch_document=[]
patch_document.append(_create_work_item_field_patch_operation('add', 'System.Title', a))
patch_document.append(_create_work_item_field_patch_operation('add', 'System.Description', b))
patch_document.append(_create_work_item_field_patch_operation('add', 'System.AreaPath', c))
wit_client.create_work_item(patch_document, "Project", 'Task')
I am new to Python. I am writhing a code to generate a excel file having the data sourced by calling API and correlate those to get desired result.
basically taking input from one database and search that in others and fetch related information.
The 4 databases have below data :
EEp
---------------------
{u'data': [{u'_id': u'5c30702c8ca9f51da8178df4',
u'encap': u'vlan-24',
u'ip': u'7.12.12.16',
u'mac': u'5B:P9:01:9E:42:08'}]}
PathEp
-----------
{u'data': [{u'_id': u'5c54a81a8ca9f51da84ae08e',
u'paths': u'paths-1507',
u'endpoint': u'eth1/10',
u'cep': u'5B:P9:01:9E:42:08',
u'tenant': u'ESX'}]}
ip4_address
-----------------------
{u'data': [{u'Allocation': u'Build_Reserved',
u'address': u'7.12.12.16',
u'name': u'fecitrix-1',
u'state': u'RESERVED'}]}
asset
---------------
{u'data': [{u'_id': u'57ccce8110dd54f02881fedc',
u'client': u'CES',
u'hostname': u'fecitrix-1'
u'os_team': u'Window'}]}
Logic:
If "mac" of EEp and "cep" of PathEp is same than take "encap","ip" ,"mac"
"paths" ,'endpoint","cep" and "tenant" (these values need to be exported
to excel)
Take ip of EEp and search in "ip4_address"
and get the "name" from ip4_address ( name need to be exported to excel).
If "name" of ip4_address is equal to "hostname" of database "asset" then take
"client" and "os_team" ( export that to excel)
I have written the script but not getting the desired result.
def get_host_details(self):
data = {
"find": {
"hostname": self.controller
},
"projection":{
"tenant": 1,
"paths": 1,
"endpoint":1
}
}
host_details = self.post("https://database.app.com/api/data/devices/PathEp/find", data)
#print host_details
hosts = []
for record in host_details:
if "mig" not in record["endpoint"]:
hosts.append(record)
return hosts
def get_ipaddress(self, controller):
host_record = {"tenant": "UNKNOWN",
"paths": "UNKNOWN",
"endpoint": "UNKNOWN",
"ip": "UNKNOWN",
"mac": "UNKNOWN",
"encap": "UNKNOWN"}
data = {
"find": {
"hostname": controller,
"ip": {
"$ne": "0.0.0.0"
}
},
"projection": {
"ip": 1,
"mac":1,
"encap":1,
}
}
endpoints = self.post("https://database.app.com/api/data/devices/EEp/find", data)
IPAM = self.get_dns()
print endpoints
host_details = self.get_host_details()
host_details_record = []
for record in endpoints:
for host in host_details:
if record["mac"] == host["cep"]:
host_record = {"tenant": host["tenant"],
"paths": host["paths"],
"endpoint": host["endpoint"],
"ip": record["ip"],
"mac": record["mac"],
"encap": record["encap"]}
host_details_record.append(host_record)
self.get_excel(host_details_record)
def get_dns(self, endpoints):
ip_dns_record = []
for each_endpoint in endpoints:
data = {
"find":
{
"address": {
"$eq": each_endpoint["ip"]
},
},
"projection":
{
"name": 1
}
}
dns_record = {"client":"UNKNOWN",
"os_team":"UNKNOWN",
ipam_record = self.post("https://database.app.com/api/data/"
"internal/ip4_address/find", data)
if ipam_record:
dns_record["ip_address"] = each_endpoint["ip"]
dns_record["hostname"] = ipam_record[0]["name"]
dns_record = self.get_remedy_details(ipam_record[0]["name"],
dns_record)
ip_dns_record.append(dns_record)
else:
dns_record["ip_address"] = each_endpoint["ip"]
dns_record["hostname"] = "UNKNOWN"
ip_dns_record.append(dns_record)
self.get_excel(ip_dns_record)
def get_remedy_details(self, hostname, dns_record):
data = {
"find":
{
"hostname": hostname.upper(),
}
}
remedy_data = self.post("https://database.app.com/api/data/internal/asset/find", data)
print(remedy_data)
#remedy_data = remedy_data["data"]
if remedy_data:
dns_record["client"] = remedy_data[0].get("client","UNKNOWN")
dns_record["os_team"] = remedy_data[0].get("os_team", "UNKNOWN")
else:
dns_record["client"] = "UNKNOWN"
dns_record["os_team"] = "UNKNOWN"
return dns_record
def get_excel(self, ip_dns_record):
filename = self.controller + ".xls"
excel_file = xlwt.Workbook()
sheet = excel_file.add_sheet('HOSTLIST')
sheet.write(0, 0, "IP Address")
sheet.write(0, 1, "HostName")
sheet.write(0, 2, "Client")
sheet.write(0, 3, "OS Team")
for count in xrange(1, len(ip_dns_record)+1):
sheet.write(count, 0,ip_dns_record[count - 1]["ip_address"])
sheet.write(count, 1,ip_dns_record[count - 1]["hostname"])
sheet.write(count, 2,ip_dns_record[count - 1]["client"])
sheet.write(count, 3,ip_dns_record[count - 1]["os_team"])
excel_file.save(filename)
if __name__ == "__main__":
controller = sys.argv[1]
OBJ = ACIHostList(controller)
print "SCRIPT COMPLETED"
No idea where I am going wrong and what needs to be done .
Your question leaves too much out. You should include all errors that you get. You should also comment your code as well so we can understand what you are trying to achieve in each step.
This is not an answer but something to try:
Rather than trying to wrap your head around a module like excel, wright your data to a simple CSV file. A CSV file can be opened up in excel and it formats correctly but is a lot easier to create.
import csv
data = [["a", "b"], ["c", "d"]]
with open("file.csv", "w+") as csv_file:
create_csv = csv.writer(csv_file)
create_csv .writerows(data)
simply grab all your data into a 2D list and using the above code dump it into a file so you can easily read it.
check the output of the file and see if you are getting the data you expect.
If you are not getting the desired data into this CSV file then there is an issue with your database queries.
I've been trying to find some documentation for jsonpatch==1.16 on how to make PATCH paths case-insensitive. The problem is that:
PATCH /users/123
[
{"op": "add", "path": "/firstname", "value": "Spammer"}
]
Seems to mandate that the DB (MySQL / MariaDB) column is also exactly firstname and not for example Firstname or FirstName. When I change the path in the JSON to /FirstName, which is what the DB column is, then the patch works just fine. But I'm not sure if you are supposed to use CamelCase in the JSON in this case? It seems a bit non-standard.
How can I make jsonpatch at least case-insensitive? Or alternatively, is there some way to insert some mapping in the middle, for example like this:
def users_mapping(self, path):
select = {
"/firstname": "FirstName",
"/lastname": "last_name", # Just an example
}
return select.get(path, None)
Using Python 3.5, SQLAlchemy 1.1.13 and Flask-SQLAlchemy 2.2
Well, the answer is: yes, you can add mapping. Here's my implementation with some annotations:
The endpoint handler (eg. PATCH /news/123):
def patch(self, news_id):
"""Change an existing News item partially using an instruction-based JSON,
as defined by: https://tools.ietf.org/html/rfc6902
"""
news_item = News.query.get_or_404(news_id)
self.patch_item(news_item, request.get_json())
db.session.commit()
# asdict() comes from dictalchemy method make_class_dictable(news)
return make_response(jsonify(news_item.asdict()), 200)
The method it calls:
# news = the db.Model for News, from SQLAlchemy
# patchdata = the JSON from the request, like this:
# [{"op": "add", "path": "/title", "value": "Example"}]
def patch_item(self, news, patchdata, **kwargs):
# Map the values to DB column names
mapped_patchdata = []
for p in patchdata:
# Replace eg. /title with /Title
p = self.patch_mapping(p)
mapped_patchdata.append(p)
# This follows the normal JsonPatch procedure
data = news.asdict(exclude_pk=True, **kwargs)
# The only difference is that I pass the mapped version of the list
patch = JsonPatch(mapped_patchdata)
data = patch.apply(data)
news.fromdict(data)
And the mapping implementation:
def patch_mapping(self, patch):
"""This is used to map a patch "path" or "from" to a custom value.
Useful for when the patch path/from is not the same as the DB column name.
Eg.
PATCH /news/123
[{ "op": "move", "from": "/title", "path": "/author" }]
If the News column is "Title", having "/title" would fail to patch
because the case does not match. So the mapping converts this:
{ "op": "move", "from": "/title", "path": "/author" }
To this:
{ "op": "move", "from": "/Title", "path": "/Author" }
"""
# You can define arbitrary column names here.
# As long as the DB column is identical, the patch will work just fine.
mapping = {
"/title": "/Title",
"/contents": "/Contents",
"/author": "/Author"
}
mutable = deepcopy(patch)
for prop in patch:
if prop == "path" or prop == "from":
mutable[prop] = mapping.get(patch[prop], None)
return mutable