I need to use bulk_create to create a lot of "detalle" (detail) rows. The problem is that I have to iterate through a JSON payload to get the arguments, and I have four foreign keys, so Django asks me for the instance, not the id. But to get each instance I have to call .get(), which hurts performance because that is four queries per iteration.
Is there a way to fetch all the object instances up front, into a list or something, so I can look up each instance later without calling get() every time?
class DetalleFichaAllViewSet(viewsets.ModelViewSet):
    serializer_class = DetalleFichaUpdateAllSerializer

    def create(self, request, *args, **kwargs):
        user = self.request.user
        data = request.data
        try:
            ficha = Ficha.objects.get(autor=user.id)
            DetalleFicha.objects.filter(ficha=ficha.id).delete()
        except Ficha.DoesNotExist:  # objects.get() raises DoesNotExist, not Http404
            pass
        # Create Ficha
        now = datetime.now()
        date_time = now.strftime("%Y-%m-%d %H:%M")
        print("AAAAAA DATA:", data)
        Ficha.objects.filter(autor=user.id).update(fecha_creacion=date_time, autor=user,
                                                   nombre=data["nombreFicha"], descripcion=data["descripcionFicha"])
        ficha = Ficha.objects.filter(autor=user.id).last()

        recintos = Recinto.objects.all()
        productos = Producto.objects.all()
        estandar_productos = EstandarProducto.objects.all()
        cotizaciones = Cotizacion.objects.all()

        detalles_ficha = []
        for detalle in data["detalles"]:
            recinto = recintos.get(id=detalle[1])
            producto = productos.get(id=detalle[10])
            estandar_producto = estandar_productos.get(id=detalle[9])
            try:
                cotizacion = cotizaciones.get(id=detalle[4])
            except ObjectDoesNotExist:
                cotizacion = None
            print("Fecha: ", detalle[8])
            detalle = DetalleFicha(carreras=detalle[0],
                                   recinto=recinto, nombre=detalle[2],
                                   cantidad_a_comprar=detalle[3], cotizacion=cotizacion,
                                   valor_unitario=detalle[5], valor_total=detalle[6],
                                   documento=detalle[7], fecha_cotizacion=detalle[8],
                                   estandar_producto=estandar_producto, producto=producto,
                                   ficha=ficha)
            detalles_ficha.append(detalle)

        DetalleFicha.objects.bulk_create(detalles_ficha)
        print("Array convertida", detalles_ficha)
        print(detalles_ficha[0])
        return Response(status=status.HTTP_200_OK)
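One way to avoid the four queries per iteration (a sketch only, assuming the same index positions in each detalle row as in the loop above) is to load each related table into a dictionary once with in_bulk() and then do plain in-memory lookups inside the loop:

    # Sketch: build {pk: instance} maps once, then look up in memory.
    recintos = Recinto.objects.in_bulk()
    productos = Producto.objects.in_bulk()
    estandar_productos = EstandarProducto.objects.in_bulk()
    cotizaciones = Cotizacion.objects.in_bulk()

    detalles_ficha = []
    for detalle in data["detalles"]:
        detalles_ficha.append(DetalleFicha(
            carreras=detalle[0],
            recinto=recintos[detalle[1]],
            nombre=detalle[2],
            cantidad_a_comprar=detalle[3],
            cotizacion=cotizaciones.get(detalle[4]),  # dict.get() -> None when the id is missing
            valor_unitario=detalle[5],
            valor_total=detalle[6],
            documento=detalle[7],
            fecha_cotizacion=detalle[8],
            estandar_producto=estandar_productos[detalle[9]],
            producto=productos[detalle[10]],
            ficha=ficha,
        ))
    DetalleFicha.objects.bulk_create(detalles_ficha)

Alternatively, Django accepts the raw key on the *_id attribute (for example recinto_id=detalle[1]), which avoids loading the related instances at all; bulk_create only needs the foreign-key ids to be set.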
Team:
My concerns are redundancy, efficient use of loops, and the best approach to get the desired result.
Use case: get the current on-call user's name and create a Jira ticket with it.
Below is my entire code, and it runs fine for me. This is my very first OOP project.
Flow: I am calling two APIs (the Jira and PagerDuty APIs).
First I call the PagerDuty API to find who is currently on call; the response is a list of nested dicts that I loop over.
Then I call the Jira API to create a ticket assigned to that on-call user.
I also want to learn to calculate Big O and improve.
Since this is my very first attempt, could someone check whether there are any problems, inefficiencies, or divergences from standard practice?
import requests
import json
import os
from jira import JIRA
from pdpyras import APISession
from collections import OrderedDict

JIRA_DICT_KEY = "JIRA"
JIRA_CONFIG = {'server': "https://jirasw.tom.com"}
JIRA_USER = os.environ['JIRA_USER']
JIRA_PW = os.environ['JIRA_PW']
PD_API_KEY = os.environ['PD_API_KEY']
USER_EMAIL = os.environ['USER_EMAIL']


class ZidFinder(object):
    def __init__(self):
        self.active_zid_errors = dict()
        self.team_oncall_dict = dict()
        self.onCall = self.duty_oncall()
        self.jira = self.init_jira()

    def init_jira(self):
        jira = JIRA(options=JIRA_CONFIG, auth=(JIRA_USER, JIRA_PW))
        return jira

    def duty_oncall(self, *args):
        session = APISession(PD_API_KEY, default_from=USER_EMAIL)
        total = 1  # true or false
        limit = 100  # this var is to pull limit records at a time.
        teamnm = "Product SRE Team"
        team_esp_name = "Product SRE Escalation Policy"
        teamid = ""
        teamesplcyid = ""
        if args:
            offset = args[0]
            total_teams = args[1]
            if offset <= total_teams:
                print("\nfunc with args with new offset {} called\n".format(offset))
                teams = session.get('/teams?limit={0}&total={1}&offset={2}'.format(limit, total, offset))
            else:
                print("Reached max teams, no more team records to pull")
                return
        else:
            print("\nPull first set of {} teams as defined by limit var and loop more if team not found..\n".format(limit))
            teams = session.get('/teams?limit={0}&total={1}'.format(limit, total))
        if not teams.ok:
            return
        else:
            tj = teams.json()
            tjd = tj['teams']
            print("\n")
            for adict in tjd:
                if not adict['name'] == teamnm:
                    continue
                elif adict['name'] == teamnm:
                    teamid = adict['id']
                    print("Found team..\n", adict['name'], "id: {0}".format(teamid))
                    esclp = session.get('/escalation_policies?total={0}&team_ids%5B%5D={1}'.format(total, teamid))
                    if not esclp.ok:
                        print("Failed pulling Escalation polices for team '{}'".format(teamnm))
                        return
                    else:
                        ep = esclp.json()
                        epj = esclp.json()['escalation_policies']
                        if not epj:
                            print("Escalation polices for team '{}' not defined".format(teamnm))
                            return
                        else:
                            for adict in epj:
                                if not adict['summary'] == team_esp_name:
                                    continue
                                else:
                                    teamesplcyid = adict['id']
                                    print("{} id: {}\n".format(team_esp_name, teamesplcyid))
                            oncalls = session.get('/oncalls?total={0}&escalation_policy_ids%5B%5D={1}'.format(total, teamesplcyid))
                            if not oncalls.ok:
                                print("Issue in getting oncalls")
                                return
                            else:
                                ocj = oncalls.json()['oncalls']
                                for adict in ocj:
                                    if adict['escalation_level'] == 1 or adict['escalation_level'] == 2:
                                        self.team_oncall_dict[adict['schedule']['summary']] = adict['user']['summary']
                                        continue
            if self.team_oncall_dict:
                if len(self.team_oncall_dict) == 1:
                    print("\nOnly Primary onCall is defined")
                    print("\n", self.team_oncall_dict)
                else:
                    print(" Primary and other calls defined")
                    print("\n", OrderedDict(self.team_oncall_dict), "\n")
                return
            else:
                print("Calling with next offset as team was not found in the records pulled under limit..")
                if tj['offset'] <= tj['total'] or tj['more'] == True:
                    setoffset = limit + tj['offset']
                    self.onCall(setoffset, tj['total'])

    def create_jiras(self):
        node = ["node1", "node2"]
        zid_label = ["id90"]
        labels = [node, zid_label]
        print("Creating a ticket for node {} with description: {}".format(node, str(self.active_zid_errors[node])))
        if self.msre_oncall_dict:
            print("Current onCalls pulled from Duty, use them as assignee in creating jira tickets..")
            new_issue = self.jira.create_issue(project='TEST', summary='ZID error on node {}'.format(node),
                                               description=str(self.active_zid_errors[node]), issuetype={'name': 'Bug'},
                                               assignee={'name': self.msre_oncall_dict['Product SRE Primary']}, labels=labels)
            print("Created a new ticket: ", new_issue.key, new_issue.fields.summary)
            self.active_zid_errors[node][JIRA_DICT_KEY] = new_issue.key
        else:
            print("Current onCalls were not pulled from Duty, create jira with defautl assignee..")
            new_issue = self.jira.create_issue(project='TEST', summary='ZID error on node {}'.format(node),
                                               description=str(self.active_zid_errors[node]), issuetype={'name': 'Bug'}, labels=labels)
            print("Created a new ticket: ", new_issue.key, new_issue.fields.summary)
            self.active_zid_errors[node][JIRA_DICT_KEY] = new_issue.key


if __name__ == "__main__":
    o = ZidFinder()
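On the review itself: one way to cut down the nesting in duty_oncall (a sketch only, reusing the same /teams endpoint and session.get calls as above; the helper name find_team_id is hypothetical) is to isolate the paginated team search in its own function and return early:

    def find_team_id(session, team_name, limit=100):
        # Hypothetical helper: page through /teams until team_name is found.
        offset = 0
        while True:
            resp = session.get('/teams?limit={0}&offset={1}'.format(limit, offset))
            if not resp.ok:
                return None
            data = resp.json()
            for team in data['teams']:
                if team['name'] == team_name:
                    return team['id']
            if not data.get('more'):
                return None  # no more pages and the team was never found
            offset += limit

With the team id in hand, the escalation-policy and on-call lookups become two short, non-nested steps, and the recursive self.onCall(...) call is no longer needed (note that __init__ currently stores the return value of duty_oncall() in self.onCall, not the method itself).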
I'm writing a boto3 script that collects all of our account's public IPs and NAT gateway IPs and uploads them to our S3 bucket. I'm stuck on writing files for both return values. Ideally I'd like to write two separate files while still using the same filename variable you see in main(). Right now I can only get this to work with one of the returns (either nat_ips or public_ips).
import boto3
from datetime import datetime
from csv import writer


def get_ips():
    # Uses STS to assume the role needed.
    boto_sts = boto3.client('sts')
    sts_response = boto_sts.assume_role(
        RoleArn='arn:aws:iam::1234:role/foo',
        RoleSessionName='Foo'
    )
    # Save the details from assumed role into vars
    sts_credentials = sts_response["Credentials"]
    session_id = sts_credentials["AccessKeyId"]
    session_key = sts_credentials["SecretAccessKey"]
    session_token = sts_credentials["SessionToken"]
    # List and store all the regions
    ec2_client = boto3.client('ec2', aws_access_key_id=session_id, aws_secret_access_key=session_key,
                              aws_session_token=session_token, region_name='us-west-1')
    all_regions = [region['RegionName'] for region in ec2_client.describe_regions()['Regions']]
    nat_ips = []
    public_ips = []
    for region in all_regions:
        max_results = 1000
        next_token = ''
        ec2_client = boto3.client('ec2', aws_access_key_id=session_id, aws_secret_access_key=session_key,
                                  aws_session_token=session_token, region_name=region)
        session = boto3.Session(aws_access_key_id=session_id, aws_secret_access_key=session_key,
                                aws_session_token=session_token, region_name=region)
        while next_token or next_token == '':
            response = ec2_client.describe_nat_gateways(MaxResults=max_results, NextToken=next_token)
            filters = [{'Name': 'tag:Name', 'Values': ['*sgw-eip']}]
            get_ips = ec2_client.describe_addresses(Filters=filters)
            for gateway in response["NatGateways"]:
                for address in gateway["NatGatewayAddresses"]:
                    nat_ips.append(address["PublicIp"] + '/32')
            for eip_dict in get_ips['Addresses']:
                public_ip_string = eip_dict['Tags'][0]['Value'] + ' : ' + eip_dict['PublicIp']
                public_ips.append(public_ip_string)
            next_token = response.get("NextToken", None)
    return nat_ips, public_ips


def _s3_upload(filename):
    s3 = boto3.resource('s3')
    bucket = 'foo-bar'
    object_name = 'foo/'
    s3.meta.client.upload_file(Filename=filename, Bucket=bucket, Key=object_name + filename)
    print(f'Uploading {filename} to {bucket}')


def write_list_to_file(filename, data):
    lines_string = '\n'.join(str(x) for x in data)
    with open(filename, 'w') as output:
        output.writelines(lines_string)
    print(f'Writing file to {filename}')


if __name__ == "__main__":
    date = datetime.now().strftime('%Y%m%d')
    # Stuck here since I want to make it one variable
    filename_nat_ips = f'natgateway_ips{date}.csv'
    filename_sga_ips = f'sga_ips{date}.csv'
    public_ips = get_ips()
    nat_ips = get_ips()
    print(filename)
    write_list_to_file(filename, nat_ips)
    _s3_upload(filename)
I see that you are already returning a tuple of public_ips and nat_ips from your get_ips() function. So in your main, you could collect them together as well.
You might try something like this:
if __name__ == "__main__":
    date = datetime.now().strftime('%Y%m%d')
    filename_nat_ips = f'natgateway_ips{date}.csv'
    filename_sga_ips = f'sga_ips{date}.csv'

    nat_ips, public_ips = get_ips()

    write_list_to_file(filename_nat_ips, nat_ips)
    write_list_to_file(filename_sga_ips, public_ips)
    _s3_upload(filename_nat_ips)
    _s3_upload(filename_sga_ips)
I was doing it right the first time and was just trying to make it more complicated than it needed to be.
if __name__ == "__main__":
    date = datetime.now().strftime('%Y%m%d')
    filename_nat_ips = f'natgateway_ips{date}.csv'
    filename_sga_ips = f'sga_ips{date}.csv'
    nat_ips, public_ips = get_ips()
    print(filename_nat_ips)
    print(filename_sga_ips)
    write_list_to_file(filename_nat_ips, nat_ips)
    write_list_to_file(filename_sga_ips, public_ips)
    _s3_upload(filename_nat_ips)
    _s3_upload(filename_sga_ips)
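If more IP lists get added later, the filename/data pairs can also be driven by a small dict so main() stays flat (a sketch using the same helpers defined above):

    date = datetime.now().strftime('%Y%m%d')
    nat_ips, public_ips = get_ips()
    outputs = {
        f'natgateway_ips{date}.csv': nat_ips,
        f'sga_ips{date}.csv': public_ips,
    }
    for filename, data in outputs.items():
        write_list_to_file(filename, data)
        _s3_upload(filename)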
When I call the POST method, the object is created successfully, but I get an error message from my serializers. What should I do to get a successful response when I call this method in my RESTful API?
Serializers.py:
def create(self, validated_data):
    if 'branches' in validated_data:
        branches_data = validated_data.pop('branches')
    if 'contacts' in validated_data:
        contacts_data = validated_data.pop('contacts')
    course = Course.objects.create(**validated_data)
    branches_list = []
    contacts_list = []
    print(contacts_data)
    print(branches_data)
    for branches_details in branches_data:
        branches_list.append(Branch.objects.create(
            course_id=course.id,
            **branches_details))
    for contacts_details in contacts_data:
        contacts_list.append(Contact.objects.create(
            course_id=course.id,
            **contacts_details))
    course.save()
    return course
I will assume that branches_data and contacts_data are lists, so if they are not present in validated_data they should default to empty lists. The following code will run:
def create(self, validated_data):
    branches_data = validated_data.pop('branches') if 'branches' in validated_data else []
    contacts_data = validated_data.pop('contacts') if 'contacts' in validated_data else []
    course = Course.objects.create(**validated_data)
    branches_list = []
    contacts_list = []
    print(contacts_data)
    print(branches_data)
    for branches_details in branches_data:
        branches_list.append(Branch.objects.create(
            course_id=course.id,
            **branches_details))
    for contacts_details in contacts_data:
        contacts_list.append(Contact.objects.create(
            course_id=course.id,
            **contacts_details))
    course.save()
    return course
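A slightly more compact variant of the same idea (a sketch, assuming branches and contacts are writable nested list fields on the serializer) uses the default argument of dict.pop:

    def create(self, validated_data):
        # pop() with a default avoids the separate 'in' checks and the
        # unbound-variable problem when a key is missing from the payload.
        branches_data = validated_data.pop('branches', [])
        contacts_data = validated_data.pop('contacts', [])
        course = Course.objects.create(**validated_data)
        for branch_details in branches_data:
            Branch.objects.create(course_id=course.id, **branch_details)
        for contact_details in contacts_data:
            Contact.objects.create(course_id=course.id, **contact_details)
        return course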
The Python program below asks the user for two Reddit usernames and compares their scores.
import json
from urllib import request


def obtainKarma(users_data):
    users_info = []
    for user_data in users_data:
        data = json.load(user_data)
        posts = data["data"]["children"]
        num_posts = len(posts)
        scores = []
        comments = []
        for post_id in range(num_posts):
            score = posts[post_id]["data"]["score"]
            comment = posts[post_id]["num_comments"]
            scores.append(score)
            comments.append(comment)
        users_info.append((scores, comments))

    user_id = 0
    for user_info in users_info:
        user_id += 1
        print("User" + str(user_id))
        for user_attr in user_info:
            print(user_attr)


def getUserInfo():
    count = 2
    users_data = []
    while count:
        count = count + 1
        username = input("Please enter username:\n")
        url = "https://reddit.com/user/" + username + ".json"
        try:
            user_data = request.urlopen(url)
        except:
            print("No such user.\nRetry Please.\n")
            count = count + 1
            raise
        users_data.append(user_data)
    obtainKarma(users_data)


if __name__ == '__main__':
    getUserInfo()
However, when I run the program and enter a username, I get an error:
raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 429: Too Many Requests
I tried looking for similar issues, but none of them helped solve this specific one. Looking at the error, it would seem that the URL returns an amount of data that exceeds some limit, but that sounds absurd because it isn't that much data.
Thanks.
The problem seems to be resolved when you supply a User-Agent with your request.
import json
from urllib import request


def obtainKarma(users_data):
    users_info = []
    for user_data in users_data:
        data = json.loads(user_data)  # I've changed 'json.load' to 'json.loads' because you want to parse a string, not a file
        posts = data["data"]["children"]
        num_posts = len(posts)
        scores = []
        comments = []
        for post_id in range(num_posts):
            score = posts[post_id]["data"]["score"]
            comment = posts[post_id]["data"]["num_comments"]  # I think you forgot '["data"]' here, so I added it
            scores.append(score)
            comments.append(comment)
        users_info.append((scores, comments))

    user_id = 0
    for user_info in users_info:
        user_id += 1
        print("User" + str(user_id))
        for user_attr in user_info:
            print(user_attr)


def getUserInfo():
    count = 2
    users_data = []
    while count:
        count = count + 1
        username = input("Please enter username:\n")
        url = "https://reddit.com/user/" + username + ".json"
        user_data = None
        try:
            req = request.Request(url)
            req.add_header('User-Agent', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)')
            resp = request.urlopen(req)
            user_data = resp.read().decode("utf-8")
        except Exception as e:
            print(e)
            print("No such user.\nRetry Please.\n")
            count = count + 1
            raise  # why raise? --> Program will end if user is not found
        if user_data:
            print(user_data)
            users_data.append(user_data)
    obtainKarma(users_data)


if __name__ == '__main__':
    getUserInfo()
There were still other issues with your code:
You should not write json.load(user_data), because you are parsing a string. So I changed it to use json.loads(user_data).
The Python documentation for json.loads states:
Deserialize s (a str instance containing a JSON document) to a Python object using this conversion table.
And in the code comment = posts[post_id]["num_comments"], I think you forgot to index on 'data', so I changed it to comment = posts[post_id]["data"]["num_comments"].
And why are you re-raising the exception in the except block? This will end the program; however, it seems you don't expect that, judging from the following code:
print("No such user.\nRetry Please.\n")
I'm creating a game where the data is imported from a database, but I have a small problem...
Currently I get a copy of the data as a dictionary, which I need to pass as an argument to my GUI; however, I also need to process some of that data, as in this example:
I get the data as a dict (I've written the UseDatabase context manager and it works):
def get_user(name: str, passwd: str):
    user = {}
    user['name'] = name
    user['passwd'] = passwd
    with UseDatabase() as cursor:
        _SQL = "SELECT id, cash, ruby FROM user WHERE name='Admin' AND password='adminpass'"
        cursor.execute(_SQL)
        res = cursor.fetchall()
        if res:
            user['id'] = res[0][0]
            user['cash'] = res[0][1]
            user['ruby'] = res[0][2]
            return user
        return res
...
def get_activities():
    with UseDatabase() as cursor:
        _SQL = "SELECT * FROM activities WHERE user_id='2'"
        cursor.execute(_SQL)
        res = cursor.fetchall()
        if res:
            ids = [i[0] for i in res]
            activities = {}
            for i in res:
                activities[i[0]] = {'title': i[1], 'unlock': i[2], 'usr_progress': i[3]}
            return (ids, activities)
        return res
I need it as a dict in my GUI (the "content" argument):
class SideBar:
    def __init__(self, screen: 'pygame.display.set_mode()', box_width: int, box_height: int, content: dict, font: 'font = pygame.font.Font()'):
        # content dict: {id: {'title': '', 'unlock': '', 'usr_progress': ''}, ...}
        self.box_width = box_width
        self.box_height = box_height
        self.box_per_screen = screen.get_height() // box_height
        self.content = content
        self.current_box = 1
        self.screen = screen
        self.font = font
        self.generate_bar()

    def generate_bar(self):
        active = [i for i in self.content.keys() if i in range(self.current_box, self.current_box + self.box_per_screen)]
        for i in range(self.box_per_screen):
            gfxdraw.box(self.screen, pygame.Rect((0, i * self.box_height), (self.screen.get_width() / 3, self.screen.get_height() / 3)), (249, 0, 0, 170))
            self.screen.blit(self.font.render(str(active[i]) + ' - ' + self.content[active[i]]['title'], True, (255, 255, 255)), (10, i * self.box_height + 4))
        for i in range(self.box_per_screen):
            pygame.draw.rect(self.screen, (50, 0, 0), pygame.Rect((0, i * self.box_height), (self.screen.get_width() / 3, self.screen.get_height() / 3)), 2)
But I still need to make some changes to the data:
def unlock_act(act_id):
    if user['cash'] >= activities[act_id]['unlock'] and activities[act_id]['usr_progress'] == 0:
        user['cash'] -= activities[act_id]['unlock']
        activities[act_id]['usr_progress'] = 1
So the question is: in this situation, should I keep a copy of the data as a dict and create a class around it with the methods I need, or should I use plain functions to edit the data inside the dict?
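For what it's worth, here is what the class-based option could look like (a sketch only; the class name and method are hypothetical and simply wrap the same dicts returned by get_user and get_activities):

    class GameData:
        # Wraps the user and activities dicts so the GUI and the game rules
        # share one object instead of module-level state.
        def __init__(self, user: dict, activities: dict):
            self.user = user
            self.activities = activities

        def unlock_act(self, act_id) -> bool:
            act = self.activities[act_id]
            if self.user['cash'] >= act['unlock'] and act['usr_progress'] == 0:
                self.user['cash'] -= act['unlock']
                act['usr_progress'] = 1
                return True
            return False

The SideBar can still receive the activities dict as its content argument, since the class only wraps the existing dicts rather than replacing them.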