The following script returns a list of the desired payload for a PDF for a single site:
import multiprocessing as mp

def process_dates(dates, serviceAreaId, serviceAreaName, siteId, siteName):
    model_results = []
    # for loop to process dates
    return model_results

def process_service_area(site):
    pdf_results = []
    siteId = site["id"]
    siteName = site["name"]
    siteTimeZone = site["time_zone"]
    service_area_response = requests.request("GET", f"{lrs_url}/serviceAreas/active?siteId={siteId}", headers={}, data={})
    if service_area_response.status_code != 200:
        sendMessageToSlack(f"Failed to retrieve service areas for site {siteId}.")
        return pdf_results
    service_area_json_response = json.loads(service_area_response.content)
    tz = timezone(siteTimeZone)
    startdate = datetime.now(tz)
    dates = pd.date_range(start=startdate, periods=7).to_pydatetime().tolist()
    date_pool = mp.Pool(mp.cpu_count())
    for service_area in service_area_json_response:
        serviceAreaName = service_area["name"]
        serviceAreaId = service_area["id"]
        active = service_area["active"]
        # Run comparison
        date_pool_async_results = date_pool.apply_async(process_dates, args=(dates, serviceAreaId, serviceAreaName, siteId, siteName))
    date_pool.close()
    date_pool.join()
    for r in date_pool_async_results.get():
        pdf_results.append(r)
    return pdf_results

def process_all_sites():
    sites_response = requests.request("GET", f"{lrs_url}/sites/active", headers={}, data={})
    sites_json_response = json.loads(sites_response.content)
    pdf_results = []
    for site in sites_json_response:
        pdf_results += process_service_area(site)
        break
        # service_area_pool = mp.Pool(2)
        # service_area_pool_async_results = service_area_pool.apply_async(process_service_area, args = (site))
        # service_area_pool.close()
        # service_area_pool.join()
        # for r in service_area_pool_async_results.get():
        #     pdf_results.append(r)
    return pdf_results

results = process_all_sites()
create_pdf(results)
However, when the script is run for multiple sites, that is, when break is commented out and the lines from service_area_pool up to pdf_results.append(r) are uncommented, I receive an UnboundLocalError: local variable 'date_pool_async_results' referenced before assignment.
Why is that the case? It seems that date_pool_async_results does not receive any further payloads beyond just the first iteration.
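In case it helps, here is a minimal, self-contained repro of the same error (hypothetical names, unrelated to my actual payload): a variable assigned only inside a for loop is never bound when the iterable is empty, which is exactly when Python raises this kind of UnboundLocalError.

import multiprocessing as mp

def work(n):
    return n * 2

def demo(items):
    pool = mp.Pool(2)
    # 'async_result' is only ever assigned inside the loop body,
    # and each iteration overwrites the previous AsyncResult
    for item in items:
        async_result = pool.apply_async(work, args=(item,))
    pool.close()
    pool.join()
    # If 'items' was empty, the loop body never ran, the name was never
    # bound, and this line raises UnboundLocalError
    return async_result.get()

if __name__ == "__main__":
    print(demo([1, 2, 3]))  # 6 -- only the last task's result survives the overwrites
    demo([])                # raises UnboundLocalError: 'async_result' referenced before assignment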
I've gone through the following resources; however, they don't seem to work as a solution, since I am trying to append to a list in my case.
I've also tried the following, but received an UnboundLocalError: local variable 'pdf_results' referenced before assignment as well.
global pdf_results
pdf_results = []

def process_all_sites():
    # retrieve payload
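For comparison, this minimal standalone pattern (hypothetical payload) does work for me, with the global declaration inside the function that rebinds the name rather than at module level:

pdf_results = []

def process_all_sites():
    global pdf_results  # the declaration goes inside the function that reassigns the name
    pdf_results = pdf_results + ["payload"]

process_all_sites()
print(pdf_results)  # ['payload']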
Please do let me know if further context is required.
I'm creating a boto3 script that scrapes and uploads our entire account's public IPs and NAT gateway IPs to our S3 bucket. I'm stuck on writing files for both return values. I would ideally like to write two separate files while still using the same filename variable you see in main(). Right now I can get this to work with only one return value (either nat_ips or public_ips).
import boto3
from datetime import datetime
from csv import writer

def get_ips():
    # Uses STS to assume the role needed.
    boto_sts = boto3.client('sts')
    sts_response = boto_sts.assume_role(
        RoleArn='arn:aws:iam::1234:role/foo',
        RoleSessionName='Foo'
    )
    # Save the details from assumed role into vars
    sts_credentials = sts_response["Credentials"]
    session_id = sts_credentials["AccessKeyId"]
    session_key = sts_credentials["SecretAccessKey"]
    session_token = sts_credentials["SessionToken"]
    # List and store all the regions
    ec2_client = boto3.client('ec2', aws_access_key_id=session_id, aws_secret_access_key=session_key, aws_session_token=session_token, region_name='us-west-1')
    all_regions = [region['RegionName'] for region in ec2_client.describe_regions()['Regions']]
    nat_ips = []
    public_ips = []
    for region in all_regions:
        max_results = 1000
        next_token = ''
        ec2_client = boto3.client('ec2', aws_access_key_id=session_id, aws_secret_access_key=session_key, aws_session_token=session_token, region_name=region)
        session = boto3.Session(aws_access_key_id=session_id, aws_secret_access_key=session_key, aws_session_token=session_token, region_name=region)
        while next_token or next_token == '':
            response = ec2_client.describe_nat_gateways(MaxResults=max_results, NextToken=next_token)
            filters = [{'Name': 'tag:Name', 'Values': ['*sgw-eip']}]
            get_ips = ec2_client.describe_addresses(Filters=filters)
            for gateway in response["NatGateways"]:
                for address in gateway["NatGatewayAddresses"]:
                    nat_ips.append(address["PublicIp"] + '/32')
            for eip_dict in get_ips['Addresses']:
                public_ip_string = eip_dict['Tags'][0]['Value'] + ' : ' + eip_dict['PublicIp']
                public_ips.append(public_ip_string)
            next_token = response.get("NextToken", None)
    return nat_ips, public_ips

def _s3_upload(filename):
    s3 = boto3.resource('s3')
    bucket = 'foo-bar'
    object_name = 'foo/'
    s3.meta.client.upload_file(Filename=filename, Bucket=bucket, Key=object_name + filename)
    print(f'Uploading {filename} to {bucket}')

def write_list_to_file(filename, data):
    lines_string = '\n'.join(str(x) for x in data)
    with open(filename, 'w') as output:
        output.writelines(lines_string)
    print(f'Writing file to {filename}')

if __name__ == "__main__":
    date = datetime.now().strftime('%Y%m%d')
    # Stuck here since I want to make it one variable
    filename_nat_ips = f'natgateway_ips{date}.csv'
    filename_sga_ips = f'sga_ips{date}.csv'
    public_ips = get_ips()
    nat_ips = get_ips()
    print(filename)
    write_list_to_file(filename, nat_ips)
    _s3_upload(filename)
I see that you are already returning a tuple of nat_ips and public_ips from your get_ips() function. So in your main, you could collect them together as well.
You might try something like this:
if __name__ == "__main__":
    date = datetime.now().strftime('%Y%m%d')
    filename_nat_ips = f'natgateway_ips{date}.csv'
    filename_sga_ips = f'sga_ips{date}.csv'
    nat_ips, public_ips = get_ips()
    write_list_to_file(filename_nat_ips, nat_ips)
    write_list_to_file(filename_sga_ips, public_ips)
    _s3_upload(filename_nat_ips)
    _s3_upload(filename_sga_ips)
I was doing it right the first time and was just trying to make it more complicated.
if __name__ == "__main__":
    date = datetime.now().strftime('%Y%m%d')
    filename_nat_ips = f'natgateway_ips{date}.csv'
    filename_sga_ips = f'sga_ips{date}.csv'
    nat_ips, public_ips = get_ips()
    print(filename_nat_ips)
    print(filename_sga_ips)
    write_list_to_file(filename_nat_ips, nat_ips)
    write_list_to_file(filename_sga_ips, public_ips)
    _s3_upload(filename_nat_ips)
    _s3_upload(filename_sga_ips)
My goal is to create a dictionary called 'sum_by_department' that contains each department as the key and the total annual salary of all employees in that department combined as the value. So far this is what I have, but I'm a bit lost on how to add all the department names along with a sum of all of the employees' salaries to that dictionary. The current dictionary I tried displays only the salary amount and how many times it's seen in the file; this is where I need the help.
import requests

# endpoint
endpoint = "https://data.cityofchicago.org/resource/xzkq-xp2w.json"
# optional parameters
parameters = {"$limit": 20}
# make request
response = requests.get(endpoint, params=parameters)
# Get the response data as a python object.
data = response.json()

count_by_department = {}
sum_by_department = {}

# loop through the data
for i in data:
    if ('department' and 'salary_or_hourly' and 'annual_salary' in i):
        department = i['department']
        pay_type = i['salary_or_hourly']
        anual_salary = i['annual_salary']
        # print(i['annual_salary'])
    else:
        # handle case where there is no department property in that record
        department = 'undefined'
        pay_type = 'n/a'
        anual_salary = 'n/a'
    # print(department, ",", pay_type)
    # exclude the cases where the pay type is Hourly
    if pay_type != 'Salary':
        pay_type = 0
    # print(department, ",", pay_type)
    # update the sum_by_department and count_by_department dictionaries
    if department in count_by_department:
        count_by_department[department] += 1
    else:
        count_by_department[department] = 1
    if anual_salary in sum_by_department:
        sum_by_department[anual_salary] += 1
    else:
        sum_by_department[anual_salary] = 1

# print(count_by_department)
# print(sum_by_department)
You should add each person's annual_salary to the sum_by_department dictionary while looping. Also, do not forget to convert your annual_salary variable to the float type, because adding the values together as strings won't work.
Example script:
import requests

# endpoint
endpoint = "https://data.cityofchicago.org/resource/xzkq-xp2w.json"
# optional parameters
parameters = {"$limit": 20}
# make request
response = requests.get(endpoint, params=parameters)
# Get the response data as a python object.
data = response.json()

count_by_department = {}
sum_by_department = {}

# loop through the data
for i in data:
    # note: each key needs its own membership test; a chain like
    # ('department' and 'salary_or_hourly' and 'annual_salary' in i)
    # only checks the last key
    if 'department' in i and 'salary_or_hourly' in i and 'annual_salary' in i:
        department = i['department']
        pay_type = i['salary_or_hourly']
        annual_salary = float(i['annual_salary'])
        # print(i['annual_salary'])
    else:
        # handle case where there is no department property in that record
        department = 'undefined'
        pay_type = 'n/a'
        annual_salary = 0
    # print(department, ",", pay_type)
    # exclude the cases where the pay type is Hourly
    if pay_type != 'Salary':
        pay_type = 0
    # print(department, ",", pay_type)
    # update the sum_by_department and count_by_department dictionaries
    if department in count_by_department:
        count_by_department[department] += 1
        sum_by_department[department] += annual_salary
    else:
        count_by_department[department] = 1
        sum_by_department[department] = annual_salary

# import pdb; pdb.set_trace()
print('count_by_department = ', count_by_department)
print('sum_by_department = ', sum_by_department)
Tip:
Uncomment the pdb line to debug interactively. The Python Debugger (pdb for short) halts the program while it's still running (i.e. in memory), so you can interact with it and inspect all variables.
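For example, a session at the (Pdb) prompt might look like this (the values shown are illustrative; yours will depend on the API response):

(Pdb) p department
'POLICE'
(Pdb) p annual_salary
48078.0
(Pdb) p sum_by_department
{'POLICE': 48078.0}
(Pdb) n    # execute the next line
(Pdb) c    # continue until the next breakpoint (or the end of the program)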
def getNewWatchedCountGraph(requests):
    data = Video.getNewWatchedCountGraph(requests)
    data = json.loads(data)
    # print(data)
    x = []
    m = []
    bg = {}
    res = {}
    monthnumbers = []
    currentMonth = datetime.datetime.now().month
    for item in data:
        seconds = int(item['count'])
        x.append(seconds)
        mydate = datetime.datetime.strptime(item['_id'], "%Y-%m")
        monthnumbers.append(mydate.month)
        m.append(mydate.strftime("%B"))
    startMonths = monthnumbers[0]  # line 116
    endMonths = currentMonth + 1
    data = []
    mon = []
    for months in range(startMonths, endMonths):
        if months not in monthnumbers:
            mon.append(calendar.month_name[months])
            data.append(0)
        else:
            mon.append(calendar.month_name[months])
            monthIndex = monthnumbers.index(months)
            data.append(x[monthIndex])
    res['series_name'] = "Views"
    res['series'] = list(data)
    res['xdata'] = list(mon)
    restrn_response = dumps(res)
    return HttpResponse(restrn_response)
I made this function to show a graph of the total number of views.
It works fine on my local server, but on the main server it raises "list index out of range" at line 116 (marked above). Where am I going wrong?
This happens because monthnumbers is empty. Given that it’s being filled while iterating over data, I think the loop doesn’t even start because data is empty.
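For example, you could guard against the empty case before indexing. A sketch reusing your variable names (adapt the empty response to whatever your chart expects):

    for item in data:
        seconds = int(item['count'])
        x.append(seconds)
        mydate = datetime.datetime.strptime(item['_id'], "%Y-%m")
        monthnumbers.append(mydate.month)
        m.append(mydate.strftime("%B"))

    # monthnumbers[0] raises IndexError when 'data' was empty, so bail out early
    if not monthnumbers:
        res['series_name'] = "Views"
        res['series'] = []
        res['xdata'] = []
        return HttpResponse(dumps(res))

    startMonths = monthnumbers[0]  # line 116, now only reached when data is non-empty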
The Python program below asks the user for two Reddit usernames and compares their scores.
import json
from urllib import request

def obtainKarma(users_data):
    users_info = []
    for user_data in users_data:
        data = json.load(user_data)
        posts = data["data"]["children"]
        num_posts = len(posts)
        scores = []
        comments = []
        for post_id in range(num_posts):
            score = posts[post_id]["data"]["score"]
            comment = posts[post_id]["num_comments"]
            scores.append(score)
            comments.append(comment)
        users_info.append((scores, comments))
    user_id = 0
    for user_info in users_info:
        user_id += 1
        print("User" + str(user_id))
        for user_attr in user_info:
            print(user_attr)

def getUserInfo():
    count = 2
    users_data = []
    while count:
        count = count + 1
        username = input("Please enter username:\n")
        url = "https://reddit.com/user/" + username + ".json"
        try:
            user_data = request.urlopen(url)
        except:
            print("No such user.\nRetry Please.\n")
            count = count + 1
            raise
        users_data.append(user_data)
    obtainKarma(users_data)

if __name__ == '__main__':
    getUserInfo()
However, when I run the program and enter a username, I get an error:
raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 429: Too Many Requests
I tried looking for similar issues, but none of them helped solve this specific one. Looking at the error, it would make sense to say that the request exceeds some data limit? But that still sounds absurd, because it is not that much data.
Thanks.
The problem seems to be resolved when you supply a User-Agent with your request.
import json
from urllib import request

def obtainKarma(users_data):
    users_info = []
    for user_data in users_data:
        data = json.loads(user_data)  # I've changed 'json.load' to 'json.loads' because you want to parse a string, not a file
        posts = data["data"]["children"]
        num_posts = len(posts)
        scores = []
        comments = []
        for post_id in range(num_posts):
            score = posts[post_id]["data"]["score"]
            comment = posts[post_id]["data"]["num_comments"]  # I think you forgot '["data"]' here, so I added it
            scores.append(score)
            comments.append(comment)
        users_info.append((scores, comments))
    user_id = 0
    for user_info in users_info:
        user_id += 1
        print("User" + str(user_id))
        for user_attr in user_info:
            print(user_attr)

def getUserInfo():
    count = 2
    users_data = []
    while count:
        count = count + 1
        username = input("Please enter username:\n")
        url = "https://reddit.com/user/" + username + ".json"
        user_data = None
        try:
            req = request.Request(url)
            req.add_header('User-Agent', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)')
            resp = request.urlopen(req)
            user_data = resp.read().decode("utf-8")
        except Exception as e:
            print(e)
            print("No such user.\nRetry Please.\n")
            count = count + 1
            raise  # why raise? --> Program will end if user is not found
        if user_data:
            print(user_data)
            users_data.append(user_data)
    obtainKarma(users_data)

if __name__ == '__main__':
    getUserInfo()
There were still other issues with your code:
You should not write json.load(user_data), because you are parsing a string. So I changed it to use json.loads(user_data).
The Python documentation for json.loads states:
Deserialize s (a str instance containing a JSON document) to a Python object using this conversion table.
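A quick standalone illustration of the difference (the example file is hypothetical, just for contrast):

import json

raw = '{"data": {"children": []}}'  # a str, like resp.read().decode("utf-8")
obj = json.loads(raw)               # json.loads parses a JSON *string*
print(obj["data"]["children"])      # []

with open("example.json", "w") as f:
    f.write(raw)
with open("example.json") as f:
    obj2 = json.load(f)             # json.load reads from a *file object*
print(obj == obj2)                  # True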
And in the code comment = posts[post_id]["num_comments"], I think you forgot to index on 'data', so I changed it to comment = posts[post_id]["data"]["num_comments"].
And why are you raising the exception in the except block? This will end the program; however, judging from the following line, it seems that you expect it not to:
print("No such user.\nRetry Please.\n")
I'm working on a Python script that takes a set of input lines and assigns a mullion to the corresponding gridline that they intersect. However, I'm getting a strange error towards the end of the script that I don't know how to correct: Python is telling me that it expected a MullionType and got a Family Type (see image). I'm using a modified version of Spring Nodes' Collector.WallTypes that collects Mullion Types instead, but the output of the node is a Family Type, which the script won't accept. Any idea how to get the Mullion Type to feed into the final Python node?
SpringNodes script:
# Copyright(c) 2016, Dimitar Venkov
# @5devene, dimitar.ven@gmail.com
import clr
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
doc = DocumentManager.Instance.CurrentDBDocument
clr.AddReference("RevitAPI")
from Autodesk.Revit.DB import *
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)

def tolist(obj1):
    if hasattr(obj1, "__iter__"):
        return obj1
    else:
        return [obj1]

fn = tolist(IN[0])
fn = [str(n) for n in fn]
result, similar, names = [], [], []
fec = FilteredElementCollector(doc).OfClass(MullionType)
for i in fec:
    n1 = Element.Name.__get__(i)
    names.append(n1)
    if any(fn1 == n1 for fn1 in fn):
        result.append(i.ToDSType(True))
    elif any(fn1.lower() in n1.lower() for fn1 in fn):
        similar.append(i.ToDSType(True))
if len(result) > 0:
    OUT = result, similar
if len(result) == 0 and len(similar) > 0:
    OUT = "No exact match found. Check partial below:", similar
if len(result) == 0 and len(similar) == 0:
    OUT = "No match found! Check names below:", names
The SpringNodes script outputs a Family Type, even though the collector is for Mullion Types (see above image)
Here's my script:
import clr

# Import RevitAPI
clr.AddReference("RevitAPI")
import Autodesk
from Autodesk.Revit.DB import *

# Import DocumentManager and TransactionManager
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager

# Import ToDSType(bool) extension method
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.GeometryConversion)

from System import Array
clr.AddReference('ProtoGeometry')
from Autodesk.DesignScript.Geometry import *
import math

doc = DocumentManager.Instance.CurrentDBDocument
app = DocumentManager.Instance.CurrentUIApplication.Application

walls = UnwrapElement(IN[0])
toggle = IN[1]
inputLine = IN[2]
mullionType = IN[3]

wallSrf = []
heights = []
finalPoints = []
directions = []
isPrimary = []
projectedCrvs = []
keySegments = []
keySegmentsGeom = []
gridSegments = []
gridSegmentsGeom = []
gridLines = []
gridLinesGeom = []
keyGridLines = []
keyGridLinesGeom = []
projectedGridlines = []
lineDirections = []
gridLineDirection = []
allTrueFalse = []

if toggle == True:
    TransactionManager.Instance.EnsureInTransaction(doc)
    for w, g in zip(walls, inputLine):
        pointCoords = []
        primary = []
        ## Get curtain wall element sketch line
        originLine = Revit.GeometryConversion.RevitToProtoCurve.ToProtoType(w.Location.Curve, True)
        originLineLength = w.Location.Curve.ApproximateLength
        ## Get curtain wall element height, loft to create surface
        for p in w.Parameters:
            if p.Definition.Name == 'Unconnected Height':
                height = p.AsDouble()
        topLine = originLine.Translate(0, 0, height)
        srfCurves = [originLine, topLine]
        wallSrf = NurbsSurface.ByLoft(srfCurves)
        ## Get centerpoint of curve, determine whether it extends across entire gridline
        projectedCrvCenterpoint = []
        for d in g:
            lineDirection = d.Direction.Normalized()
            lineDirections.append(lineDirection)
            curveProject = d.PullOntoSurface(wallSrf)
            if abs(lineDirection.Z) == 1:
                if curveProject.Length >= height - .5:
                    primary.append(False)
                else:
                    primary.append(True)
            else:
                if curveProject.Length >= originLineLength - .5:
                    primary.append(False)
                else:
                    primary.append(True)
            centerPoint = curveProject.PointAtParameter(0.5)
            pointList = []
            projectedCrvCenterpoint.append(centerPoint)
            ## Project centerpoint of curve onto wall surface
            for h in [centerPoint]:
                pointUnwrap = UnwrapElement(centerPoint)
                pointList.append(pointUnwrap.X)
                pointList.append(pointUnwrap.Y)
                pointList.append(pointUnwrap.Z)
            pointCoords.append(pointList)
        finalPoints.append(pointCoords)
        isPrimary.append(primary)
        projectedCrvs.append(projectedCrvCenterpoint)
    TransactionManager.Instance.TransactionTaskDone()

    TransactionManager.Instance.EnsureInTransaction(doc)
    ## Gather all segments of gridline geometry
    for wall in UnwrapElement(walls):
        gridSegments2 = []
        gridSegmentsGeom2 = []
        gridLines1 = []
        gridLinesGeom1 = []
        for id1 in wall.CurtainGrid.GetVGridLineIds():
            gridLinesGeom1.append(Revit.GeometryConversion.RevitToProtoCurve.ToProtoType(doc.GetElement(id1).FullCurve))
            gridLines1.append(doc.GetElement(id1))
            VgridSegments1 = []
            VgridSegmentsGeom1 = []
            for i in doc.GetElement(id1).AllSegmentCurves:
                VgridSegments1.append(i)
                VgridSegmentsGeom1.append(Revit.GeometryConversion.RevitToProtoCurve.ToProtoType(i, True))
            gridSegments2.append(VgridSegments1)
            gridSegmentsGeom2.append(VgridSegmentsGeom1)
        for id2 in wall.CurtainGrid.GetUGridLineIds():
            gridLinesGeom1.append(Revit.GeometryConversion.RevitToProtoCurve.ToProtoType(doc.GetElement(id2).FullCurve))
            gridLines1.append(doc.GetElement(id2))
            UgridSegments1 = []
            UgridSegmentsGeom1 = []
            for i in doc.GetElement(id2).AllSegmentCurves:
                UgridSegments1.append(i)
                UgridSegmentsGeom1.append(Revit.GeometryConversion.RevitToProtoCurve.ToProtoType(i, True))
            gridSegments2.append(UgridSegments1)
            gridSegmentsGeom2.append(UgridSegmentsGeom1)
        gridSegments.append(gridSegments2)
        gridSegmentsGeom.append(gridSegmentsGeom2)
        gridLines.append(gridLines1)
        gridLinesGeom.append(gridLinesGeom1)
    boolFilter = [[[[b.DoesIntersect(x) for x in d] for d in z] for b in a] for a, z in zip(projectedCrvs, gridSegmentsGeom)]
    boolFilter2 = [[[b.DoesIntersect(x) for x in z] for b in a] for a, z in zip(projectedCrvs, gridLinesGeom)]
    ## Select gridline segments that intersect with centerpoint of projected lines
    for x, y in zip(boolFilter, gridSegments):
        keySegments2 = []
        keySegmentsGeom2 = []
        for z in x:
            keySegments1 = []
            keySegmentsGeom1 = []
            for g, l in zip(z, y):
                for d, m in zip(g, l):
                    if d == True:
                        keySegments1.append(m)
                        keySegmentsGeom1.append(Revit.GeometryConversion.RevitToProtoCurve.ToProtoType(m, True))
            keySegments2.append(keySegments1)
            keySegmentsGeom2.append(keySegmentsGeom1)
        keySegments.append(keySegments2)
        keySegmentsGeom.append(keySegmentsGeom2)
    ## Order gridlines according to intersection with projected points
    for x, y in zip(boolFilter2, gridLines):
        keyGridLines1 = []
        keyGridLinesGeom1 = []
        for z in x:
            for g, l in zip(z, y):
                if g == True:
                    keyGridLines1.append(l)
                    keyGridLinesGeom1.append(Revit.GeometryConversion.RevitToProtoCurve.ToProtoType(l.FullCurve, True))
        keyGridLines.append(keyGridLines1)
        keyGridLinesGeom.append(keyGridLinesGeom1)
    ## Add mullions at intersected gridline segments
    TransactionManager.Instance.TransactionTaskDone()
    TransactionManager.Instance.EnsureInTransaction(doc)
    for x, y, z in zip(keyGridLines, keySegments, isPrimary):
        projectedGridlines1 = []
        for h, j, k in zip(x, y, z):
            for i in j:
                if i != None:
                    h.AddMullions(i, mullionType, k)
                    projectedGridlines1.append(h)
        projectedGridlines.append(projectedGridlines1)
else:
    None

if toggle == True:
    OUT = projectedGridlines
else:
    None
TransactionManager.Instance.TransactionTaskDone()
Apologies for the messiness of the code, it's a modification of another node that I've been working on. Thanks for your help.
Bo,
Your problem is rooted in how Dynamo wraps elements for use with its own model. That last call, .ToDSType(True), is the gist of the issue. The MullionType class is a subclass of (it inherits properties from) the ElementType class in Revit. When the Dynamo team wrapped that object into a custom wrapper, they only wrote a top-level wrapper that treats all ElementTypes the same, hence this outputs an ElementType/FamilyType rather than a specific MullionType.
First, I would suggest that you replace this line in your code:
mullionType = IN[3]
with:
mullionType = UnwrapElement(IN[3])
This is Dynamo's built-in method for unwrapping elements so they can be used with calls to the Revit API.
If that still somehow remains an issue, you could try and retrieve the MullionType object again, this time directly in your script, before you use it. You can do so like this:
for x, y, z in zip(keyGridLines, keySegments, isPrimary):
    projectedGridlines1 = []
    for h, j, k in zip(x, y, z):
        for i in j:
            if i != None:
                h.AddMullions(i, doc.GetElement(mullionType.Id), k)
                projectedGridlines1.append(h)
    projectedGridlines.append(projectedGridlines1)
This should make sure that you get the MullionType element as it was before it was wrapped.
Again, try unwrapping it first, and fall back to the GetElement() call if the first approach doesn't work.