My goal is to create a dictionary called 'sum_by_department' that has each department as a key and the combined annual salary of all employees in that department as the value. So far this is what I have, but I'm not sure how to add the department names along with the sum of their employees' salaries to the dictionary. The dictionary I have now only maps each salary amount to how many times it appears in the file. This is where I need help.
import requests
# endpoint
endpoint = "https://data.cityofchicago.org/resource/xzkq-xp2w.json"
# optional parameters
parameters = {"$limit":20,}
# make request
response = requests.get(endpoint, params=parameters)
# Get the response data as a python object.
data = response.json()
count_by_department = {}
sum_by_department = {}
#loop through the data
for i in data:
    if ('department' and 'salary_or_hourly' and 'annual_salary' in i):
        department = i['department']
        pay_type = i['salary_or_hourly']
        anual_salary = i['annual_salary']
        # print(i['annual_salary'])
    else:
        # handle case where there is no department property in that record
        department = 'undefined'
        pay_type = 'n/a'
        anual_salary = 'n/a'
        # print(department,"," ,pay_type)
    # exclude the cases where the pay type is Hourly
    if(pay_type != 'Salary' ):
        pay_type = 0
        # print(department,"," ,pay_type)
    # update the sum_by_department and count_by_department dictionaries
    if (department in count_by_department):
        count_by_department[department] += 1
    else:
        count_by_department[department] = 1
    if (anual_salary in sum_by_department):
        sum_by_department[anual_salary] += 1
    else:
        sum_by_department[anual_salary] = 1
# print(count_by_department)
# print(sum_by_department)
You should add each person's annual_salary to the sum_by_department dictionary while looping, keyed by the department. Also, do not forget to convert your annual_salary variable to float, because adding the values together as strings won't work.
Example script:
import requests
# endpoint
endpoint = "https://data.cityofchicago.org/resource/xzkq-xp2w.json"
# optional parameters
parameters = {"$limit":20,}
# make request
response = requests.get(endpoint, params=parameters)
# Get the response data as a python object.
data = response.json()
count_by_department = {}
sum_by_department = {}
#loop through the data
for i in data:
    if 'department' in i and 'salary_or_hourly' in i and 'annual_salary' in i:
        department = i['department']
        pay_type = i['salary_or_hourly']
        annual_salary = float(i['annual_salary'])
        # print(i['annual_salary'])
    else:
        # handle case where there is no department or annual_salary property in that record
        department = 'undefined'
        pay_type = 'n/a'
        annual_salary = 0
        # print(department,"," ,pay_type)
    # exclude the cases where the pay type is Hourly
    if pay_type != 'Salary':
        annual_salary = 0  # hourly records contribute nothing to the salary sum
        # print(department,"," ,pay_type)
    # update the sum_by_department and count_by_department dictionaries
    if department in count_by_department:
        count_by_department[department] += 1
        sum_by_department[department] += annual_salary
    else:
        count_by_department[department] = 1
        sum_by_department[department] = annual_salary
#import pdb; pdb.set_trace();
print('count_by_department = ', count_by_department)
print('sum_by_department = ', sum_by_department)
Tip:
Uncomment the pdb line to debug interactively. The Python Debugger (pdb for short) halts the program while it's still running (i.e. in memory), so you can interact with it and inspect all variables.
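As an aside, the same aggregation can be written a bit more compactly with collections.defaultdict, which removes the membership checks. This is only a sketch against the same data list returned by the API call above; note that, unlike the script above, it keeps hourly employees under their actual department with a salary contribution of 0 rather than grouping them under 'undefined':
from collections import defaultdict

count_by_department = defaultdict(int)
sum_by_department = defaultdict(float)

for record in data:
    department = record.get('department', 'undefined')
    # hourly employees have no 'annual_salary' field, so they contribute 0
    annual_salary = float(record.get('annual_salary', 0))
    count_by_department[department] += 1
    sum_by_department[department] += annual_salary

print('count_by_department = ', dict(count_by_department))
print('sum_by_department = ', dict(sum_by_department))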
So, I'm trying to generate some fake random data of a given dimension size. Essentially, I want a dataframe in which the data has a uniform random distribution. The data consist of both continuous and categorical values. I've written the following code, but it doesn't work the way I want it to.
import random
import pandas as pd
import time
from datetime import datetime
# declare global variables
adv_name = ['soft toys', 'kitchenware', 'electronics',
'mobile phones', 'laptops']
adv_loc = ['location_1', 'location_2', 'location_3',
'location_4', 'location_5']
adv_prod = ['baby product', 'kitchenware', 'electronics',
'mobile phones', 'laptops']
adv_size = [1, 2, 3, 4, 10]
adv_layout = ['static', 'dynamic'] # advertisment layout type on website
# adv_date, start_time, end_time = []
num = 10 # the given dimension
# define function to generate random advert locations
def rand_shuf_loc(str_lst, num):
    lst = adv_loc
    # using list comprehension
    rand_shuf_str = [item for item in lst for i in range(num)]
    return(rand_shuf_str)
# define function to generate random advert names
def rand_shuf_prod(loc_list, num):
    rand_shuf_str = [item for item in loc_list for i in range(num)]
    random.shuffle(rand_shuf_str)
    return(rand_shuf_str)
# define function to generate random impression and click data
def rand_clic_impr(num):
    rand_impr_lst = []
    click_lst = []
    for i in range(num):
        rand_impr_lst.append(random.randint(0, 100))
        click_lst.append(random.randint(0, 100))
    return {'rand_impr_lst': rand_impr_lst, 'rand_click_lst': click_lst}
# define function to generate random product price and discount
def rand_prod_price_discount(num):
    prod_price_lst = []  # advertised product price
    prod_discnt_lst = []  # advertised product discount
    for i in range(num):
        prod_price_lst.append(random.randint(10, 100))
        prod_discnt_lst.append(random.randint(10, 100))
    return {'prod_price_lst': prod_price_lst, 'prod_discnt_lst': prod_discnt_lst}
def rand_prod_click_timestamp(stime, etime, num):
    prod_clik_tmstmp = []
    frmt = '%d-%m-%Y %H:%M:%S'
    for i in range(num):
        rtime = int(random.random()*86400)
        hours = int(rtime/3600)
        minutes = int((rtime - hours*3600)/60)
        seconds = rtime - hours*3600 - minutes*60
        time_string = '%02d:%02d:%02d' % (hours, minutes, seconds)
        prod_clik_tmstmp.append(time_string)
    time_stmp = [item for item in prod_clik_tmstmp for i in range(num)]
    return {'prod_clik_tmstmp_lst': time_stmp}
def main():
    print('generating data...')
    # print('generating random geographic coordinates...')
    # get the impressions and click data
    impression = rand_clic_impr(num)
    clicks = rand_clic_impr(num)
    product_price = rand_prod_price_discount(num)
    product_discount = rand_prod_price_discount(num)
    prod_clik_tmstmp = rand_prod_click_timestamp("20-01-2018 13:30:00",
                                                 "23-01-2018 04:50:34", num)
    lst_dict = {"ad_loc": rand_shuf_loc(adv_loc, num),
                "prod": rand_shuf_prod(adv_prod, num),
                "imprsn": impression['rand_impr_lst'],
                "cliks": clicks['rand_click_lst'],
                "prod_price": product_price['prod_price_lst'],
                "prod_discnt": product_discount['prod_discnt_lst'],
                "prod_clik_stmp": prod_clik_tmstmp['prod_clik_tmstmp_lst']}
    fake_data = pd.DataFrame.from_dict(lst_dict, orient="index")
    res = fake_data.apply(lambda x: x.fillna(0)
                          if x.dtype.kind in 'biufc'
                          # where 'biufc' means boolean, integer,
                          # unicode, float & complex data types
                          else x.fillna(random.randint(0, 100)
                                        )
                          )
    print(res.transpose())
    res.to_csv("fake_data.csv", sep=",")
# invoke the main function
if __name__ == "__main__":
    main()
Problem 1
When I execute the above code snippet, it prints fine, but when written to CSV format the data is positioned horizontally; i.e., it looks like this... How do I position it vertically when writing to the CSV file? What I want is 7 columns (see the lst_dict variable above) with n rows.
Problem 2
I don't understand why random dates are generated for the first 50 columns while the remaining columns are filled with numerical values.
To answer your first question, replace
print(res.transpose())
with
res = res.transpose()
print(res)
This way res itself holds the transposed frame before it is printed and written to the CSV file.
To answer your second question, look at the length of the output of the method
rand_shuf_loc()
It, like rand_shuf_prod(), produces a list of 50 items, while the other helper functions return lists of different lengths, so the rows of fake_data do not line up.
The creation of res using the method
fake_data.apply
replaces every NaN with a random number, so the columns without any predefined values are also filled with numerics.
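Putting both points together, the tail of main() would then look roughly like this (a sketch only; the helper functions still return lists of different lengths, so the shorter rows are padded with the filled-in values described above):
res = res.transpose()                 # 7 named columns, one row per generated record
print(res)
res.to_csv("fake_data.csv", sep=",")  # written vertically, since res itself is now transposed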
I am feeding a long list of inputs into a function that calls an API to retrieve data. My list has around 40,000 unique inputs. Currently, the function returns output every 1-2 seconds or so. Quick maths tells me that it would take over 10 hours before my function is done. I therefore want to speed this process up, but I'm struggling to find a solution. I am quite a beginner, so threading/pooling is quite difficult for me. I hope someone is able to help me out here.
The function:
import quandl
import datetime
import numpy as np
quandl.ApiConfig.api_key = 'API key here'
def get_data(issue_date, stock_ticker):
    # Prepare var
    stock_ticker = "EOD/" + stock_ticker
    # Volatility
    date_1 = datetime.datetime.strptime(issue_date, "%d/%m/%Y")
    pricing_date = date_1 + datetime.timedelta(days=-40)  # -40 days of issue date
    volatility_date = date_1 + datetime.timedelta(days=-240)  # -240 days of issue date (-40,-240 range)
    # Check if code exists : if not -> return empty array
    try:
        stock = quandl.get(stock_ticker, start_date=volatility_date, end_date=pricing_date)  # get pricing data
    except quandl.errors.quandl_error.NotFoundError:
        return []
    daily_close = stock['Adj_Close'].pct_change()  # returns using adj.close
    stock_vola = np.std(daily_close) * np.sqrt(252)  # annualized volatility
    # Average price
    stock_pricing_date = date_1 + datetime.timedelta(days=-2)  # -2 days of issue date
    stock_pricing_date2 = date_1 + datetime.timedelta(days=-12)  # -12 days of issue date
    stock_price = quandl.get(stock_ticker, start_date=stock_pricing_date2, end_date=stock_pricing_date)
    stock_price_average = np.mean(stock_price['Adj_Close'])  # get average price
    # Amihuds Liquidity measure
    liquidity_pricing_date = date_1 + datetime.timedelta(days=-20)
    liquidity_pricing_date2 = date_1 + datetime.timedelta(days=-120)
    stock_data = quandl.get(stock_ticker, start_date=liquidity_pricing_date2, end_date=liquidity_pricing_date)
    p = np.array(stock_data['Adj_Close'])
    returns = np.array(stock_data['Adj_Close'].pct_change())
    dollar_volume = np.array(stock_data['Adj_Volume'] * p)
    illiq = (np.divide(returns, dollar_volume))
    print(np.nanmean(illiq))
    illiquidity_measure = np.nanmean(illiq, dtype=float) * (10 ** 6)  # multiply by 10^6 for expositional purposes
    return [stock_vola, stock_price_average, illiquidity_measure]
I then use a separate script to select my CSV file containing the list of rows, each row holding the issue_date and stock_ticker:
import function
import csv
import tkinter as tk
from tkinter import filedialog
# Open File Dialog
root = tk.Tk()
root.withdraw()
file_path = filedialog.askopenfilename()
# Load Spreadsheet data
f = open(file_path)
csv_f = csv.reader(f)
next(csv_f)
result_data = []
# Iterate
for row in csv_f:
    try:
        return_data = function.get_data(row[1], row[0])
        if len(return_data) != 0:
            # print(return_data)
            result_data_loc = [row[1], row[0]]
            result_data_loc.extend(return_data)
            result_data.append(result_data_loc)
    except AttributeError:
        print(row[0])
        print('\n\n')
        print(row[1])
        continue
if result_data is not None:
    with open('resuls.csv', mode='w', newline='') as result_file:
        csv_writer = csv.writer(result_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        for result in result_data:
            # print(result)
            csv_writer.writerow(result)
else:
    print("No results found!")
It is quite messy, but like I mentioned before, I am definitely a beginner. Speeding this up would greatly help me.
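One common way to speed up I/O-bound work like these repeated API calls is a thread pool. Below is a minimal sketch using concurrent.futures.ThreadPoolExecutor; it assumes the get_data function above is importable as function.get_data, the input file name is a placeholder, error handling is omitted, and the quandl API may rate-limit parallel requests, so keep max_workers modest:
import csv
from concurrent.futures import ThreadPoolExecutor, as_completed

import function  # the module containing get_data, as above

def process_row(row):
    # row = [stock_ticker, issue_date], matching the original loop
    return_data = function.get_data(row[1], row[0])
    if return_data:
        return [row[1], row[0]] + return_data
    return None

with open('input.csv', newline='') as f:   # placeholder for the selected CSV file
    reader = csv.reader(f)
    next(reader)                           # skip the header row
    rows = list(reader)

results = []
with ThreadPoolExecutor(max_workers=8) as executor:
    futures = [executor.submit(process_row, row) for row in rows]
    for future in as_completed(futures):
        result = future.result()
        if result is not None:
            results.append(result)

with open('results.csv', mode='w', newline='') as result_file:
    csv.writer(result_file).writerows(results)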
I want to scrape information about supermarket products, taking into account that some of the info (the origin of the product) isn't always available.
I am trying to iterate over a dataframe of links to a supermarket's product pages. From each of them, I want to get some information. However, the origin of the products isn't always available, and I don't know how to make Python look for 'origin' only when it is available. I've tried the following code:
import urllib.request
from bs4 import BeautifulSoup
import csv
import os
import pandas as pd
dir = ''
file = 'data.xlsx'
sheetname="Hoja1"
# create and write headers to a list
rows = []
rows.append(['Brand', 'Product', 'Product_Number', 'Gross_Weight', 'Origin'])
# Change working directory:
os.chdir(dir)
# Retrieve current working directory ('cwd'):
cwd = os.getcwd()
cwd
# Load spreadsheet:
xl = pd.ExcelFile(file)
# Load a sheet into a DataFrame by name: df1
df = xl.parse(sheetname)
for index, row in df.iterrows():
    # specify the url
    urlpage = row['link']
    #print(urlpage)
    # query the website and return the html to the variable 'page'
    page = urllib.request.urlopen(urlpage)
    # parse the html using beautiful soup and store in variable 'soup'
    soup = BeautifulSoup(page, 'html.parser')
    # find results within table
    results = soup.find_all('dl', attrs={'class': 'des_info clearfix'})
    #print('Number of results', len(results))
    for result in results:
        # find all columns per result
        data = result.find_all('dd')
        # check that columns have data
        if len(data) == 0:
            continue
        # write columns to variables
        brand = data[0].getText()
        product = data[1].getText()
        number = data[2].getText()
        weight = data[3].getText()
        if data[4].getText() == None:
            origin = 0
        else:
            origin = data[4].getText()
        # write each result to rows
        rows.append([brand, product, number, weight, origin])
I get the following error:
if data[4].getText() == None:
IndexError: list index out of range
I would like to get all the data ordered in a list and, if the origin isn't available for one item, a zero.
You can use a try statement:
# write columns to variables
brand = data[0].getText()
product = data[1].getText()
number = data[2].getText()
weight = data[3].getText()
try:
    origin = data[4].getText()
except IndexError:
    origin = 0
You could also check the length of data before indexing into it. Note that data[4] exists only when data has at least five elements:
if len(data) > 4:
    origin = data[4].getText()
else:
    origin = 0
I'm writing string entries to a tab-delimited file in Python 3. The code that I use to save the content is:
savedir = easygui.diropenbox()
savefile = input("Please type the filename (including extension): ")
file = open(os.path.join(savedir, savefile), "w", encoding="utf-8")
file.write("Number of entities not found: " + str(missing_count) + "\n")
sep = "\t"
for entry in entities:
    file.write(entry[0]+"\t")
    for item in entry:
        file.write(sep.join(item[0]))
        file.write("\t")
    file.write("\n")
file.close()
The file saves properly. There are no errors or warnings sent to the terminal. When I open the file, I find an extra column has been saved to the file.
Query | Extra | Name
Abu-Jamal, Mumia | A | Mumia Abu-Jamal
Anderson, Walter | A | Walter Inglis Anderson
Anderson, Walter | A | Walter Inglis Anderson
I've added vertical bars between the tabs for clarity; they don't normally appear there. As well, I have removed a few columns at the end. The column between the vertical bars is not supposed to be there. The document that is saved to file is longer than three lines. On each line, the extra column is the first letter of the Query column. Hence, we have A's in these three examples.
entry[0] corresponds exactly to the value in the Query column.
sep.join(item[0]) corresponds exactly to columns 3+.
Any idea why I would be getting this extra column?
Edit: I'm adding the full code for this short script.
# =============================================================================
# Code to query DBpedia for named entities.
#
# =============================================================================
import requests
import xml.etree.ElementTree as et
import csv
import os
import easygui
import re
# =============================================================================
# Default return type is XML. Others: json.
# Classes are: Resource (general), Place, Person, Work, Species, Organization
# but don't include resource as one of the
# =============================================================================
def urlBuilder(query, queryClass="unknown", returns=10):
    prefix = 'http://lookup.dbpedia.org/api/search/KeywordSearch?'
    #Selects the appropriate QueryClass for the url
    if queryClass == 'place':
        qClass = 'QueryClass=place'
    elif queryClass == 'person':
        qClass = 'QueryClass=person'
    elif queryClass == 'org':
        qClass = 'QueryClass=organization'
    else:
        qClass = 'QueryClass='
    #Sets the QueryString
    qString = "QueryString=" + str(query)
    #sets the number of returns
    qHits = "MaxHits=" + str(returns)
    #full url
    dbpURL = prefix + qClass + "&" + qString + "&" + qHits
    return dbpURL
#takes a xml doc as STRING and returns an array with the name and the URI
def getdbpRecord(xmlpath):
    root = et.fromstring(xmlpath)
    dbpRecord = []
    for child in root:
        temp = []
        temp.append(child[0].text)
        temp.append(child[1].text)
        if child[2].text is None:
            temp.append("Empty")
        else:
            temp.append(findDates(child[2].text))
        dbpRecord.append(temp)
    return dbpRecord
#looks for a date with pattern: 1900-01-01 OR 01 January 1900 OR 1 January 1900
def findDates(x):
    pattern = re.compile('\d{4}-\d{2}-\d{2}|\d{2}\s\w{3,9}\s\d{4}|\d{1}\s\w{3,9}\s\d{4}')
    returns = pattern.findall(x)
    if len(returns) > 0:
        return ";".join(returns)
    else:
        return "None"
#%%
# =============================================================================
# Build and send get requests
# =============================================================================
print("Please select the CSV file that contains your data.")
csvfilename = easygui.fileopenbox("Please select the CSV file that contains your data.")
lookups = []
name_list = csv.reader(open(csvfilename, newline=''), delimiter=",")
for name in name_list:
    lookups.append(name)
#request to get the max number of returns from the user.
temp = input("Specify the maximum number of returns desired: ")
if temp.isdigit():
    maxHits = temp
else:
    maxHits = 10
queries = []
print("Building queries. Please wait.")
for search in lookups:
    if len(search) == 2:
        queries.append([search[0], urlBuilder(query=search[0], queryClass=search[1], returns=maxHits)])
    else:
        queries.append([search, urlBuilder(query=search, returns=maxHits)])
responses = []
print("Gathering responses. Please wait.")
for item in queries:
    response = requests.get(item[1])
    data = response.content.decode("utf-8")
    responses.append([item[0], data])
entities = []
missing_count = 0
for item in responses:
    temp = []
    if len(list(et.fromstring(item[1]))) > 0:
        entities.append([item[0], getdbpRecord(item[1])])
    else:
        missing_count += 1
print("There are " + str(missing_count) + " entities that were not found.")
print("Please select the destination folder for the results of the VIAF lookup.")
savedir = easygui.diropenbox("Please select the destination folder for the results of the VIAF lookup.")
savefile = input("Please type the filename (including extension): ")
file = open(os.path.join(savedir, savefile), "w", encoding="utf-8")
file.write("Number of entities not found: " + str(missing_count) + "\n")
sep = "\t"
for entry in entities:
    file.write(entry[0]+"\t")
    for item in entry:
        file.write(sep.join(item[0]))
        file.write("\t")
    file.write("\n")
file.close()
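For reference, the extra column comes from the inner loop: for item in entry iterates over both elements of entry, so on the first pass item is the query string entry[0], and sep.join(item[0]) then writes just its first letter. A sketch of one possible fix, assuming each entities entry is [query, records] as built above, is to iterate over entry[1] instead (or write sep.join(entry[1][0]) if only the top hit per query is wanted):
for entry in entities:
    file.write(entry[0] + "\t")
    # entry[1] is the list of [name, uri, dates] records returned by getdbpRecord
    for record in entry[1]:
        file.write(sep.join(record))
        file.write("\t")
    file.write("\n")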
I'm new to Python and programming in general and need a little help with this (partially finished) function. It reads a text file with a bunch of rows of comma-delimited data (age, salary, education and so on). However, I've run into a problem from the outset: I don't know how to return the results.
My aim is to create dictionaries for each category, with each row sorted and tallied,
e.g. 100 people over 50, 200 people under 50, and so on.
Am I in the correct ballpark?
file = "adultdata.txt"
def make_data(file):
    try:
        f = open(file, "r")
    except IOError as e:
        print(e)
        return none
    large_list = []
    avg_age = 0
    row_count_under50 = 0
    row_count_over50 = 0
    #create 2 dictionaries per category
    employ_dict_under50 = {}
    employ_dict_over50 = {}
    for row in f:
        edited_row = row.strip()
        my_list = edited_row.split(",")
        try:
            #Age Category
            my_list[0] = int(my_list[0])
            #Work Category
            if my_list[-1] == " <=50K":
                if my_list[1] in employ_dict_under50:
                    employ_dict_under50[my_list[1]] += 1
                else:
                    employ_dict_under50[my_list[1]] = 1
                row_count_u50 += 1
            else:
                if my_list[1] in emp_dict_o50:
                    employ_dict_over50[my_list[1]] += 1
                else:
                    employ_dict_over50[my_list[1]] = 1
                row_count_o50 += 1
            # Other categories here
            print(my_list)
            #print(large_list)
            #return
            # Ignored categories here - e.g. my_list[insert my list numbers here] = None
I do not have access to your file, but I had a go at correcting most of the errors in your code.
Here is a list of the mistakes I found in your code:
Your function make_data is never called, so as written it is essentially useless. You can remove it entirely.
When using a file object f, you can loop over it directly (for row in f:) to read the data line by line.
It is also best to use a with statement when using IO resources like files
You had numerous variables in the inner loop that were badly named or did not exist (for example row_count_u50 and emp_dict_o50).
You opened a try block in the inner loop without an except clause. You can remove the try because you are not catching any errors.
You have some very basic errors related to general programming. Can I assume you're new to this? If that's the case, you should probably follow some more beginner tutorials online until you get a grasp of which commands you need to perform basic tasks.
Try comparing your code to this and see if you can understand what I'm trying to say:
file = "adultdata.txt"
large_list = []
avg_age = 0
row_count_under50 = 0
row_count_over50 = 0
#create 2 dictionaries per category
employ_dict_under50 = {}
employ_dict_over50 = {}
with open(file, "r") as f:
row = f.readline()
edited_row = row.strip()
my_list = edited_row.split(",")
#Age Category
my_list[0] = int(my_list[0])
#Work Category
if my_list[-1] == " <=50K":
if my_list[1] in employ_dict_under50:
employ_dict_under50[my_list[1]] += 1
else:
employ_dict_under50[my_list[1]] = 1
row_count_under50 += 1
else:
if my_list[1] in employ_dict_over50:
employ_dict_over50[my_list[1]] += 1
else:
employ_dict_over50[my_list[1]] = 1
row_count_over50 += 1
# Other categories here
print(my_list)
#print(large_list)
#return
I cannot say for certain whether this code will work without your file, but it should give you a head start.
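As an aside, if the goal is simply to tally categories per income bracket, collections.Counter removes the manual if/else bookkeeping. This is a small sketch under the same assumption about the file layout (column 1 holds the employment category and the last column the income bracket):
from collections import Counter

employ_under50 = Counter()
employ_over50 = Counter()

with open("adultdata.txt", "r") as f:
    for row in f:
        my_list = row.strip().split(",")
        if len(my_list) < 2:
            continue  # skip blank or malformed lines
        if my_list[-1] == " <=50K":
            employ_under50[my_list[1]] += 1
        else:
            employ_over50[my_list[1]] += 1

print(employ_under50)
print(employ_over50)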