Loop to append multiple lists into a dataframe in Python - python-3.x

I requests.get() two URLs from Yahoo using YQL, each of which returns a JSON object. I loop over the responses to parse the data, building a dict per quote and appending it to a list, and then create a pandas dataframe from that list. What happens is that only one batch ends up in the dataframe: on the last iteration, the second batch overwrites the first. At this point, I can't figure out how to iterate so that both batches end up in the dataframe. Here is my code...
import requests
import pandas as pd
urls = ['https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20yahoo.finance.historicaldata%20where%20symbol%20in%20(%22DIA%22%2C%22SPY%22%2C%22IWN%22)%20and%20startDate%20%3D%20%222015-01-01%22%20and%20endDate%20%3D%20%222015-10-31%22&format=json&diagnostics=true&env=store%3A%2F%2Fdatatables.org%2Falltableswithkeys&callback=',
'https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20yahoo.finance.historicaldata%20where%20symbol%20in%20(%22DIA%22%2C%22SPY%22%2C%22IWN%22)%20and%20startDate%20%3D%20%222015-11-01%22%20and%20endDate%20%3D%20%222016-08-31%22&format=json&diagnostics=true&env=store%3A%2F%2Fdatatables.org%2Falltableswithkeys&callback=']
for url in urls:
    data = requests.get(url)
    data_json = data.json()
    quote_list = []  # note: this list is re-created on every pass, discarding the previous batch
    for quote in data_json['query']['results']['quote']:
        quote_dic = {'symbol': quote['Symbol'],
                     'date': quote['Date'],
                     'volume': quote['Volume'],
                     'low': quote['Low'],
                     'high': quote['High'],
                     'open': quote['Open'],
                     'close': quote['Close'],
                     'adj_close': quote['Adj_Close']}
        quote_list.append(quote_dic)

quote_df = pd.DataFrame(quote_list)
quote_df.to_csv('stocks.csv')
I need both batches of quotes to end up in the dataframe. What would be the fix for this code?

Just create a list of dataframes, and concat them at the end of the loop:
df_list = []
for url in urls:
    data = requests.get(url)
    data_json = data.json()
    df = pd.DataFrame(data_json['query']['results']['quote'])
    df_list.append(df)

quote_df = pd.concat(df_list)
quote_df.to_csv('stocks.csv')
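One small addition: if you'd rather have one continuous row index in the combined frame, instead of each piece keeping its own 0-based index, pd.concat accepts ignore_index:

quote_df = pd.concat(df_list, ignore_index=True)  # resets the row index across the concatenated pieces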

How about this solution?
import urllib.request
import json

symbolslist = open("C:/Users/your_path_here/Desktop/stock_symbols.txt").read()
symbolslist = symbolslist.split("\n")

for symbol in symbolslist:
    # create (or truncate) the output file for this symbol
    myfile = open("C:/Users/your_path_here/Desktop/symbols/" + symbol + ".txt", "w+")
    myfile.close()

    htmltext = urllib.request.urlopen("http://www.bloomberg.com/markets/chart/data/1D/" + symbol + ":US")
    data = json.load(htmltext)
    datapoints = data["data_values"]

    myfile = open("C:/Users/your_path_here/Desktop/symbols/" + symbol + ".txt", "a")
    for point in datapoints:
        myfile.write(str(symbol + "," + str(point[0]) + "," + str(point[1]) + "\n"))
    myfile.close()
The input file C:/Users/your_path_here/Desktop/stock_symbols.txt contains the following tickers:
ibm
sbux
msft

Related

Pandas - Add items to dataframe

I am trying to add row items to a dataframe, and I am not able to update it.
What I tried so far is commented out because it doesn't do what I need.
I simply want to download the JSON file and store it in a dataframe with the given columns. It seems I am not able to extract the child components from the JSON file and store them in a brand new dataframe.
Please find my code below:
import requests, json, urllib
import pandas as pd

url = "https://www.cisa.gov/sites/default/files/feeds/known_exploited_vulnerabilities.json"
data = pd.read_json(url)
headers = []
df = pd.DataFrame()

for key, item in data['vulnerabilities'].items():
    for k in item.keys():
        headers.append(k)

col = list(set(headers))
new_df = pd.DataFrame(columns=col)

for item in data['vulnerabilities'].items():
    print(item[1])
    # new_df['product'] = item[1]['product']
    # new_df['vendorProject'] = item[1]['vendorProject']
    # new_df['dueDate'] = item[1]['dueDate']
    # new_df['shortDescription'] = item[1]['shortDescription']
    # new_df['dateAdded'] = item[1]['dateAdded']
    # new_df['vulnerabilityName'] = item[1]['vulnerabilityName']
    # new_df['cveID'] = item[1]['cveID']
    # new_df.append(item[1], ignore_index = True)

new_df
At the end, my dataframe is still empty.
The nested JSON data can be directly converted to a flattened dataframe using pd.json_normalize(). The headers are extracted from the JSON itself.
new_df = pd.json_normalize(data['vulnerabilities'])  # json_normalize already returns a DataFrame
UPDATE: Unnested the vulnerabilities column specifically.
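For reference, a minimal end-to-end sketch of that approach (assuming the feed keeps its current shape, with the records under a top-level vulnerabilities key):

import pandas as pd

url = "https://www.cisa.gov/sites/default/files/feeds/known_exploited_vulnerabilities.json"
data = pd.read_json(url)
new_df = pd.json_normalize(data['vulnerabilities'])  # one row per vulnerability record
print(new_df.columns.tolist())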
It worked with this:
import requests, json, urllib
import pandas as pd

url = "https://www.cisa.gov/sites/default/files/feeds/known_exploited_vulnerabilities.json"
data = pd.read_json(url)
headers = []
df = pd.DataFrame()

for key, item in data['vulnerabilities'].items():
    for k in item.keys():
        headers.append(k)

col = list(set(headers))
new_df = pd.DataFrame(columns=col)

for item in data['vulnerabilities'].items():
    new_df.loc[len(new_df.index)] = item[1]  # <=== THIS

new_df.head()
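The marked line works because assigning a dict to df.loc[<new label>] aligns the dict's keys with the frame's columns and appends a row. A toy illustration:

import pandas as pd

df = pd.DataFrame(columns=['a', 'b'])
df.loc[len(df.index)] = {'a': 1, 'b': 2}  # keys are matched to column names
df.loc[len(df.index)] = {'b': 4, 'a': 3}  # order of keys does not matter
print(df)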

How to assign bs4 strings to pandas dataframe in for loop?

I have HTML strings that I am successfully able to use beautifulsoup4 on to extract the elements I need.
The HTML strings are in a list, and I want to extract only certain elements from each string and assign them to dataframe columns.
Current code:
import pandas as pd
from bs4 import BeautifulSoup

lst = [<html>, <html>]  # placeholder for the list of HTML strings
df = pd.DataFrame()

for i in lst:
    soup = BeautifulSoup(i)
    for link in soup.find_all('a'):
        df['links'] = str(link.get('href'))
        #print(link.get('href'))

    #get all text messages
    soup.find_all('p')
    df['messages'] = str(soup.find_all('p'))

    #get author name
    soup.find_all(class_="author--name")
    df['author'] = str(soup.find_all(class_="author--name"))

    #get username
    soup.find_all(class_= "author--username")
    df['username'] = str(soup.find_all(class_= "author--username"))
All the soup lines produce the data I need, so why doesn't the dataframe assign the string values to its columns?
I can see that, starting from an empty dataframe, the code creates the new columns, but they hold no values.
What am I doing wrong?
The solution was to wrap each assigned value in brackets, making it a one-element list, like so:
for i in lst:
    df = pd.DataFrame()
    soup = BeautifulSoup(i)
    #print(soup)
    for link in soup.find_all('a'):
        df['links'] = [str(link.get('href'))]
        #print(link.get('href'))

    #get all text messages
    soup.find_all('p')
    df['messages'] = [str(soup.find_all('p'))]

    #get author name
    soup.find_all(class_="author--name")
    df['author'] = [str(soup.find_all(class_="author--name"))]

    #get username
    soup.find_all(class_= "author--username")
    df['username'] = [str(soup.find_all(class_= "author--username"))]
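For what it's worth, here is a sketch of an arguably cleaner pattern, assuming each HTML string in lst represents one message: build a dict per string and construct the dataframe once at the end, instead of reassigning columns on every pass.

import pandas as pd
from bs4 import BeautifulSoup

rows = []
for html in lst:  # lst is the list of HTML strings from the question
    soup = BeautifulSoup(html, 'html.parser')
    rows.append({
        'links': [a.get('href') for a in soup.find_all('a')],
        'messages': [p.get_text() for p in soup.find_all('p')],
        'author': [t.get_text() for t in soup.find_all(class_='author--name')],
        'username': [t.get_text() for t in soup.find_all(class_='author--username')],
    })

df = pd.DataFrame(rows)  # one row per HTML string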

Python - Creating a for loop to build a single csv file with multiple dataframes

I am new to Python and trying various things to learn the fundamentals. One of the things I'm currently stuck on is for loops. I have the following code and am positive it can be built more efficiently with a loop, but I'm not sure exactly how.
import pandas as pd
import numpy as np

url1 = 'https://www.cbssports.com/nfl/stats/player/receiving/nfl/regular/qualifiers/?page=1'
url2 = 'https://www.cbssports.com/nfl/stats/player/receiving/nfl/regular/qualifiers/?page=2'
url3 = 'https://www.cbssports.com/nfl/stats/player/receiving/nfl/regular/qualifiers/?page=3'

df1 = pd.read_html(url1)
df1[0].to_csv('NFL_Receiving_Page1.csv', index=False)  # index=False drops the index column that would otherwise appear first in the csv
df2 = pd.read_html(url2)
df2[0].to_csv('NFL_Receiving_Page2.csv', index=False)
df3 = pd.read_html(url3)
df3[0].to_csv('NFL_Receiving_Page3.csv', index=False)

df_receiving_agg = pd.concat([df1[0], df2[0], df3[0]])
df_receiving_agg.to_csv('NFL_Receiving_Combined.csv', index=False)
I'm ultimately trying to combine the data from the above URLs into a single table in a csv file.
You can try this:
urls = [url1, url2, url3]
df_receiving_agg = pd.DataFrame()
for url in urls:
    df = pd.read_html(url)[0]  # read_html returns a list of tables; take the first
    df_receiving_agg = pd.concat([df_receiving_agg, df])
df_receiving_agg.to_csv('filepath.csv', index=False)
You can do this:
base_url = 'https://www.cbssports.com/nfl/stats/player/receiving/nfl/regular/qualifiers/?page='
dfs = []
for page in range(1, 4):
    url = f'{base_url}{page}'
    df = pd.read_html(url)[0]  # take the first table on the page
    df.to_csv(f'NFL_Receiving_Page{page}.csv', index=False)
    dfs.append(df)

df_receiving_agg = pd.concat(dfs)
df_receiving_agg.to_csv('NFL_Receiving_Combined.csv', index=False)

appending Dict to nested list per request made

I am scraping an XML API response. For each request I want to gather a piece of information and create a dictionary each time I find it. Each request can return several IDs: one response might have 2 IDs while the next has 3. For example, say the first response has 2 IDs. At the moment I store this data in a single list, so when the second request is done its additional 3 IDs end up in that same list as well.
import requests
import pandas as pd
from pandas import DataFrame
from bs4 import BeautifulSoup
import datetime as datetime
import json
import time

trackingDomain = ''
domain = ''
aIDs = []
cIDs = []
url = "https://" + domain + ""
print(url)

df = pd.read_csv('campids.csv')
for index, row in df.iterrows():
    payload = {'api_key': '',
               'campaign_id': '0',
               'site_offer_id': row['IDs'],
               'source_affiliate_id': '0',
               'channel_id': '0',
               'account_status_id': '0',
               'media_type_id': '0',
               'start_at_row': '0',
               'row_limit': '0',
               'sort_field': 'campaign_id',
               'sort_descending': 'TRUE'
               }
    print('Campaign Payload', payload)
    r = requests.get(url, params=payload)
    print(r.status_code)
    soup = BeautifulSoup(r.text, 'lxml')
    success = soup.find('success').string
    for affIDs in soup.select('campaign'):
        affID = affIDs.find('source_affiliate_id').string
        aIDs.append(affID)
    dataDict = dict()
    dataDict['offers'] = []
    affDict = {'affliate_id': aIDs}
    dataDict['offers'].append(dict(affDict))
The result ends up being as follows:
dictData = {'offers': [{'affliate_id': ['9','2','45','47','14','8','30','30','2','2','9','2']}]}
What I am looking to do is this:
dictData = {'offers': [{'affiliate_id': ['9','2','45','47','14','8','30','30','2','2']}, {'affiliate_id': ['9','2']}]}
On the first request, I obtain the following:
IDs: ['9','2','45','47','14','8','30','30','2','2']
On the second request these IDs are returned:
['9','2']
I am new to Python, so please bear with me as far as etiquette goes and if I am missing something. I'll be happy to provide any additional information.
It has to do with the order of your initializing and appending. You overwrite dataDict on each iteration, but you keep appending to the same aIDs list, which is never reset, so the final dictionary holds one list with ALL of the aIDs. What you want to do is initialize dataDict outside the for loop and reset aIDs inside it; then you can append each request's dictionary into that list:
Note: it's tough to work out/test without the actual data, but I believe this should do it if I worked out the logic correctly in my head:
import requests
import pandas as pd
from pandas import DataFrame
from bs4 import BeautifulSoup
import datetime as datetime
import json
import time

trackingDomain = ''
domain = ''
cIDs = []
url = "https://" + domain + ""

# Initialize your dictionary
dataDict = dict()
# Initialize your list in your dictionary under key `offers`
dataDict['offers'] = []

print(url)
df = pd.read_csv('campids.csv')
for index, row in df.iterrows():
    payload = {'api_key': '',
               'campaign_id': '0',
               'site_offer_id': row['IDs'],
               'source_affiliate_id': '0',
               'channel_id': '0',
               'account_status_id': '0',
               'media_type_id': '0',
               'start_at_row': '0',
               'row_limit': '0',
               'sort_field': 'campaign_id',
               'sort_descending': 'TRUE'
               }
    print('Campaign Payload', payload)
    r = requests.get(url, params=payload)
    print(r.status_code)
    soup = BeautifulSoup(r.text, 'lxml')
    success = soup.find('success').string
    # Initialize your list for this iteration/row in your df.iterrows
    aIDs = []
    for affIDs in soup.select('campaign'):
        affID = affIDs.find('source_affiliate_id').string
        # Append those affIDs to the aIDs list
        aIDs.append(affID)
    # Create your dictionary of key:value with key 'affiliate_id' and value the aIDs list
    affDict = {'affliate_id': aIDs}
    # NOW append that into your list in your dictionary under key `offers`
    dataDict['offers'].append(dict(affDict))
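To see why the placement of the initialization matters, here is a tiny hypothetical illustration of the same pattern, independent of the API:

shared = []                                   # initialized once: accumulates across requests
offers = []
for batch in (['9', '2', '45'], ['9', '2']):  # stand-in for two API responses
    per_request = []                          # initialized per request: starts fresh each time
    per_request.extend(batch)
    shared.extend(batch)
    offers.append({'affiliate_id': per_request})
# offers -> [{'affiliate_id': ['9', '2', '45']}, {'affiliate_id': ['9', '2']}]
# shared -> ['9', '2', '45', '9', '2']  (everything lumped together, like the original bug)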

Import and parse .data file

There is a file I tried to import and save as a pandas dataframe. At first sight it already looks like ordered columns and rows, but in the end I had to do a bunch of things to create the dataframe. Could you please check whether there is a much faster way to manage it?
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
My way of doing it is:
import requests
import pandas as pd

r = requests.get(url)
file = r.text

step_1 = file.split('\n')
for n in range(len(step_1)):  # remove empty strings
    if bool(step_1[n]) == False:
        del(step_1[n])

step_2 = [i.split('\t') for i in step_1]
cars_names = [i[1] for i in step_2]

step_3 = [i[0].split(' ') for i in step_2]
for e in range(len(step_3)):  # remove empty strings in each sublist
    step_3[e] = [item for item in step_3[e] if item != '']

mpg = [i[0] for i in step_3]
cylinders = [i[1] for i in step_3]
disp = [i[2] for i in step_3]
horsepower = [i[3] for i in step_3]
weight = [i[4] for i in step_3]
acce = [i[5] for i in step_3]
year = [i[6] for i in step_3]
origin = [i[7] for i in step_3]

list_cols = [cars_names, mpg, cylinders, disp, horsepower, weight, acce, year, origin]
# list_labels written manually:
list_labels = ['car name', 'mpg', 'cylinders', 'displacement', 'horsepower', 'weight', 'acceleration', 'model year', 'origin']

zipped = list(zip(list_labels, list_cols))
data = dict(zipped)
df = pd.DataFrame(data)
Once you replace \t with a blank space, you can use read_csv to read it. You need to wrap the text first, because the first parameter of read_csv is filepath_or_buffer, which needs an object with a read() method (such as a file handle or StringIO). Your question then reduces to getting read_csv to read the column names correctly for this file.
import requests
import pandas as pd
from io import StringIO

url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
r = requests.get(url)
file = r.text.replace("\t", " ")

# list_labels written manually:
list_labels = ['mpg', 'cylinders', 'displacement', 'horsepower', 'weight', 'acceleration', 'model year', 'origin', 'car name']

df = pd.read_csv(StringIO(file), sep=r"\s+", header=None, names=list_labels)
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
    print(df)
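One quirk of this particular dataset worth handling: the horsepower column marks missing values with '?', which would otherwise force that column to dtype object. read_csv can map those to NaN via na_values:

df = pd.read_csv(StringIO(file), sep=r"\s+", header=None, names=list_labels, na_values="?")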
