ValueError: year is out of range using matplotlib.pyplot - python-3.x

After I call candlestick_ohlc, I can't seem to convert the x axis dates to something matplotlib can understand.
I'm a noob Python programmer. I've tried turning the dataframe into a list, I've tried passing dates to candlestick_ohlc, nothing seems to work other than changing
df['time'] = (df['time'].astype('float'))
into
df['time'] = (df['time'].astype('float')/1000)
Although that renders the wrong datetime.
import requests
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdate
import matplotlib.style as style
import matplotlib.ticker as mticker
from matplotlib.dates import date2num
from mpl_finance import candlestick_ohlc
import datetime as dt
import numpy as np
def get_data(date):
    """Query the API for 2000 days of historical price data starting from "date"."""
    url = "https://min-api.cryptocompare.com/data/histoday?fsym=BTC&tsym=USD&limit=2000&toTs={}".format(date)
    r = requests.get(url)
    ipdata = r.json()
    return ipdata

def get_df(from_date, to_date):
    """Get historical price data between two dates."""
    date = to_date
    holder = []
    # While the earliest date returned is later than the earliest date requested,
    # keep querying the API and adding the results to a list.
    while date > from_date:
        data = get_data(date)
        holder.append(pd.DataFrame(data['Data']))
        date = data['TimeFrom']
    # Join together all of the API queries in the list.
    df = pd.concat(holder, axis=0)
    # Remove data points from before from_date
    df = df[df['time'] > from_date]
    # Convert the timestamp to a readable date format
    # df['time'] = pd.to_datetime(df['time'], unit='s')
    # Make the DataFrame index the time
    df.set_index('time', inplace=True)
    # And sort it so it's in time order
    df.sort_index(ascending=False, inplace=True)
    return df
df = get_df(1528502400, 1560112385)
style.use('dark_background')
fig = plt.figure()
ax1 = plt.subplot2grid((1,1), (0,0))
df = df.reset_index()
cols = ['time', 'open', 'high', 'low', 'close', 'volumefrom', 'volumeto']
df = df[cols]
# IF YOU /1000 AFTER ('float') IT WILL RUN, BUT THE DATES ARE WRONG
df['time'] = (df['time'].astype('float'))
print(df.dtypes)
ohlc = df.values.tolist()
candlestick_ohlc(ax1, ohlc, width=.4, colorup='g', colordown='r')
# IF YOU COMMENT OUT THE NEXT 4 LINES IT WILL RUN, BUT WITH NO DATES ON THE X AXIS
date_fmt = "%d-%m-%Y"
date_formatter = mdate.DateFormatter(date_fmt)
ax1.xaxis.set_major_formatter(date_formatter)
fig.autofmt_xdate()
ax1.set_ylabel('BTC Price (USD)')
ax1.set_xlabel('Date')
plt.show()
Expected result would be date labels plotted as d-m-y. :)
[Screenshot 1: the current plot, with seconds since 1970 on the x axis instead of dates]
[Screenshot 2: what I want it to look like, but with accurate dates]

This is how to fix the code:
df['time'] = df['time'].apply(mdate.epoch2num)
It was definitely one of those lines of code that you spend hours on... now I know.
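Applied to the question's code, this replaces the astype('float') line so the x values are already in matplotlib's date units before the DateFormatter runs. A minimal sketch (note that epoch2num was removed from newer matplotlib releases, so date2num on real datetimes is shown as an equivalent):
df['time'] = df['time'].apply(mdate.epoch2num)
# Equivalent on current matplotlib versions:
# df['time'] = mdate.date2num(pd.to_datetime(df['time'], unit='s').to_numpy())
ohlc = df[['time', 'open', 'high', 'low', 'close']].values.tolist()
candlestick_ohlc(ax1, ohlc, width=.4, colorup='g', colordown='r')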

Related

pandas datareader. Save all data to one dataframe

I am new to Python and I have trouble getting data into one dataframe.
I have the following code.
from pandas_datareader import data as pdr
from datetime import date
from datetime import timedelta
import yfinance as yf
yf.pdr_override()
import pandas as pd

# tickers list
ticker_list = ['0P0001A532.CO','0P00018Q4V.CO','0P00017UBI.CO','0P00000YYT.CO','PFIBAA.CO','PFIBAB.CO','PFIBAC.CO','PFIDKA.CO','PFIGLA.CO','PFIMLO.CO','PFIKRB.CO','0P00019SMI.F','WEKAFKI.CO','0P0001CICW.CO','WEISTA.CO','WEISTS.CO','WEISA.CO','WEITISOP.CO']
today = date.today()

# We can get data by our choice of days bracket
if date.today().weekday() == 0:
    # Friday. If it is Monday we do not have a price, since it is based on the previous day's close.
    start_date = (today + timedelta((4 + today.weekday()) % 7)) - timedelta(days=7)
else:
    start_date = today - timedelta(days=1)

files = []
allData = []
dafr_All = []

def getData(ticker):
    print(ticker)
    data = pdr.get_data_yahoo(ticker, start=start_date, end=(today + timedelta(days=2)))['Adj Close']
    dataname = ticker + '_' + str(today)
    files.append(dataname)
    allData.append(data)
    SaveData(data, dataname)

# Create a data folder in your current dir.
def SaveData(df, filename):
    df.to_csv('./data/' + filename + '.csv')

# This loop will iterate over the ticker list, pass one ticker to get data, and save that data as a file.
for tik in ticker_list:
    getData(tik)
for i in range(0, 11):
    df1 = pd.read_csv('./data/' + str(files[i]) + '.csv')
    print(df1.head())
I get several CSV files containing the adjusted close values (if an adjusted close exists).
I want to save all the data to one dataframe where the first column consists of tickers and the second column consists of adjusted close values. The dataframe then needs to be exported to a CSV file.
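One way to build that single two-column frame is to concatenate the per-ticker series already collected in allData (a sketch, assuming every download succeeded so allData lines up with ticker_list; the output filename is just an example):
combined = pd.concat(allData, keys=ticker_list, names=['Ticker', 'Date'])
combined = combined.rename('Adj Close').reset_index()
combined = combined[['Ticker', 'Adj Close']]  # keep just the two requested columns
combined.to_csv('./data/all_tickers_' + str(today) + '.csv', index=False)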

Python Pandas Writing Value to a Specific Row & Column in the data frame

I have a Pandas df of stock tickers with specific dates, and I want to add the adjusted close for that date using Yahoo Finance. I iterate through the dataframe, do the Yahoo call for that Ticker and Date, and the correct information is returned. However, I am not able to add that information back to the original df. I have tried various loc, iloc, and join methods, and none of them are working for me. The df shows the initialized zero values instead of the new value.
import pandas as pd
import yfinance as yf
from datetime import timedelta

# Build the dataframe
df = pd.DataFrame({'Ticker': ['BGFV', 'META', 'WIRE', 'UG'],
                   'Date': ['5/18/2021', '5/18/2021', '4/12/2022', '6/3/2019'],
                   })
# Change the Date to Datetime
df['Date'] = pd.to_datetime(df.Date)
# Initialize the adjusted close
df['Adj_Close'] = 0.00  # You'll get a column of all 0s
# Iterate through the rows of the df and retrieve the Adjusted Close from Yahoo
for i in range(len(df)):
    ticker = df.iloc[i]['Ticker']
    start = df.iloc[i]['Date']
    end = start + timedelta(days=1)
    # YF call
    data = yf.download(ticker, start=start, end=end)
    # Get just the adjusted close
    adj_close = data['Adj Close']
    # Write the adjusted close to the dataframe on the correct row
    df.iloc[i]['Adj_Close'] = adj_close
    print(f'i value is {i} and adjusted close value is {adj_close} \n')
print(df)
The simplest way to do this is to use loc, as below:
# change this line
df.loc[i,'Adj_Close'] = adj_close.values[0]
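For context on why the original loop silently fails: df.iloc[i]['Adj_Close'] = ... is chained indexing, so the assignment lands on a temporary row copy, whereas a single .loc[row, column] call writes into the frame itself. A small illustration with a hypothetical value:
tmp = pd.DataFrame({'Ticker': ['BGFV'], 'Adj_Close': [0.0]})
tmp.iloc[0]['Adj_Close'] = 27.81  # chained indexing: modifies a temporary copy, tmp is unchanged
tmp.loc[0, 'Adj_Close'] = 27.81   # single indexer: writes into tmp itself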
You can use:
def get_adj_close(x):
    # You needn't specify the end param because period is already set to 1 day
    df = yf.download(x['Ticker'], start=x['Date'], progress=False)
    return df['Adj Close'][0].squeeze()

df['Adj_Close'] = df.apply(get_adj_close, axis=1)
Output:
>>> df
Ticker Date Adj_Close
0 BGFV 2021-05-18 27.808811
1 META 2021-05-18 315.459991
2 WIRE 2022-04-12 104.320045
3 UG 2019-06-03 16.746983

Locate an id in Dataframe using constraint on columns percentile

I am trying to do a Weighted Aged Historical VaR based on the below dataframe. I would like to identify the ID in my dataframe corresponding to the 5% quantile of the 'Weight_Age_Cumul' column (like in the example below that I found on the internet).
I've tried the following line of code, but I get the following error message: 'DataFrame' object has no attribute 'idmax'
cac_df_sorted[cac_df_sorted['Weight_Age_Cumul'] <= 0.05].CAC_Log_returns.idmax()
If you can help me with that it would be great, thank you.
Full code below if needed:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from tabulate import tabulate
from scipy.stats import norm
import yfinance as yf
from yahoofinancials import YahooFinancials
import sys

cac_df = yf.download('^FCHI',
                     start='2020-04-01',
                     end='2022-05-31',
                     progress=False,
                     )
cac_df.head()
cac_df = cac_df.drop(columns=['Open','High','Low','Close','Volume'])
# conversion into returns
cac_df['Adj Close_-1'] = cac_df['Adj Close'].shift(1)
cac_df['CAC_Log_returns'] = np.log(cac_df['Adj Close']/cac_df['Adj Close_-1'])
cac_df.index = pd.to_datetime(cac_df.index, format='%Y-%m-%d').strftime('%Y-%m-%d')
# plot CAC returns graph & histogram
cac_df['CAC_Log_returns'].plot(kind='line', figsize=(15,7))
plt.show()
cac_df['CAC_Log_returns'].hist(bins=40, normed=True, histtype='stepfilled', alpha=0.5)
plt.xlabel('Returns')
plt.ylabel('Frequency')
plt.grid(True)
plt.show()
# Historical VaR: constant weight, age weighted & vol weighted
cac_df_sorted = cac_df.copy()
cac_df_sorted.sort_values(by=['Date'], inplace=True, ascending=False)
# Weights for age-weighted VaR
lamb = 0.98
n = len(cac_df_sorted['CAC_Log_returns'])
weight_age = []
weight_age = [(lamb**(i-1) * (1-lamb))/(1-lamb**n) for i in range(1, n+1)]
# design of the dataframe
cac_df_sorted['Weight_Age'] = weight_age
cac_df_sorted.sort_values(by=['CAC_Log_returns'], inplace=True, ascending=True)
cac_df_sorted['Weight_Age_Cumul'] = np.cumsum(weight_age)
# Historical VaR, constant weight
Var_95_1d_CW = -cac_df_sorted['CAC_Log_returns'].quantile(0.05)
Var_99_1d_CW = -cac_df_sorted['CAC_Log_returns'].quantile(0.01)
# from 1-day VaR to 10-day VaR
mean = np.mean(cac_df['CAC_Log_returns'])
Var_95_10d_CW = (np.sqrt(10)*Var_95_1d_CW) + (mean*(np.sqrt(10)-10))
Var_99_10d_CW = (np.sqrt(10)*Var_99_1d_CW) + (mean*(np.sqrt(10)-10))
print(tabulate([['95%', Var_95_1d_CW, Var_95_10d_CW], ['99%', Var_99_1d_CW, Var_99_10d_CW]], headers=['Confidence Level', 'Value at Risk 1 day Constant Weight', 'Value at Risk 10 days Constant Weight']))
print(cac_df_sorted)
# Historical VaR, age weighted
# Find where the cumulative weight (percentile) hits 0.05 and 0.01
cac_df_sorted[cac_df_sorted['Weight_Age_Cumul'] <= 0.05].CAC_Log_returns.idmax()
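The error most likely comes from the method name: pandas Series and DataFrames have idxmax(), not idmax(). A minimal sketch of the lookup, using the column names from the code above:
mask = cac_df_sorted['Weight_Age_Cumul'] <= 0.05
var_id_95 = cac_df_sorted.loc[mask, 'CAC_Log_returns'].idxmax()  # index label of the largest return inside the 5% tail
print(var_id_95, cac_df_sorted.loc[var_id_95, 'CAC_Log_returns'])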

Automatically Extracting the Datetime Format from a Pandas Series [duplicate]

I am trying to format the column 'Data' so that all the dates follow a single pattern.
The formats I have are:
1/30/20 16:00
1/31/2020 23:59
2020-02-02T23:43:02
Here is the code for the dataframe.
import requests
import pandas as pd
import numpy as np
url = "https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_daily_reports"
csv_only = [i.split("=")[1][1:-1] for i in requests.get(url).text.split(" ") if '.csv' in i and 'title' in i]
combo = [pd.read_csv(url.replace("github","raw.githubusercontent").replace("/tree/","/")+"/"+f) for f in csv_only]
one_df = pd.concat(combo,ignore_index=True)
one_df["País"] = one_df["Country/Region"].fillna(one_df["Country_Region"])
one_df["Data"] = one_df["Last Update"].fillna(one_df["Last_Update"])
I tried adding the code below, but it doesn't give the result I wanted:
pd.to_datetime(one_df['Data'])
one_df.style.format({"Data": lambda t: t.strftime("%m/%d/%Y")})
Any help?
UPDATE
This is the complete code, but it doesn't work; many exceptions are printed for different date formats.
import requests
import pandas as pd
import numpy as np
from datetime import datetime
url = "https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_daily_reports"
csv_only = [i.split("=")[1][1:-1] for i in requests.get(url).text.split(" ") if '.csv' in i and 'title' in i]
combo = [pd.read_csv(url.replace("github","raw.githubusercontent").replace("/tree/","/")+"/"+f) for f in csv_only]
one_df = pd.concat(combo,ignore_index=True)
df = pd.DataFrame()
DATE_FORMATS = ["%m/%d/%y %H:%M", "%m/%d/%Y %H:%M", "%Y-%m-%dT%H:%M:%S", "%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H:%M:%S"]
df["Região"] = one_df["Province/State"].fillna(one_df["Admin2"])
df["País"] = one_df["Country/Region"].fillna(one_df["Country_Region"])
df["Data"] = one_df["Last Update"].fillna(one_df["Last_Update"])
df["Confirmados"] = one_df["Confirmed"]
df["Mortes"] = one_df["Deaths"]
df["Recuperados"] = one_df["Recovered"]
def parse(x_):
    for fmt in DATE_FORMATS:
        try:
            tmp = datetime.strptime(x_, fmt).strftime("%m/%d/%Y")
            return tmp
        except ValueError:
            print(x_)
pd.to_datetime(df['Data'])
df['Data'] = df['Data'].apply(lambda x: parse(x))
#df['Data'].strftime('%m/%d/%Y')
#df['Data'] = df['Data'].map(lambda x: x.strftime('%m/%d/%Y') if x else '')
df.to_excel(r'C:\Users\guilh\Downloads\Covid2\Covid-19.xlsx', index=False, encoding="utf8")
print(df)
from datetime import datetime
import pandas as pd
You could save all possible formats in a list as -
DATE_FORMATS = ["%Y-%m-%d %H:%M:%S", "%Y-%m-%dT%H:%M:%S", "%m/%d/%y %H:%M", "%m/%d/%Y %H:%M"]
Define a function that loops through the formats and tries to parse it.
(Fixed a bug, where the print statement should have been outside the for loop)
issues = set()

def parse(x_):
    for fmt in DATE_FORMATS:
        try:
            return datetime.strptime(x_, fmt).strftime("%m/%d/%Y")
        except ValueError:
            pass
    issues.add(x_)

sample = ["1/30/20 16:00", "1/31/2020 23:59", "2020-02-02T23:43:02"]
df = pd.DataFrame({'data': sample})
df['data'] = df['data'].apply(lambda x: parse(x))
assert df['data'].isna().sum() == len(issues) == 0, "Issues observed, nulls observed in dataframe"
print("Done")
Output
data
0 01/30/2020
1 01/31/2020
2 02/02/2020
If df.apply() comes across a particular date format that hasn't been defined in the list, the corresponding value would simply end up as None/NaN, since nothing would be returned by the function parse().
Also here, letting pd.to_datetime infer the format does the trick:
import pandas as pd
s = pd.to_datetime(["1/30/20 16:00", "1/31/2020 23:59", "2020-02-02T23:43:02"])
print(s)
# DatetimeIndex(['2020-01-30 16:00:00', '2020-01-31 23:59:00',
# '2020-02-02 23:43:02'],
# dtype='datetime64[ns]', freq=None)
Note that if your date/time format generally provides the day first (e.g. 30.1.2021 for Jan 30th 2021), set keyword dayfirst=True.
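For example, a small illustration of the dayfirst keyword (the sample values are made up):
s = pd.to_datetime(["30.1.2021", "31.1.2021"], dayfirst=True)
print(s)
# DatetimeIndex(['2021-01-30', '2021-01-31'], dtype='datetime64[ns]', freq=None)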

Second title of a plot/photo is the value of a column in a CSV but only the last value is used in all photos

I have a script that makes a photo that shows a basemap and where an earthquake happened. So 1 earthquake, 1 photo. The second title of each plot should be the date of the earthquake. However, only the last value, which is "2020-04-10", is used in all photos.
from shapely.geometry import Point
from geopandas import GeoDataFrame
import geopandas as gpd
import pandas as pd
import matplotlib.pyplot as plt
import os

os.chdir(r'path')

def plotPoint():
    df = pd.read_csv('earthquakes.csv')
    basemap = gpd.read_file('basemap.shp')
    crs = "epsg:32651"
    geometry = gpd.points_from_xy(df.Longitude, df.Latitude)
    gdf = GeoDataFrame(df, crs=crs, geometry=geometry)
    for d in df['Date'].values:
        date = d
    for i in range(gdf.shape[0]):
        ax = basemap.plot(figsize=(15,10))
        ax.axis('off')
        g = gdf.iloc[i].geometry
        plt.plot(g.x, g.y, marker='o', color='red', markersize=15)
        title = 'Earthquakes in the ___ from 2008 to 2020'
        dateInfo = str(date)
        plt.suptitle(title)
        plt.title(dateInfo)
        plt.savefig("earthquake_{0}.png".format(i))

plotPoint()
Get the values of the "Date" column:
for i in df['Date'].values:
    print(i)
Result
2020-04-22
2020-04-22
2020-04-21
2020-04-18
2020-04-10
Sample CSV
Latitude,Longitude,Date,Time_UTC,Depth,Depth Type,Magnitude Type,Magnitude,Region Name,Last Update,Eqid,unknown field
13.81,121.1,2020-04-22,03:19:57,10,f,mb,4.5,MINDORO, PHILIPPINES,2020-04-28 23:17,850323
13.76,120.92,2020-04-22,02:36:19,10, , M,4.2,MINDORO, PHILIPPINES,2020-04-22 03:50,850325
10.45,125.2,2020-04-21,21:43:05,10,f,mb,4.7,LEYTE, PHILIPPINES,2020-04-21 22:55,850252
6.69,125.23,2020-04-18,15:22:16,32, , M,3.6,MINDANAO, PHILIPPINES,2020-04-18 15:35,849329
5.65,126.54,2020-04-10,18:45:49,80, ,Mw,5.2,MINDANAO, PHILIPPINES,2020-04-11 06:41,846838
I changed your code: you were using date from a different for loop, which is why it only picked up the last date. You can use the Date from gdf too, I'm guessing:
# for d in df['Date'].values:
#     date = d
for i in range(gdf.shape[0]):
    ax = basemap.plot(figsize=(15,10))
    ax.axis('off')
    g = gdf.iloc[i].geometry
    plt.plot(g.x, g.y, marker='o', color='red', markersize=15)
    title = 'Earthquakes in the ___ from 2008 to 2020'
    # Added this line
    date = gdf.iloc[i]['Date']
    dateInfo = str(date)
    plt.suptitle(title)
    # Changed this line
    plt.title(dateInfo)
    plt.savefig("earthquake_{0}.png".format(i))
    plt.show()
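An equivalent way to keep each row's geometry and its Date together is to iterate over the GeoDataFrame rows directly; a sketch of the same loop (not the original answer):
for i, row in gdf.iterrows():
    ax = basemap.plot(figsize=(15,10))
    ax.axis('off')
    plt.plot(row.geometry.x, row.geometry.y, marker='o', color='red', markersize=15)
    plt.suptitle('Earthquakes in the ___ from 2008 to 2020')
    plt.title(str(row['Date']))  # the date that belongs to this earthquake
    plt.savefig("earthquake_{0}.png".format(i))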
