ical parsing recurring events in python - python-3.x

I am extremely new to Python and icalendar, but I am trying to grab my iCalendar feed from Apple's iCloud website and then access the calendar information so that I can display it in an app. I am able to get any event from the calendar that isn't recurring, but for some reason it skips over the recurring ones (which are basically the only kind of event I schedule nowadays). This is the code that I currently have; any ideas on how to get recurring events?
from icalendar import Calendar, Event
import urllib.request

def getCalendar():
    urlHome = urllib.request.urlopen('https://p10-calendarws.icloud.com/ca/subscribe/1/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX').read()
    return urlHome

def displayCalendar(calendar):
    showCalHome = Calendar.from_ical(calendar)
    for event in showCalHome.walk('VEVENT'):
        date = event.get('dtstart').dt
        summary = event.get('summary')
        print(summary, date)

calendar = getCalendar()
displayCalendar(calendar)
Any thoughts or ideas on how I can get not only a list of single events, but also the recurring ones?

Figured it out by parsing through the ICS file itself, turning a lot of values into strings and then comparing the strings to what I was looking for. It looked something like this:
import json
import requests
from icalendar import Calendar, Event
import urllib.request
from datetime import *
import datetime
from dateutil.rrule import *

def getCalendar():
    urlWork = urllib.request.urlopen('https://p10-calendarws.icloud.com/ca/subscribe/1/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX').read()
    urlHome = urllib.request.urlopen('https://p10-calendarws.icloud.com/ca/subscribe/1/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX').read()
    calendars = [urlHome, urlWork]
    return calendars

def getTodaysEvents(calendars):
    calHome = calendars[0]
    calWork = calendars[1]
    allEvents = []
    today = str(date.today())
    year = today[0:4]
    month = today[5:7]
    day = today[8:10]
    currentDay = year + month + day
    showCalHome = Calendar.from_ical(calHome)
    for component in showCalHome.walk():
        if component.name == "VEVENT":
            rule = component.get('rrule')
            eventDay = str(component.get('dtstart').dt)
            if rule is not None:
                rrule = dict(rule)
                if 'UNTIL' in rrule.keys():
                    eventEnd = str(rrule['UNTIL'][0])
                    eventEndYear = eventEnd[0:4]
                    eventEndMonth = eventEnd[5:7]
                    eventEndDay = eventEnd[8:10]
                    endEvent = eventEndYear + eventEndMonth + eventEndDay
                    if int(endEvent) >= int(currentDay):
                        print(component.get('summary'))
                        homeEvents = {}  # fresh dict per event so earlier entries aren't overwritten
                        homeEvents['CALENDAR'] = "HOME"
                        homeEvents['SUMMARY'] = component.get('summary')
                        homeEvents['LOCATION'] = component.get('location')
                        homeEvents['START'] = component.get('dtstart').dt
                        homeEvents['END'] = component.get('dtend').dt
                        allEvents.append(homeEvents)
                # else: (NEED TO IMPLEMENT WEEKLY EVENTS THAT DON'T END and events that aren't recurring)
                #     print(component.get('summary'))
                #     if component.get('location') != 'None':
                #         print(component.get('location'))
                #     print(component.get('dtstart').dt)
                #     print(component.get('dtend').dt)
            else:
                if str(date.today()) == eventDay[0:10]:
                    print(component.get('summary'))
                    singleEvents = {}  # fresh dict per event
                    singleEvents['CALENDAR'] = "HOME"
                    singleEvents['SUMMARY'] = component.get('summary')
                    singleEvents['LOCATION'] = component.get('location')
                    singleEvents['START'] = component.get('dtstart').dt
                    singleEvents['END'] = component.get('dtend').dt
                    allEvents.append(singleEvents)
    print(allEvents)
    showCalWork = Calendar.from_ical(calWork)
    for component in showCalWork.walk():
        if component.name == "VEVENT":
            rule = component.get('rrule')
            if rule is not None:
                rrule = dict(rule)
                today = str(date.today())
                year = today[0:4]
                month = today[5:7]
                day = today[8:10]
                currentDay = year + month + day
                if 'UNTIL' in rrule.keys():
                    eventEnd = str(rrule['UNTIL'][0])
                    eventEndYear = eventEnd[0:4]
                    eventEndMonth = eventEnd[5:7]
                    eventEndDay = eventEnd[8:10]
                    endEvent = eventEndYear + eventEndMonth + eventEndDay
                    if int(endEvent) >= int(currentDay):
                        workEvents = {}  # fresh dict per event
                        workEvents['CALENDAR'] = "WORK"
                        workEvents['SUMMARY'] = component.get('summary')
                        workEvents['LOCATION'] = component.get('location')
                        workEvents['START'] = component.get('dtstart').dt
                        workEvents['END'] = component.get('dtend').dt
                        allEvents.append(workEvents)
                # else:
                #     print(component.get('summary'))
                #     if component.get('location') != 'None':
                #         print(component.get('location'))
                #     print(component.get('dtstart').dt)
                #     print(component.get('dtend').dt)
    return allEvents

def displayEvents(events):
    print(events)
    print()
    print("TODAY:")
    for event in range(len(events)):
        start = str(events[event]['START'])[11:16]
        end = str(events[event]["END"])[11:16]
        if int(start[0:2]) < 12:
            sT = "AM"
        else:
            pmtime = int(start[0:2]) - 12
            start = str(pmtime) + start[2:5]
            sT = "PM"
        if int(end[0:2]) < 12:
            eT = "AM"
        else:
            pmtime = int(end[0:2]) - 12
            end = str(pmtime) + end[2:5]
            eT = "PM"
        print(events[event]['SUMMARY'] + " - " + events[event]["CALENDAR"])
        if str(events[event]['LOCATION']) != "None":
            print(events[event]['LOCATION'])
        if start[0] == "0" and end[0] == "0":
            print(start[1:5] + sT + " - " + end[1:5] + eT)
        elif start[0] == "0":
            print(start[1:5] + sT + " - " + end + eT)
        elif end[0] == "0":
            print(start + sT + " - " + end[1:5] + eT)
        else:
            print(start + sT + " - " + end + eT)

calendars = getCalendar()
events = getTodaysEvents(calendars)
displayEvents(events)
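For reference, the UNTIL handling above can also be done without string slicing by letting dateutil expand each event's RRULE into concrete occurrences. A rough sketch of that idea, not tested against the feeds above; it assumes naive datetimes (timezone-aware calendars need extra care) and a placeholder 30-day window:

from datetime import datetime, timedelta
from dateutil.rrule import rrulestr
from icalendar import Calendar

def occurrences(ics_bytes, days=30):
    # Yield (summary, start) pairs for every occurrence inside the window.
    cal = Calendar.from_ical(ics_bytes)
    window_start = datetime.now()
    window_end = window_start + timedelta(days=days)
    for event in cal.walk('VEVENT'):
        start = event.get('dtstart').dt
        rule = event.get('rrule')
        if rule is None:
            yield event.get('summary'), start          # one-off event
        else:
            # vRecur serialises back to e.g. "FREQ=WEEKLY;BYDAY=MO"
            text = rule.to_ical().decode()
            for occ in rrulestr(text, dtstart=start).between(window_start, window_end):
                yield event.get('summary'), occ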

I created a library because I was looking for the exact same use-case.
In your case, recurring-ical-events could be embedded like this:
events = recurring_ical_events.of(calendar).between(start_date, end_date)
for event in events:
    # ...
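Filling in the surrounding pieces, a hedged end-to-end sketch (the URL is a placeholder for your iCloud subscription link; install with pip install recurring-ical-events icalendar):

import datetime
import urllib.request

import recurring_ical_events
from icalendar import Calendar

ics = urllib.request.urlopen('https://example.com/calendar.ics').read()  # placeholder URL
calendar = Calendar.from_ical(ics)

start_date = datetime.date.today()
end_date = start_date + datetime.timedelta(days=1)

# between() expands RRULEs, so a weekly meeting shows up once per occurrence
for event in recurring_ical_events.of(calendar).between(start_date, end_date):
    print(event.get('summary'), event.get('dtstart').dt)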

Related

Print REGEX using USER DEFINED FUNCTION

I'm trying to print the variables ccb_3, nome, data, taxa and parcela using the function I defined as "ext_ccb", but when I run the code it only returns the variable ccb_3, three times (because I defined q as 3).
I tried splitting it into two functions (one with the variable ccb_3 and one with the rest, which uses the regex), but that didn't work either.
'''
from PyPDF2 import PdfFileReader, PdfFileWriter
import re

x = 1
q = 3

def ext_ccb():
    nome_ccb = str("Vazio (" + y + ").pdf")
    ccb = PdfFileReader(nome_ccb)
    ccb_obj_1 = ccb.getPage(0)
    ccb_text_1 = ccb_obj_1.extractText()
    ccb_obj_2 = ccb.getPage(1)
    ccb_text_2 = ccb_obj_2.extractText()
    ccb_3 = ccb_text_1[1:8]
    print(ccb_3)
    pattern_nome = re.compile(r'''[^\n][^CPF][A-Z](|\.)\w*\s*.*$
Nome Completo
''', re.M)
    matches_nome = pattern_nome.finditer(ccb_text_1)
    for match in matches_nome:
        nome = str(match)
        nome = nome[40:].replace(r"\n\nNome Completo\n'>", "")
        print(nome)
    pattern_data = re.compile(r'''5\.2\. Modalidade
\d{2}/\d{2}/\d{4}
''')
    matches_data = pattern_data.findall(ccb_text_1)
    for match in matches_data:
        data = match[17:27]
        print(data)
    pattern_taxa = re.compile(r'''Taxa de Juros a\.m\. \(%\)
\d*,\d*''')
    matches_taxa = pattern_taxa.findall(ccb_text_2)
    for match in matches_taxa:
        taxa = match[24:]
        print(taxa)
    pattern_vparcela = re.compile(r'''Valor das Parcelas
R\$ \d*,\d*''')
    matches_vparcela = pattern_vparcela.findall(ccb_text_2)
    for match in matches_vparcela:
        parcela = match[23:]
        print(parcela)

while x <= q:
    y = str(x)
    x += 1
    ext_ccb()
'''
What I really need is to insert it into a CSV, multiple times from different PDFs, which I already have the code for:
'''
from csv import writer

x = 5
q = 0
while q < x:
    q += 1
    ccb_3 += 1
    nome += 2
    data += 4
    taxa += 4
    parcela += 5
    list_data = [ccb_3, nome, data, taxa, parcela]
    with open('csv_teste.csv', 'a', newline='') as f_object:
        writer_object = writer(f_object)
        writer_object.writerow(list_data)
        f_object.close()
'''
How can I save the data from each PDF and put it into the CSV?
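A hedged sketch of one way to do it, assuming ext_ccb is changed to return its fields instead of only printing them; the regex extraction itself is elided, and the file names reuse the "Vazio (n).pdf" pattern from the question:

import csv
from PyPDF2 import PdfFileReader

def ext_ccb(pdf_name):
    """Extract the five fields from one PDF and return them as a tuple."""
    ccb = PdfFileReader(pdf_name)
    text_1 = ccb.getPage(0).extractText()
    text_2 = ccb.getPage(1).extractText()
    ccb_3 = text_1[1:8]
    # ... the same regex lookups as in the question would fill these in ...
    nome = data = taxa = parcela = ""
    return ccb_3, nome, data, taxa, parcela

with open('csv_teste.csv', 'a', newline='') as f_object:
    writer_object = csv.writer(f_object)
    for x in range(1, 4):                         # q = 3 PDFs, as in the question
        writer_object.writerow(ext_ccb("Vazio (" + str(x) + ").pdf"))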

How to get Google Search Console data using an access_token or refresh_token in Python?

I'm trying to get data from Google Search Console on behalf of a user. Once they log in, it returns an access_token and a refresh_token; using the access_token or refresh_token, how can I get the Google Search Console data (impressions, clicks, pages)?
I get data from Google Analytics the same way, but with Google Search Console it doesn't seem to be possible.
def extract_data(site, creds, num_days, output):
    domain_name = get_domain_name(site)
    create_project(domain_name)
    full_path = domain_name + '/' + output
    current_dates = get_dates_from_csv(full_path)
    webmasters_service = authorize_creds(creds)
    # Set up Dates
    end_date = datetime.date.today() - relativedelta.relativedelta(days=3)
    start_date = end_date - relativedelta.relativedelta(days=num_days)
    delta = datetime.timedelta(days=1)  # This will let us loop one day at the time
    scDict = defaultdict(list)
    while start_date <= end_date:
        if current_dates is not None and current_dates.str.contains(
                datetime.datetime.strftime(start_date, '%Y-%m-%d')).any():
            start_date += delta
        else:
            # print('Start date at beginning: %s' % start_date)
            maxRows = 25000  # Maximum 25K per call
            numRows = 0  # Start at Row Zero
            status = ''  # Initialize status of extraction
            # print("status status status status", status)
            while status != 'Finished':  # Test with i < 10 just to see how long the task will take to process.
                request = {
                    'startDate': datetime.datetime.strftime(start_date, '%Y-%m-%d'),
                    'endDate': datetime.datetime.strftime(start_date, '%Y-%m-%d'),
                    'dimensions': ['date', 'page', 'query'],
                    'rowLimit': maxRows,
                    'startRow': numRows
                }
                response = execute_request(webmasters_service, site, request)
                try:
                    # Process the response
                    for row in response['rows']:
                        scDict['date'].append(row['keys'][0] or 0)
                        scDict['page'].append(row['keys'][1] or 0)
                        scDict['query'].append(row['keys'][2] or 0)
                        scDict['clicks'].append(row['clicks'] or 0)
                        scDict['ctr'].append(row['ctr'] or 0)
                        scDict['impressions'].append(row['impressions'] or 0)
                        scDict['position'].append(row['position'] or 0)
                    # print('successful at %i' % numRows)
                except:
                    print('error occurred at %i' % numRows)
                # Add response to dataframe
                df = pd.DataFrame(data=scDict)
                df['clicks'] = df['clicks'].astype('int')
                df['ctr'] = df['ctr'] * 100
                df['impressions'] = df['impressions'].astype('int')
                df['position'] = df['position'].round(2)
                print('Numrows at the start of loop: %i' % numRows)
                try:
                    numRows = numRows + len(response['rows'])
                except:
                    status = 'Finished'
                print('Numrows at the end of loop: %i' % numRows)
                if numRows % maxRows != 0:
                    status = 'Finished'
            start_date += delta
            print('Start date at end: %s' % start_date)
    write_to_csv(df, full_path)
    return df
This is the code I have for Google Search Console; it authenticates with the webmasters_service = authorize_creds(creds) method, but I want to access the API using an access_token or refresh_token instead.
This is the code used for Google Analytics.
def google_analytics_reporting_api_data_extraction(viewID, dim, met, start_date,
                                                   end_date, refresh_token,
                                                   transaction_type, goal_number,
                                                   condition):
    viewID = viewID
    dim = dim
    met = met
    start_date = start_date
    end_date = end_date
    refresh_token = refresh_token
    transaction_type = transaction_type
    condition = condition
    goal_number = goal_number
    viewID = "".join(['ga%3A', viewID])
    if transaction_type == "Goal":
        met1 = "%2C".join([re.sub(":", "%3A", i) for i in met]).replace("XX", str(goal_number))
    elif transaction_type == "Transaction":
        met1 = "%2C".join([re.sub(":", "%3A", i) for i in met])
    dim1 = "%2C".join([re.sub(":", "%3A", i) for i in dim])
    credentials = client.OAuth2Credentials(
        access_token=None, client_id=client_id, client_secret=client_secret, refresh_token=refresh_token,
        token_expiry=3600, token_uri=GOOGLE_TOKEN_URI, user_agent='my-user-agent/1.0', revoke_uri=GOOGLE_REVOKE_URI)
    credentials.refresh(httplib2.Http())
    rt = (json.loads(credentials.to_json()))['access_token']
    api_url = "https://www.googleapis.com/analytics/v3/data/ga?ids="
    url = "".join(
        [api_url, viewID, '&start-date=', start_date, '&end-date=', end_date, '&metrics=', met1,
         '&dimensions=', dim1, '&max-results=1000000', condition, '&access_token=', rt])
    data = pd.DataFrame()
    dataa = pd.DataFrame()
    users = []
    final_date = []
    # try:
    r = requests.get(url)
    # print("r values", list((r.json())['rows']))
    # print("start_date", start_date)
    start = datetime.datetime.strptime(start_date, "%Y-%m-%d")
    end = datetime.datetime.strptime(end_date, "%Y-%m-%d")
    date_generated = [start + datetime.timedelta(days=x) for x in range(0, (end - start).days)]
    for each in date_generated:
        date_value = each.date()
        url = "".join(
            [api_url, viewID, '&start-date=', str(each.date()), '&end-date=', str(each.date()), '&metrics=', met1,
             '&dimensions=', dim1, '&max-results=1000000', condition, '&access_token=', rt])
        rr = requests.get(url)
        dataa = pd.DataFrame(list((rr.json())['rows']))
        users.append(dataa[0][0])
        final_date.append(str(date_value))
        # print("data and users", users, final_date)
    data = pd.DataFrame(list((r.json())['rows']))
    try:
        data = pd.DataFrame(list((r.json())['rows']), columns=[re.sub("ga:", "", i) for i in met])
        # data['date'] = start_date
        # dim_data = pd.DataFrame(list((r.json())['rows']), columns=[re.sub("ga:", "", i) for i in dim])
        return data, users, final_date
    except:
        print((r.json()))
In the code above, the refresh_token is used to access the data from Google Analytics. I want to do the same thing for Google Search Console.
Please help me out.
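One hedged possibility (not tested here) is to skip authorize_creds and build the Search Console client straight from the stored refresh token with google-auth, mirroring what the Analytics snippet does with oauth2client. client_id, client_secret, refresh_token, and site below are placeholders for your own OAuth values and verified property:

from google.oauth2.credentials import Credentials
from googleapiclient.discovery import build

creds = Credentials(
    token=None,                                    # refreshed automatically on first request
    refresh_token=refresh_token,
    token_uri='https://oauth2.googleapis.com/token',
    client_id=client_id,
    client_secret=client_secret,
    scopes=['https://www.googleapis.com/auth/webmasters.readonly'],
)

webmasters_service = build('searchconsole', 'v1', credentials=creds)

request = {
    'startDate': '2023-01-01',
    'endDate': '2023-01-31',
    'dimensions': ['date', 'page', 'query'],
    'rowLimit': 25000,
}
response = webmasters_service.searchanalytics().query(siteUrl=site, body=request).execute()
for row in response.get('rows', []):
    print(row['keys'], row['clicks'], row['impressions'], row['position'])

Once webmasters_service is created this way, the rest of extract_data above should be usable as-is, since it only needs that service object.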

ValueError: Length of values (1) does not match length of index (50)

Hey there awesome peeps,
I am trying to retrieve some trend information based on keywords that I have in a list (1000 keywords). To minimize the chance of getting blocked by Google, I have a cutoff of 50 and a 10-second pause. At the moment I get an error saying that the length of values does not match the length of the index. It fails on this line:
df3['Trend'] = trends
If anyone can help, I will really appreciate it.
Thanks!
!pip install pytrends
import pandas as pd
import json
import time
from pytrends.request import TrendReq

get_gsc_file = "/content/Queries.csv"
sortby = "Clicks"
cutoff = 50
pause = 10
timeframe = "today 3-m"
geo = "US"

df = pd.read_csv(get_gsc_file, encoding='utf-8')
df.sort_values(by=[sortby], ascending=False, inplace=True)
df = df[:cutoff]
d = {'Keyword': [], sortby: [], 'Trend': []}
df3 = pd.DataFrame(data=d)
keywords = []
trends = []
metric = df[sortby].tolist()
up = 0
down = 0
flat = 0
na = 0
for index, row in df.iterrows():
    keyword = row['Top queries']
    pytrends = TrendReq(hl='en-US', tz=360, retries=2, backoff_factor=0.1)
    kw_list = [keyword]
    pytrends.build_payload(kw_list, cat=0, timeframe=timeframe, geo=geo, gprop='')
    df2 = pytrends.interest_over_time()
    keywords.append(keyword)
    try:
        trend1 = int((df2[keyword][-5] + df2[keyword][-4] + df2[keyword][-3])/3)
        trend2 = int((df2[keyword][-4] + df2[keyword][-3] + df2[keyword][-2])/3)
        trend3 = int((df2[keyword][-3] + df2[keyword][-2] + df2[keyword][-1])/3)
        if trend3 > trend2 and trend2 > trend1:
            trends.append('UP')
            up += 1
        elif trend3 < trend2 and trend2 < trend1:
            trends.append('DOWN')
            down += 1
        else:
            trends.append('FLAT')
            flat += 1
    except:
        trends.append('N/A')
        na += 1
    time.sleep(pause)
df3['Keyword'] = keywords
df3['Trend'] = trends
df3[sortby] = metric

def colortable(val):
    if val == 'DOWN':
        color = "lightcoral"
    elif val == 'UP':
        color = "lightgreen"
    elif val == 'FLAT':
        color = "lightblue"
    else:
        color = 'white'
    return 'background-color: %s' % color

df3 = df3.style.applymap(colortable)
total = len(trends)
print("Up: " + str(up) + " | " + str(round((up/total)*100, 0)) + "%")
print("Down: " + str(down) + " | " + str(round((down/total)*100, 0)) + "%")
print("Flat: " + str(flat) + " | " + str(round((flat/total)*100, 0)) + "%")
print("N/A: " + str(na) + " | " + str(round((na/total)*100, 0)) + "%")
df3
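The traceback says df3['Trend'] = trends received fewer values than df3 has rows, i.e. the per-keyword lists drifted out of step. A hedged diagnostic sketch (using the same names as above) is to check the lengths and build the frame in one step after the loop:

# Hedged sketch: confirm every per-keyword list collected the same number of
# entries, then construct df3 from them in one shot.
assert len(keywords) == len(trends) == len(metric), (
    f"{len(keywords)} keywords, {len(trends)} trends, {len(metric)} metrics"
)
df3 = pd.DataFrame({'Keyword': keywords, sortby: metric, 'Trend': trends})

If the assertion fires, the usual culprit is an iteration that appended a keyword but never reached the matching trends.append (or vice versa); appending exactly one trend per keyword, even on failure, keeps the columns aligned.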

Window not appearing in Tkinter

This code is part of a cricket scoring program.
When a button is pressed in my program, it runs a validation based on the user's input and, if it passes, it should open a new window. But every time I run the code, the window never appears.
def team_names(team):
    global team_amount
    global team_name_input
    global team_amount_test
    team_name_input = Toplevel(master)
    confirm_screen.withdraw()
    enter_names = Label(team_name_input, text="Enter amount of players in " + team).grid(row=0, column=1)
    enter_names2 = Label(team_name_input, text="(Between 2 and 11)").grid(row=1, column=1)
    team_amount_test = Entry(team_name_input, width=20)
    team_amount_test.grid(row=2, column=1)
    submit_amount = Button(team_name_input, text="Submit", command=lambda: amount_validation(team_1_array)).grid(row=3, column=1)  # <-- button that should open the new window

def amount_validation(team_array):
    team_num = 1
    team_amount = int(team_amount_test.get())
    if team_num == 1:
        current_team = team_1
    elif team_num == 2:
        current_team = team_2
    # Validation
    if team_amount < 2 or team_amount > 11:
        error = Label(team_name_input, text="Invalid amount, try again").grid(row=4, column=1)
    elif team_amount > 2 or team_amount < 11:
        player_num = 1
        name_input_win = Toplevel(master)  # Should open new window here
        # name_input_win.geometry("200x150")
        team_name_input.withdraw()
        first = Label(name_input_win, text="Enter player " + str(player_num) + " for " + str(current_team)).grid(row=0, column=1)
        name_input = Entry(name_input_win)
        name_input.grid(row=1, column=1)
        name = str(name_input.get())
        for i in range(0, player_num):
            p = None
            while not p or p in team_array:
                # first = Label(name_input, text="Enter player " + str(player_num) + " for " + str(current_team)).grid(row=0, column=1)
                if p in team_array:
                    error = Label(name_input, text="Player already entered").grid(row=2, column=1)
                team_array.append(p)
                player_num = player_num + 1
Any help will be appreciated ^-^
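The likely reason nothing appears is that Tkinter only draws a Toplevel once control returns to mainloop(), and the while not p or p in team_array: loop never finishes (p stays None), so the event loop stays blocked inside the callback. A hedged sketch of collecting the names with a button callback instead of a while loop; the widget names are placeholders, not the full scoring program:

import tkinter as tk

def ask_names(master, team_array, team_amount):
    # Open one window and collect names one click at a time, so mainloop keeps running.
    win = tk.Toplevel(master)
    tk.Label(win, text="Enter player name").grid(row=0, column=1)
    entry = tk.Entry(win)
    entry.grid(row=1, column=1)
    status = tk.Label(win, text="")
    status.grid(row=2, column=1)

    def submit():
        name = entry.get().strip()
        if not name or name in team_array:
            status.config(text="Player already entered or empty")
            return
        team_array.append(name)
        entry.delete(0, tk.END)
        if len(team_array) == team_amount:
            win.destroy()                      # done collecting names

    tk.Button(win, text="Submit", command=submit).grid(row=3, column=1)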

Scipy optimize.minimize with multi- parameters

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import linalg, optimize
%matplotlib inline
Data load
data = pd.read_csv("D:/Stat/TimeSeries/KRW_month_0617_1.csv",index_col="Date") / 100
para = open("D:/Stat/TimeSeries/KRW_month_0617_1.txt").readlines()[0:2]
data.index = pd.to_datetime(data.index)
Parameters
cond = []
params = []
time = []
for i in para:
    j = i.split()
    for k in j:
        cond.append(k)
cond = cond[1:]
for i in range(len(cond)):
    cond[i] = round(float(cond[i]), 4)
params = cond[0:23]
time = cond[23:]
maturity = np.array(time[1:])
timegap = 1/cond[23]
Functions We need
def Paramcheck(Params, checkStationary=1):
    result = 0
    Kappa = np.array([[params[20], 0, 0], [0, params[21], 0], [0, 0, params[22]]])
    Sigma = np.array([[params[1], 0, 0], [params[2], params[3], 0], [params[4], params[5], params[6]]])
    State = np.array([params[7], params[8], params[9]])
    Lambda = params[0]
    SigmaEps = np.identity(10)
    for i in range(10):
        SigmaEps[i][i] = params[i+10]
    for i in range(len(Sigma)):
        if Sigma[i][i] < 0:
            result = 1
    for j in SigmaEps:
        if np.any(SigmaEps) < 0:
            result = 1
    if Lambda < 0.05 or Lambda > 2:
        result = 1
    elif State[0] < 0:
        result = 1
    elif Kappa[0][0] < 0:
        result = 1
    if result == 0 and checkStationary > 0:
        if max(np.linalg.eigvals(-Kappa).real) > 0:
            result = 2
    return result

def CheckDet(x):
    if x == np.inf or x == np.nan:
        result = 1
    elif x < 0:
        result = 2
    elif abs(x) < 10**-250:
        result = 3
    else:
        result = 0
    return result

def NS_factor(lambda_val, maturity):
    col1 = np.ones(len(maturity))
    col2 = (1 - np.exp(-lambda_val*maturity))/(lambda_val*maturity)
    col3 = col2 - np.exp(-lambda_val*maturity)
    factor = np.array([col1, col2, col3]).transpose()
    return factor
def DNS_Kalman_filter(Params, *args):
    N = Paramcheck(Params)
    if N == 0:
        Kappa = np.array([[params[20], 0, 0], [0, params[21], 0], [0, 0, params[22]]])
        Sigma = np.array([[params[1], 0, 0], [params[2], params[3], 0],
                          [params[4], params[5], params[6]]])
        State = np.array([params[7], params[8], params[9]])
        Lambda = params[0]
        SigmaEps = np.identity(10)
        for i in range(10):
            SigmaEps[i][i] = params[i+10]
        Obs_Yield = args[0]
        Obs_Date = args[1]
        Timegap = args[2]
        Obs_Mty = args[3]
        Finalstate = args[4]
        Mty_length = len(Obs_Mty)
        B = NS_factor(lambda_val=Lambda, maturity=Obs_Mty)
        H_large = SigmaEps ** 2
        N_obs = len(Obs_Date)
        LLH_vec = np.zeros(N_obs)
        phi1 = linalg.expm(-Kappa*Timegap)
        phi0 = (np.identity(3) - phi1) @ State
        Eigenvalues = np.linalg.eig(Kappa)[0]
        Eigen_vec = np.linalg.eig(Kappa)[1]
        Eigen_vec_inv = np.linalg.inv(Eigen_vec)
        S = Eigen_vec_inv @ Sigma @ Sigma.transpose() @ Eigen_vec_inv.transpose()
        Atilde = np.dot(Sigma[0], Sigma[0])
        Btilde = np.dot(Sigma[1], Sigma[1])
        Ctilde = np.dot(Sigma[2], Sigma[2])
        Dtilde = np.dot(Sigma[0], Sigma[1])
        Etilde = np.dot(Sigma[0], Sigma[2])
        Ftilde = np.dot(Sigma[1], Sigma[2])
        res1 = Atilde*Obs_Mty*Obs_Mty/6
        res2 = Btilde*(1/(2*Lambda**2) - (1-np.exp(-Lambda*Obs_Mty))/(Lambda**3*Obs_Mty)
                       + (1-np.exp(-2*Lambda*Obs_Mty))/(4*Lambda**3*Obs_Mty))
        res3 = Ctilde*(1/(2*Lambda**2) + np.exp(-Lambda*Obs_Mty)/(Lambda**2)
                       - Obs_Mty*np.exp(-2*Lambda*Obs_Mty)/(4*Lambda)
                       - 3*np.exp(-2*Lambda*Obs_Mty)/(4*Lambda**2)
                       - 2*(1-np.exp(-Lambda*Obs_Mty))/(Lambda**3*Obs_Mty)
                       + 5*(1-np.exp(-2*Lambda*Obs_Mty))/(8*Lambda**3*Obs_Mty))
        res4 = Dtilde*(Obs_Mty/(2*Lambda) + np.exp(-Lambda*Obs_Mty)/(Lambda**2)
                       - (1-np.exp(-Lambda*Obs_Mty))/(Lambda**3*Obs_Mty))
        res5 = Etilde*(3*np.exp(-Lambda*Obs_Mty)/(Lambda**2) + Obs_Mty/(2*Lambda)
                       + Obs_Mty*np.exp(-Lambda*Obs_Mty)/Lambda
                       - 3*(1-np.exp(-Lambda*Obs_Mty))/(Lambda**3*Obs_Mty))
        res6 = Ftilde*(1/(Lambda**2) + np.exp(-Lambda*Obs_Mty)/(Lambda**2)
                       - np.exp(-2*Lambda*Obs_Mty)/(2*Lambda**2)
                       - 3*(1-np.exp(-Lambda*Obs_Mty))/(Lambda**3*Obs_Mty)
                       + 3*(1-np.exp(-2*Lambda*Obs_Mty))/(4*Lambda**3*Obs_Mty))
        val = res1 + res2 + res3 + res4 + res5 + res6
        V_mat = np.zeros([3, 3])
        V_lim = np.zeros([3, 3])
        for i in range(3):
            for j in range(3):
                V_mat[i][j] = S[i][j]*(1-np.exp(-(Eigenvalues[i] + Eigenvalues[j])*Timegap))/(Eigenvalues[i] + Eigenvalues[j])
                V_lim[i][j] = S[i][j]/(Eigenvalues[i] + Eigenvalues[j])
        Q = (Eigen_vec @ V_mat @ Eigen_vec.transpose()).real
        Sigma_lim = (Eigen_vec @ V_lim @ Eigen_vec.transpose()).real
        for i in range(N_obs):
            y = Obs_Yield[i]
            xhat = phi0 + phi1 @ State
            y_implied = B @ xhat
            v = y - y_implied + val
            Sigmahat = phi1 @ Sigma_lim @ phi1.transpose() + Q
            F = B @ Sigmahat @ B.transpose() + H_large
            detF = np.linalg.det(F)
            if CheckDet(detF) > 0:
                N = 3
                break
            Finv = np.linalg.inv(F)
            State = xhat + Sigmahat @ B.transpose() @ Finv @ v
            Sigma_lim = Sigmahat - Sigmahat @ B.transpose() @ Finv @ B @ Sigmahat
            LLH_vec[i] = np.log(detF) + v.transpose() @ Finv @ v
    if N == 0:
        if Finalstate:
            yDate = Obs_Date[-1]
            result = np.array([yDate, State])
        else:
            result = 0.5 * (sum(LLH_vec) + Mty_length*N_obs*np.log(2*np.pi))
    else:
        result = 7000000
    return result
I wrote code for an arbitrage-free Nelson-Siegel model. The data are bond yields (1Y, 1.5Y, ..., 20Y). I want to optimize that function with scipy's optimize.minimize with fixed *args.
Suppose the initial params have been verified to be close to the optimized params, from empirical experiments using the Dynamic Nelson-Siegel model.
LLC_new = 0
while True:
    LLC_old = LLC_new
    OPT = optimize.minimize(x0=params, fun=DNS_Kalman_filter,
                            args=(data.values, data.index, timegap, maturity, 0))
    params = OPT.x
    LLC_new = round(OPT.fun, 5)
    print("Current LLC: %0.5f" % LLC_new)
    if LLC_old == LLC_new:
        OPT_para = params
        FinalState = DNS_Kalman_filter(params, data.values, data.index, timegap, maturity, True)
        break
Result is
Current LLC: -7613.70146
Current LLC: -7613.70146
The LLC (log-likelihood value) isn't maximized; it is not the result I expect from the optimizer.
Is there any solution for that?
In R, the optim() function works much like scipy.optimize.minimize() and works really well there. I also have R code for this that is very similar to this Python code.
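One hedged observation: because DNS_Kalman_filter returns a flat penalty (7000000) whenever Paramcheck rejects the parameters, the objective is non-smooth there, and SciPy's default gradient-based method can stall immediately; R's optim defaults to Nelder-Mead, which may explain the difference. A sketch of making the method explicit and inspecting why the optimizer stopped (the options are placeholders to tune, not a guaranteed fix):

OPT = optimize.minimize(
    fun=DNS_Kalman_filter,
    x0=params,
    args=(data.values, data.index, timegap, maturity, 0),
    method='Nelder-Mead',                       # gradient-free, shown as an example
    options={'maxiter': 20000, 'xatol': 1e-8, 'fatol': 1e-8},
)
print(OPT.success, OPT.message, round(OPT.fun, 5))   # report why it stopped
params = OPT.x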
