SQLAlchemy join onto an inline table - python-3.x

I have an inline-defined table:
select(
    Values(column('key', String), column('value', String), column('ordering', Integer), name='subq')
    .data([(e.name, e.value, i) for i, e in enumerate(DurationType)])
)
This produces this SQL:
select key, value, ordering
from (values ("key1", "name1", 1), ("key2", "name2", 2) ...)
Which is a fine table. Now I need to join this subquery to another one. The other query is:
(
    self.session.query(self.model)
    .filter(self.model.duration != None)
    .with_entities(
        duration_bucket := case(
            (self.model.duration < 60, DurationType.LESS_THAN_1.name),
            (and_(60 <= self.model.duration, self.model.duration < 60 * 5),
             DurationType.FROM_1_TO_5.name),
            (and_(60 * 5 <= self.model.duration, self.model.duration < 60 * 10),
             DurationType.FROM_5_TO_10.name),
            (and_(60 * 10 <= self.model.duration, self.model.duration < 60 * 20),
             DurationType.FROM_10_TO_20.name),
            (and_(60 * 20 <= self.model.duration, self.model.duration < 60 * 30),
             DurationType.FROM_20_TO_30.name),
            (60 * 30 <= self.model.duration, DurationType.MORE_THAN_30.name),
        ).label('id'))
    .group_by(duration_bucket)
    .having(count() > 0)
)
Which, apart from the case clause, is just an aggregation select that leaves me with a single column "id".
But for the life of me I can't figure out how to join it. My initial attempt looked like this:
inline_enum_table = select(
    Values(column('key', String), column('value', String), column('ordering', Integer), name="subq")
    .data([(e.name, e.value, i) for i, e in enumerate(DurationType)])
).subquery()
inline_enum_table = self.session.query(inline_enum_table)
return (
    self.session.query(self.model)
    .filter(self.model.duration != None)
    .with_entities(
        duration_bucket := case(
            (self.model.duration < 60, DurationType.LESS_THAN_1.name),
            (and_(60 <= self.model.duration, self.model.duration < 60 * 5),
             DurationType.FROM_1_TO_5.name),
            (and_(60 * 5 <= self.model.duration, self.model.duration < 60 * 10),
             DurationType.FROM_5_TO_10.name),
            (and_(60 * 10 <= self.model.duration, self.model.duration < 60 * 20),
             DurationType.FROM_10_TO_20.name),
            (and_(60 * 20 <= self.model.duration, self.model.duration < 60 * 30),
             DurationType.FROM_20_TO_30.name),
            (60 * 30 <= self.model.duration, DurationType.MORE_THAN_30.name),
        ).label('id'))
    .group_by(duration_bucket)
    .having(count() > 0)
    .join(duration_enum := inline_enum_table.label('qwe'), duration_enum.key == duration_bucket)
)
This particular attempt results in sqlalchemy.exc.ArgumentError: Expected mapped entity or selectable/table as join target
I've had many more tries with all sorts of errors.

I got the following to work. select().subquery() is what helps SQLAlchemy work with such objects as part of a FROM clause.
from sqlalchemy import Column, column, create_engine, Integer, join, select, String, values
from sqlalchemy.orm import declarative_base, Session

engine = create_engine("postgresql://scott:tiger@192.168.0.199/test")

Base = declarative_base()

class Thing(Base):
    __tablename__ = "thing"
    id = Column(Integer, primary_key=True, autoincrement=False)
    key = Column(String(50))

    def __repr__(self):
        return f"Thing(id={repr(self.id)}, key={repr(self.key)})"

Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)

with Session(engine) as sess, sess.begin():
    sess.add_all(
        [
            Thing(id=1, key="key1"),
            Thing(id=2, key="key2"),
        ]
    )
inline_enum_table = select(
    values(
        column("key", String),
        column("value", String),
        column("ordering", Integer),
        name="inline_enums",
        literal_binds=True,
    ).data(
        [
            ("key1", "name1", 1),
            ("key2", "name2", 2),
        ]
    )
).subquery()
with Session(engine) as sess:
    query = select(Thing, inline_enum_table.c.value).select_from(
        join(Thing, inline_enum_table, Thing.key == inline_enum_table.c.key)
    )
    print(query)
    """
    SELECT thing.id, thing.key, anon_1.value
    FROM thing JOIN (SELECT inline_enums.key AS key, inline_enums.value AS value, inline_enums.ordering AS ordering
    FROM (VALUES ('key1', 'name1', 1), ('key2', 'name2', 2)) AS inline_enums (key, value, ordering)) AS anon_1 ON thing.key = anon_1.key
    """
    results = sess.execute(query).all()
    print(results)
    # [(Thing(id=1, key='key1'), 'name1'), (Thing(id=2, key='key2'), 'name2')]
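For the original question's shape (joining the VALUES subquery onto a CASE bucket expression rather than onto a real column), a minimal sketch building on the objects above might look like the following. The bucket expression and its 'key1'/'key2' cutoffs are made up for illustration and are not part of the answer above.
from sqlalchemy import case, func
# Hypothetical bucket expression standing in for the question's duration CASE
bucket = case(
    (Thing.id < 2, "key1"),
    else_="key2",
).label("id")
bucket_query = (
    select(bucket, inline_enum_table.c.value, inline_enum_table.c.ordering)
    .select_from(Thing)
    .join(inline_enum_table, inline_enum_table.c.key == bucket)
    .group_by(bucket, inline_enum_table.c.value, inline_enum_table.c.ordering)
    .having(func.count() > 0)
    .order_by(inline_enum_table.c.ordering)
)
print(bucket_query)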

Related

Key-Error: KeyError: "None of [Float64Index([15.593, 15.577, 15.563], dtype='float64')] are in the [columns]"

I'm trying to calculate a column, but I get the following error:
Key-Error: KeyError: "None of [Float64Index([15.593, 15.577, 15.563], dtype='float64')] are in the [columns]"
Where is my error coming from, and how can I fix it?
import pandas as pd
import numpy as np

def absolute_humidity(temp, humidity):
    if df[temp] > 0:
        output = ((610.78 * np.exp((17.08085 * temp) / (234.175 + temp)) * humidity / 100) / (
                462 * (273.1 + temp)) * 1000)
        return output
    else:
        output = ((610.714 * np.exp((22.44294 * temp) / (272.44 + temp)) * humidity / 100) / (
                462 * (272.44 + temp)) * 1000)
        return output

data = {'Date': ['2017-10-18 00:00:00', '2017-10-18 01:00:00', '2017-10-18 02:00:00'],
        'T': [15.593, 15.577, 15.563],
        'rF': [77.8, 77.767, 77.667]}
df = pd.DataFrame(data)
df['Date'] = pd.to_datetime(df['Date'], format='%Y-%m-%d %H:%M')
df = df.set_index('Date')
df['aF'] = absolute_humidity(df['T'], df['rF'])
I reduced my code and data down to this function and have not found any solutions so far.
I thought it might work with apply:
df['aF'] = df.apply(lambda x: absolute_humidity(x.T, x.rF), axis=1)
Try refactoring your function like this:
def absolute_humidity(temp, humidity):
    return (
        (
            (610.78 * np.exp((17.08085 * temp) / (234.175 + temp)) * humidity / 100)
            / (462 * (273.1 + temp))
            * 1000
        )
        if temp > 0  # instead of df[temp] > 0 in your code
        else (
            (610.714 * np.exp((22.44294 * temp) / (272.44 + temp)) * humidity / 100)
            / (462 * (272.44 + temp))
            * 1000
        )
    )
And assign the new "aF" column like this:
df["aF"] = [
absolute_humidity(temp, humidity) for temp, humidity in zip(df["T"], df["rF"])
]
So that:
print(df)
# Output
                          T      rF         aF
Date
2017-10-18 00:00:00  15.593  77.800  10.349011
2017-10-18 01:00:00  15.577  77.767  10.334587
2017-10-18 02:00:00  15.563  77.667  10.312535
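As an aside that is not part of the answer above: the same row-wise branching can be done without a Python-level loop by letting numpy pick between the two vectorized formulas. (Also note that in the apply attempt, x.T resolves to the Series transpose attribute rather than the 'T' column, so x['T'] would be needed there.) A sketch reusing df and the formulas from above:
# np.where evaluates both formulas element-wise and selects per row based on temp > 0
temp, humidity = df["T"], df["rF"]
df["aF"] = np.where(
    temp > 0,
    (610.78 * np.exp((17.08085 * temp) / (234.175 + temp)) * humidity / 100)
    / (462 * (273.1 + temp)) * 1000,
    (610.714 * np.exp((22.44294 * temp) / (272.44 + temp)) * humidity / 100)
    / (462 * (272.44 + temp)) * 1000,
)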

Python Truth value of a series is ambiguous error in Function

I'm trying to build a function that uses several scalar values as inputs and one series or array also as an input.
The function applies calculations to each value in the series. It works fine so far. But now I'm adding a phase where it has to check each value of the series: if it's less than X it performs one calculation, otherwise it performs a different one.
However I keep getting a "truth value of a series is ambiguous" error and I can't seem to solve it.
What is a workaround?
My code is below:
import numpy as np
import pandas as pd
import math

tramp = 2
Qo = 750
Qi = 1500
b = 1.2
Dei = 0.8
Df = 0.08
Qf = 1
tmax = 30
tper = 'm'
t = pd.Series(range(1, 11))

def QHyp_Mod(Qi, b, Dei, Df, Qf, tmax, tper, t):
    tper = 12
    Qi = Qi * (365 / 12)
    Qf = Qf * (365 / 12)
    ai = (1 / b) * ((1 / (1 - Dei)) ** b - 1)
    aim = ai / tper
    ai_exp = -np.log(1 - Df)
    aim_exp = ai_exp / tper
    t_exp_sw = 118
    Qi_exp = Qi / ((1 + aim * t_exp_sw * b) ** (1 / b))
    Qcum = (Qi / (aim * (1 - b))) * (1 - (1 / ((1 + aim * t * b) ** ((1 - b) / b))))
    t_exp = t - t_exp_sw
    Qcum_Exp = (Qi_exp / aim_exp) * (1 - np.exp(-aim_exp * t_exp))
    if t < t_exp_sw:
        return Qcum
    else:
        return Qcum_exp

z = QHyp_Mod(Qi=Qi, b=b, Dei=Dei, Df=Df, Qf=Qf, tmax=tmax, tper=tper, t=t)
Replace the if-else statement:
if t < t_exp_sw:
    return Qcum
else:
    return Qcum_exp
with this:
Qcum = Qcum.where(t < t_exp_sw, Qcum_Exp)
return Qcum
The where method tests the condition for each element of Qcum: where it is true the original value is kept, and where it is false it is replaced with the corresponding element of Qcum_Exp.
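A tiny self-contained illustration of Series.where with made-up numbers (not the question's data):
import pandas as pd
t = pd.Series([100, 118, 130])
q_hyp = pd.Series([1.0, 2.0, 3.0])     # values kept where the condition holds
q_exp = pd.Series([10.0, 20.0, 30.0])  # replacements where it does not
# element-wise: keep q_hyp where t < 118, otherwise take the matching q_exp value
result = q_hyp.where(t < 118, q_exp)
print(result.tolist())  # [1.0, 20.0, 30.0]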

SyntaxError: invalid syntax, does anyone know what this is?

I'm trying to run the code below, but I get the following error:
line 27
    return np.exp(-1.0)*self.rf*self.T)*average
SyntaxError: invalid syntax
The complete code is:
import numpy as np
import math
import time

class optionPricing:

    def __init__(self, S0, E, T, rf, sigma, interations):
        self.S0 = S0
        self.E = E
        self.T = T
        self.rf = rf
        self.sigma = sigma
        self.interations = interations

    def call_option_simulation(self):
        option_data = np.zeros([self.interations, 2])
        rand = np.random.normal(0, 1, [1, self.interations])
        stock_price = self.S0*np.exp(self.T*(self.rf - 0.5*self.sigma**2)+self.sigma*np.sqrt(self.T)*rand)
        option_data[:, 1] = stock_price - self.E
        average = np.sum(np.amax(option_data, axis=1))/float(self.interations)
        return np.exp(-1.0)*self.rf*self.T)*average

    def put_option_simulation(self):
        option_data = np.zeros([self.interations, 2])
        rand = np.random.normal(0, 1, [1, self.interations])
        stock_price = self.S0 * np.exp(self.T * (self.rf - 0.5 * self.sigma ** 2) + self.sigma * np.sqrt(self.T) * rand)
        option_data[:, 1] = self.E - stock_price
        average = np.sum(np.amax(option_data, axis=1)) / float(self.interations)
        return np.exp(-1.0) * self.rf * self.T) * average

if __name__ == "__name__":
    S0 = 100                # underlying stock price at t=0
    E = 100                 # strike price
    T = 1                   # time to maturity
    rf = 0.05               # risk-free rate
    sigma = 0.2             # volatility of the underlying stock
    interations = 10000000  # number of iterations in the monte-carlo simulation
    model = optionPricing(S0, E, T, rf, sigma, interations)
    print("call option price with monte-carlo approach: ", model.call_option_simulation())
    ptint("put option price with monte-carlo approach: ", model.put_option_simulation())
The brackets are unbalanced: np.exp(-1.0)*self.rf*self.T)*average has one opening parenthesis but two closing ones. The whole -rf*T product should go inside np.exp, i.e. return np.exp(-1.0*self.rf*self.T)*average.
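For concreteness, here is a sketch of the call method with balanced parentheses (the put method needs the same fix). The if __name__ == "__name__": line and ptint further down are separate typos (they should read "__main__" and print), but they are not the cause of this SyntaxError.
def call_option_simulation(self):
    option_data = np.zeros([self.interations, 2])
    rand = np.random.normal(0, 1, [1, self.interations])
    stock_price = self.S0 * np.exp(
        self.T * (self.rf - 0.5 * self.sigma ** 2)
        + self.sigma * np.sqrt(self.T) * rand
    )
    option_data[:, 1] = stock_price - self.E
    average = np.sum(np.amax(option_data, axis=1)) / float(self.interations)
    # the whole -rf*T product goes inside np.exp(...): discount factor e^(-rf*T)
    return np.exp(-1.0 * self.rf * self.T) * average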

Multiprocessing in Python (going from a simple python for loop to multiprocessing for loop)

I have never used the multiprocessing module before.
I have a python script that works. It has a for loop whose execution speed I would like to improve by incorporating multiprocessing.
FYI: my CPU has 8 cores, running Ubuntu 18.04.3 LTS with Python 3.6.8.
The code without multiprocessing is as follows:
import math
import cmath
import numpy as np
import matplotlib.pyplot as plt
cj = cmath.sqrt(-1); Tp = 10e-6; Xc = 2e3; c = 3e8; B0 = 500e6; X0 = 50; fc = 2e9; ntarget = 8
w0 = 2 * cmath.pi * B0
wc = 2 * cmath.pi * fc
alpha = w0 / Tp
wcm = wc - alpha * Tp
Ts = (2 * (Xc - X0)) / c
Tf = (2*(Xc + X0))/c+Tp
dt = cmath.pi / (2 * alpha * Tp)
n = 2 * math.ceil(0.5 * (Tf - Ts) / dt)
t = Ts + np.arange(0, n * dt, dt)
dw = 2 * cmath.pi / (n * dt)
w = wc + dw * np.arange(-n / 2, n / 2)
x = Xc + 0.5 * c * dt * np.arange(-n / 2, n / 2)
kx = (2 * w) / c
xn = [0, 25, 35, 36.5, 40, -25, -35, -36.5]
fn = [1, 0.8, 1, 0.8, 1, 0.6, 0.9, 1]
s = np.zeros((1, n))
for i in np.arange(ntarget):
    td = t - (2 * (Xc + xn[i]) / c)
    pha = wcm * td + alpha * np.power(td, 2)
    s = s + np.multiply(fn[i] * np.exp(cj * pha), np.logical_and(td >= 0, td <= Tp))
The code continues further based on the 's' output.
The second code block below is with parallel processing:
The code above "def work(ntarget)" remains the same as in the first version.
from multiprocessing import Pool  # assumed to be imported at the top of the full script

def work(ntarget):
    xn = [0, 25, 35, 36.5, 40, -25, -35, -36.5]
    fn = [1, 0.8, 1, 0.8, 1, 0.6, 0.9, 1]
    s = np.zeros((1, n))
    for i in np.arange(ntarget):
        td = t - (2 * (Xc + xn[i]) / c)
        pha = wcm * td + alpha * np.power(td, 2)
        s = s + np.multiply(fn[i] * np.exp(cj * pha), np.logical_and(td >= 0, td <= Tp))

if __name__ == '__main__':
    targets = [8]
    p = Pool()
    p.map(work, targets)
    p.close()
    p.join()
The first one works fine; the second also runs without error, but it is not faster.
The execution time of the first code is 0.892 secs.
The execution time of the second code is 0.95 secs.
I expected the code to run on multiple cores and thereby reduce the execution time.
Is something wrong with my multiprocessing code, or can the execution time not be reduced further?
Thanks in advance.
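No answer is recorded here, but note that p.map(work, targets) with targets = [8] submits a single task, so only one worker process does all the work (and work() also discards its result). A hedged sketch of one way to split the loop one target per worker and combine the partial sums in the parent, assuming the module-level constants and arrays (t, Xc, c, wcm, alpha, Tp, cj, n, xn, fn, ntarget) from the code above and fork-style inheritance of globals on Linux:
from multiprocessing import Pool

def work_one(i):
    # contribution of a single target to the summed signal
    td = t - (2 * (Xc + xn[i]) / c)
    pha = wcm * td + alpha * np.power(td, 2)
    return fn[i] * np.exp(cj * pha) * np.logical_and(td >= 0, td <= Tp)

if __name__ == '__main__':
    with Pool() as p:
        partials = p.map(work_one, range(ntarget))
    s = np.sum(partials, axis=0)  # same values as the serial loop, shape (n,)
For work this small, process start-up and data transfer can easily outweigh the gain, so a speed-up is not guaranteed.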

Define a value that minimizes a function through iterations

Currently I have the following code:
call = []
diff = []

def results(S0, K, T, r, sigma, k, N, M, Iteration):
    for i in range(1, Iteration):
        S0 = float(S0)
        d1 = (log(S0 / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * sqrt(T))
        d2 = (log(S0 / K) + (r - 0.5 * sigma ** 2) * T) / (sigma * sqrt(T))
        call1 = (S0 * stats.norm.cdf(d1, 0.0, 1.0) - K * exp(-r * T) * stats.norm.cdf(d2, 0.0, 1.0))
        call.append(call1)
        dilution = N / (N + k * M)
        Value_2 = Value_1 + call * M
        diff1 = Value_1 - Value_2 == 0
        diff.append(diff1)
    return call

print(results(100, 100, 1, 0.1, 0.2, 1, 100, 10, 1000))
I am trying to set up the iterations so that the program finds the value of "call" that gives the minimum value of "Value_1 - Value_2", based on the number of iterations. Can you please advise me how to advance the code? Specifically, I don't know how to code "return the output of 'call' such that 'Value_1 - Value_2' is minimal, based on the number of iterations".
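Since no answer is recorded here, the following is only a generic sketch of the usual pattern, with hypothetical numbers rather than the asker's model: compute the candidate inside the loop, remember the one whose difference is smallest, and return it at the end. In the question's terms, value would play the role of call1 and diff the role of abs(Value_1 - Value_2).
import numpy as np

def minimize_over_iterations(candidates, target):
    """Return the candidate whose absolute difference from `target` is smallest."""
    best_value, best_diff = None, np.inf
    for value in candidates:
        diff = abs(value - target)
        if diff < best_diff:
            best_value, best_diff = value, diff
    return best_value, best_diff

# hypothetical usage with made-up call prices and a target value of 5.0
print(minimize_over_iterations([4.2, 5.6, 4.95, 5.2], target=5.0))  # (4.95, ~0.05)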
