Why doesn't exception handling print text? - python-3.x

My question is why Python doesn't execute the print statement in the exception-handling code below. I am trying to calculate the log of the volumes for a bunch of stocks; each stock has 1259 volume values. Python generates a RuntimeWarning, "divide by zero encountered in log", so I use exception handling to locate where the log input is zero, but Python never executes the print statement under except. The print statement is supposed to print the name of the stock and the index in the array where the volume is zero. Why?
Here is the code:
for i, stock in enumerate(df.columns):
    volumes = df[stock].to_numpy()
    for r in range(len(volumes)):  # len(volumes) = 1259
        try:
            v = np.log(volumes[r])
        except:
            print(stock, r)
Here is the error that follows the RuntimeWarning:
LinAlgError Traceback (most recent call last)
<ipython-input-6-6aa283671e2c> in <module>
13 closes = df_close[stock].to_numpy()
14 volumes = df_vol[stock].to_numpy()
---> 15 indicator_values_all_stocks[i] = indicator.price_volume_fit(volumes, closes, histLength)
16
17 indicator_values_all_stocks_no_NaN = indicator_values_all_stocks[:, ~np.isnan(indicator_values_all_stocks).any(axis=0)]
~\Desktop\Python Projects Organized\Finance\Indicator Statistics\B.57. Price Volume Fit\indicator.py in price_volume_fit(volumes, closes, histLength)
1259 x = log_volumes[i - histLength:i]
1260 y = log_prices[i - histLength:i]
-> 1261 model = np.polyfit(x, y, 1, full = True)
1262 slope[i] = model[0][0]
1263
<__array_function__ internals> in polyfit(*args, **kwargs)
c:\users\donald seger\miniconda3\envs\tensorflow\lib\site-packages\numpy\lib\polynomial.py in polyfit(x, y, deg, rcond, full, w, cov)
629 scale = NX.sqrt((lhs*lhs).sum(axis=0))
630 lhs /= scale
--> 631 c, resids, rank, s = lstsq(lhs, rhs, rcond)
632 c = (c.T/scale).T # broadcast scale coefficients
633
<__array_function__ internals> in lstsq(*args, **kwargs)
c:\users\donald seger\miniconda3\envs\tensorflow\lib\site-packages\numpy\linalg\linalg.py in lstsq(a, b, rcond)
2257 # lapack can't handle n_rhs = 0 - so allocate the array one larger in that axis
2258 b = zeros(b.shape[:-2] + (m, n_rhs + 1), dtype=b.dtype)
-> 2259 x, resids, rank, s = gufunc(a, b, rcond, signature=signature, extobj=extobj)
2260 if m == 0:
2261 x[...] = 0
c:\users\donald seger\miniconda3\envs\tensorflow\lib\site-packages\numpy\linalg\linalg.py in _raise_linalgerror_lstsq(err, flag)
107
108 def _raise_linalgerror_lstsq(err, flag):
--> 109 raise LinAlgError("SVD did not converge in Linear Least Squares")
110
111 def get_linalg_error_extobj(callback):
LinAlgError: SVD did not converge in Linear Least Squares
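As an aside to the open question, a minimal sketch of the distinction that may be at play (this is an assumption, since NumPy by default only warns here): np.log(0) emits a RuntimeWarning and returns -inf rather than raising an exception, so a bare except has nothing to catch; np.errstate can promote the warning to a real exception:

import numpy as np

v = np.log(0.0)   # RuntimeWarning: divide by zero encountered in log
print(v)          # -inf -- no exception is raised, so `except` never runs

# Promoting the floating-point warning to an exception makes try/except work:
with np.errstate(divide='raise'):
    try:
        v = np.log(0.0)
    except FloatingPointError:
        print("zero volume found")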

Related

jax.lax.fori_loop Abstract tracer value encountered where concrete value is expected

I have a JAX loop that looks like this, where inside the step function I use min between the two arguments:
import jax
def step(timestep: int, order: int = 4) -> int:
    order = min(timestep + 1, order)
    return order
num_steps = 10
order = 100
order = jax.lax.fori_loop(0, num_steps, step, order)
The above code fails with a jax._src.errors.ConcretizationTypeError. This is the full stack trace:
WARNING:jax._src.lib.xla_bridge:No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)
---------------------------------------------------------------------------
UnfilteredStackTrace Traceback (most recent call last)
<ipython-input-4-9ec280f437cb> in <module>
2 order = 100
----> 3 order = jax.lax.fori_loop(0, num_steps, step, order)
16 frames
/usr/local/lib/python3.8/dist-packages/jax/_src/traceback_util.py in reraise_with_filtered_traceback(*args, **kwargs)
161 try:
--> 162 return fun(*args, **kwargs)
163 except Exception as e:
/usr/local/lib/python3.8/dist-packages/jax/_src/lax/control_flow/loops.py in fori_loop(lower, upper, body_fun, init_val)
1691
-> 1692 (_, result), _ = scan(_fori_scan_body_fun(body_fun), (lower_, init_val),
1693 None, length=upper_ - lower_)
/usr/local/lib/python3.8/dist-packages/jax/_src/traceback_util.py in reraise_with_filtered_traceback(*args, **kwargs)
161 try:
--> 162 return fun(*args, **kwargs)
163 except Exception as e:
/usr/local/lib/python3.8/dist-packages/jax/_src/lax/control_flow/loops.py in scan(f, init, xs, length, reverse, unroll)
258 # necessary, a second time with modified init values.
--> 259 init_flat, carry_avals, carry_avals_out, init_tree, *rest = _create_jaxpr(init)
260 new_init_flat, changed = _promote_weak_typed_inputs(init_flat, carry_avals, carry_avals_out)
/usr/local/lib/python3.8/dist-packages/jax/_src/lax/control_flow/loops.py in _create_jaxpr(init)
244 carry_avals = tuple(_map(_abstractify, init_flat))
--> 245 jaxpr, consts, out_tree = _initial_style_jaxpr(
246 f, in_tree, (*carry_avals, *x_avals), "scan")
/usr/local/lib/python3.8/dist-packages/jax/_src/lax/control_flow/common.py in _initial_style_jaxpr(fun, in_tree, in_avals, primitive_name)
59 primitive_name: Optional[str] = None):
---> 60 jaxpr, consts, out_tree = _initial_style_open_jaxpr(
61 fun, in_tree, in_avals, primitive_name)
/usr/local/lib/python3.8/dist-packages/jax/_src/lax/control_flow/common.py in _initial_style_open_jaxpr(fun, in_tree, in_avals, primitive_name)
53 debug = pe.debug_info(fun, in_tree, False, primitive_name or "<unknown>")
---> 54 jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(wrapped_fun, in_avals, debug)
55 return jaxpr, consts, out_tree()
/usr/local/lib/python3.8/dist-packages/jax/_src/profiler.py in wrapper(*args, **kwargs)
313 with TraceAnnotation(name, **decorator_kwargs):
--> 314 return func(*args, **kwargs)
315 return wrapper
/usr/local/lib/python3.8/dist-packages/jax/interpreters/partial_eval.py in trace_to_jaxpr_dynamic(fun, in_avals, debug_info, keep_inputs)
1980 main.jaxpr_stack = () # type: ignore
-> 1981 jaxpr, out_avals, consts = trace_to_subjaxpr_dynamic(
1982 fun, main, in_avals, keep_inputs=keep_inputs, debug_info=debug_info)
/usr/local/lib/python3.8/dist-packages/jax/interpreters/partial_eval.py in trace_to_subjaxpr_dynamic(fun, main, in_avals, keep_inputs, debug_info)
1997 in_tracers_ = [t for t, keep in zip(in_tracers, keep_inputs) if keep]
-> 1998 ans = fun.call_wrapped(*in_tracers_)
1999 out_tracers = map(trace.full_raise, ans)
/usr/local/lib/python3.8/dist-packages/jax/linear_util.py in call_wrapped(self, *args, **kwargs)
166 try:
--> 167 ans = self.f(*args, **dict(self.params, **kwargs))
168 except:
/usr/local/lib/python3.8/dist-packages/jax/_src/lax/control_flow/loops.py in scanned_fun(loop_carry, _)
1607 i, x = loop_carry
-> 1608 return (i + 1, body_fun()(i, x)), None
1609 return scanned_fun
<ipython-input-2-2e3345899235> in step(timestep, order)
1 def step(timestep: int, order: int = 100) -> int:
----> 2 order = min(timestep + 1, order)
3 return order
/usr/local/lib/python3.8/dist-packages/jax/core.py in __bool__(self)
633 def __nonzero__(self): return self.aval._nonzero(self)
--> 634 def __bool__(self): return self.aval._bool(self)
635 def __int__(self): return self.aval._int(self)
/usr/local/lib/python3.8/dist-packages/jax/core.py in error(self, arg)
1266 def error(self, arg):
-> 1267 raise ConcretizationTypeError(arg, fname_context)
1268 return error
UnfilteredStackTrace: jax._src.errors.ConcretizationTypeError: Abstract tracer value encountered where concrete value is expected: Traced<ShapedArray(bool[], weak_type=True)>with<DynamicJaxprTrace(level=1/0)>
The problem arose with the `bool` function.
The error occurred while tracing the function scanned_fun at /usr/local/lib/python3.8/dist-packages/jax/_src/lax/control_flow/loops.py:1606 for scan. This concrete value was not available in Python because it depends on the values of the argument 'loop_carry'.
See https://jax.readthedocs.io/en/latest/errors.html#jax.errors.ConcretizationTypeError
The stack trace below excludes JAX-internal frames.
The preceding is the original exception that occurred, unmodified.
--------------------
The above exception was the direct cause of the following exception:
ConcretizationTypeError Traceback (most recent call last)
<ipython-input-4-9ec280f437cb> in <module>
1 num_steps = 10
2 order = 100
----> 3 order = jax.lax.fori_loop(0, num_steps, step, order)
<ipython-input-2-2e3345899235> in step(timestep, order)
1 def step(timestep: int, order: int = 100) -> int:
----> 2 order = min(timestep + 1, order)
3 return order
ConcretizationTypeError: Abstract tracer value encountered where concrete value is expected: Traced<ShapedArray(bool[], weak_type=True)>with<DynamicJaxprTrace(level=1/0)>
The problem arose with the `bool` function.
The error occurred while tracing the function scanned_fun at /usr/local/lib/python3.8/dist-packages/jax/_src/lax/control_flow/loops.py:1606 for scan. This concrete value was not available in Python because it depends on the values of the argument 'loop_carry'.
See https://jax.readthedocs.io/en/latest/errors.html#jax.errors.ConcretizationTypeError
Everything works fine if, instead of using jax.lax.fori_loop, I use a simple Python loop, but my original code would end up very slow. How can I fix this issue?
Use jax.numpy.minimum in place of min:
def step(timestep: int, order: int = 4) -> int:
    order = jax.numpy.minimum(timestep + 1, order)
    return order
The reason min does not work is that in the course of executing code within jit, grad, vmap, fori_loop, etc., JAX replaces concrete values with abstract tracers, and Python functions like min don't know how to handle these abstract values. See How to Think in JAX for more background on this.
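As a quick sanity check, here is a runnable sketch of the fix in the original loop (the setup mirrors the question; the final value of 1 simply follows from repeatedly taking the minimum of the carry):

import jax
import jax.numpy as jnp

def step(timestep: int, order: int = 4) -> int:
    # jnp.minimum operates on traced values, unlike Python's built-in min
    return jnp.minimum(timestep + 1, order)

num_steps = 10
order = jax.lax.fori_loop(0, num_steps, step, 100)
print(order)  # 1: iteration 0 clamps the carry to min(0 + 1, 100)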

What is the best way to solve a memory error without changing values in the function?

I'm running some tests to check whether my sampling algorithm does better when some of its values are changed.
The tests had gone off without a hitch until I tried to run a couple more for additional results, at which point I got the MemoryError.
MemoryError Traceback (most recent call last)
<ipython-input-66-1ab060bc6067> in <module>
22 for g in range(0,10000):
23 # sample
---> 24 sample_df = stratified_sample(df,test,size=38, keep_index=False)
25 pathaux = "C://Users//Pedro//Desktop//EscolhasAlgoritmos//Stratified//Stratified_Tests//"
26 example = "exampleFCUL"
<ipython-input-10-7aba847839db> in stratified_sample(df, strata, size, seed, keep_index)
79 # final dataframe
80 if first:
---> 81 stratified_df = df.query(qry).sample(n=n, random_state=seed).reset_index(drop=(not keep_index))
82 first = False
83 else:
D:\Anaconda\lib\site-packages\pandas\core\frame.py in query(self, expr, inplace, **kwargs)
3182 kwargs["level"] = kwargs.pop("level", 0) + 1
3183 kwargs["target"] = None
-> 3184 res = self.eval(expr, **kwargs)
3185
3186 try:
D:\Anaconda\lib\site-packages\pandas\core\frame.py in eval(self, expr, inplace, **kwargs)
3298 kwargs["target"] = self
3299 kwargs["resolvers"] = kwargs.get("resolvers", ()) + tuple(resolvers)
-> 3300 return _eval(expr, inplace=inplace, **kwargs)
3301
3302 def select_dtypes(self, include=None, exclude=None):
D:\Anaconda\lib\site-packages\pandas\core\computation\eval.py in eval(expr, parser, engine, truediv, local_dict, global_dict, resolvers, level, target, inplace)
325 eng = _engines[engine]
326 eng_inst = eng(parsed_expr)
--> 327 ret = eng_inst.evaluate()
328
329 if parsed_expr.assigner is None:
D:\Anaconda\lib\site-packages\pandas\core\computation\engines.py in evaluate(self)
68
69 # make sure no names in resolvers and locals/globals clash
---> 70 res = self._evaluate()
71 return _reconstruct_object(
72 self.result_type, res, self.aligned_axes, self.expr.terms.return_type
D:\Anaconda\lib\site-packages\pandas\core\computation\engines.py in _evaluate(self)
117 truediv = scope["truediv"]
118 _check_ne_builtin_clash(self.expr)
--> 119 return ne.evaluate(s, local_dict=scope, truediv=truediv)
120 except KeyError as e:
121 # python 3 compat kludge
D:\Anaconda\lib\site-packages\numexpr\necompiler.py in evaluate(ex, local_dict, global_dict, out, order, casting, **kwargs)
814 expr_key = (ex, tuple(sorted(context.items())))
815 if expr_key not in _names_cache:
--> 816 _names_cache[expr_key] = getExprNames(ex, context)
817 names, ex_uses_vml = _names_cache[expr_key]
818 arguments = getArguments(names, local_dict, global_dict)
D:\Anaconda\lib\site-packages\numexpr\necompiler.py in getExprNames(text, context)
705
706 def getExprNames(text, context):
--> 707 ex = stringToExpression(text, {}, context)
708 ast = expressionToAST(ex)
709 input_order = getInputOrder(ast, None)
D:\Anaconda\lib\site-packages\numexpr\necompiler.py in stringToExpression(s, types, context)
282 else:
283 flags = 0
--> 284 c = compile(s, '<expr>', 'eval', flags)
285 # make VariableNode's for the names
286 names = {}
MemoryError:
My question is: what is the best way of solving this memory error without changing the number of parameters? With all the searching I did here and on Google, I have found no clear answer.
Code:
def transform(multilevelDict):
    return {"t" + '_' + str(key): (transform(value) if isinstance(value, dict) else value)
            for key, value in multilevelDict.items()}

df = pd.read_csv('testingwebsitedata6.csv', sep=';')
df['Element_Count'] = df['Element_Count'].apply(json.loads)
df['Tag_Count'] = df['Tag_Count'].apply(json.loads)
for i in range(len(df['Tag_Count'])):
    df['Tag_Count'][i] = transform(df['Tag_Count'][i])
df1 = pd.DataFrame(df['Element_Count'].values.tolist())
df2 = pd.DataFrame(df['Tag_Count'].values.tolist())
df = pd.concat([df.drop('Element_Count', axis=1), df1], axis=1)
df = pd.concat([df.drop('Tag_Count', axis=1), df2], axis=1)
df = df.fillna(0)
df[df.select_dtypes(include=['float64']).columns] = df.select_dtypes(include=['float64']).astype(int)
df
test= ['link', 'document', 'heading', 'form', 'textbox', 'button', 'list', 'listitem', 'img', 'navigation', 'banner', 'main', 'article', 'contentinfo', 'checkbox', 'table', 'rowgroup', 'row', 'cell', 'listbox', 'presentation', 'figure', 'columnheader', 'separator', 'group', 'region', 't_html', 't_head', 't_title', 't_meta', 't_link', 't_script', 't_style', 't_body', 't_a', 't_div', 't_h1', 't_form', 't_label', 't_input', 't_ul', 't_li', 't_i', 't_img', 't_nav', 't_header', 't_span', 't_article', 't_p', 't_footer', 't_h3', 't_br', 't_noscript', 't_em', 't_strong', 't_button', 't_h2', 't_ol', 't_time', 't_center', 't_table', 't_tbody', 't_tr', 't_td', 't_font', 't_select', 't_option', 't_b', 't_figure', 't_figcaption', 't_u', 't_iframe', 't_caption', 't_thead', 't_th', 't_h5', 't_sup', 't_map', 't_area', 't_hr', 't_h4', 't_blockquote', 't_sub', 't_fieldset', 't_legend', 't_pre', 't_main', 't_section', 't_small', 't_tfoot', 't_textarea', 't_inserir', 't_s']
print('test1')
print('\n')
for g in range(0, 10000):
    # sample
    sample_df = stratified_sample(df, test, size=38, keep_index=False)
    pathaux = "C://Users//Pedro//Desktop//EscolhasAlgoritmos//Stratified//Stratified_Tests//"
    example = "exampleFCUL"
    randomnumber = g + 1
    csv = ".csv"
    path = pathaux + '26' + '//' + example + str(randomnumber) + csv
    chosencolumns = ["Uri"]
    sample_df.to_csv(path, sep=';', index=False, columns=chosencolumns, header=False)
Stratified sampling function used:
def stratified_sample(df, strata, size=None, seed=None, keep_index=True):
    '''
    Samples data from a pandas dataframe using strata. These functions use
    proportionate stratification:
        n1 = (N1/N) * n
    where:
        - n1 is the sample size of stratum 1
        - N1 is the population size of stratum 1
        - N is the total population size
        - n is the sampling size
    Parameters
    ----------
    :df: pandas dataframe from which data will be sampled.
    :strata: list containing columns that will be used in the stratified sampling.
    :size: sampling size. If not informed, a sampling size will be calculated
        using Cochran's adjusted sampling formula:
            cochran_n = (Z**2 * p * q) / e**2
        where:
            - Z is the z-value. In this case we use 1.96, representing 95%
            - p is the estimated proportion of the population which has an
              attribute. In this case we use 0.5
            - q is 1 - p
            - e is the margin of error
        This formula is adjusted as follows:
            adjusted_cochran = cochran_n / (1 + ((cochran_n - 1) / N))
        where:
            - cochran_n is the result of the previous formula
            - N is the population size
    :seed: sampling seed
    :keep_index: if True, it keeps a column with the original population index indicator
    Returns
    -------
    A sampled pandas dataframe based on a set of strata.
    Examples
    --------
    >> df.head()
       id sex  age city
    0  123   M  20  XYZ
    1  456   M  25  XYZ
    2  789   M  21  YZX
    3  987   F  40  ZXY
    4  654   M  45  ZXY
    ...
    # This returns a sample stratified by sex and city containing 30% of the size of
    # the original data
    >> stratified = stratified_sample(df=df, strata=['sex', 'city'], size=0.3)
    Requirements
    ------------
    - pandas
    - numpy
    '''
    population = len(df)
    size = __smpl_size(population, size)
    tmp = df[strata]
    tmp['size'] = 1
    tmp_grpd = tmp.groupby(strata).count().reset_index()
    tmp_grpd['samp_size'] = round(size/population * tmp_grpd['size']).astype(int)
    # controlling variable to create the dataframe or append to it
    first = True
    for i in range(len(tmp_grpd)):
        # query generator for each iteration
        qry = ''
        for s in range(len(strata)):
            stratum = strata[s]
            value = tmp_grpd.iloc[i][stratum]
            n = tmp_grpd.iloc[i]['samp_size']
            if type(value) == str:
                value = "'" + str(value) + "'"
            if s != len(strata) - 1:
                qry = qry + stratum + ' == ' + str(value) + ' & '
            else:
                qry = qry + stratum + ' == ' + str(value)
        # final dataframe
        if first:
            stratified_df = df.query(qry).sample(n=n, random_state=seed).reset_index(drop=(not keep_index))
            first = False
        else:
            tmp_df = df.query(qry).sample(n=n, random_state=seed).reset_index(drop=(not keep_index))
            stratified_df = stratified_df.append(tmp_df, ignore_index=True)
    return stratified_df
def stratified_sample_report(df, strata, size=None):
    '''
    Generates a dataframe reporting the counts in each stratum and the counts
    for the final sampled dataframe.
    Parameters
    ----------
    :df: pandas dataframe from which data will be sampled.
    :strata: list containing columns that will be used in the stratified sampling.
    :size: sampling size. If not informed, a sampling size will be calculated
        using Cochran's adjusted sampling formula:
            cochran_n = (Z**2 * p * q) / e**2
        where:
            - Z is the z-value. In this case we use 1.96, representing 95%
            - p is the estimated proportion of the population which has an
              attribute. In this case we use 0.5
            - q is 1 - p
            - e is the margin of error
        This formula is adjusted as follows:
            adjusted_cochran = cochran_n / (1 + ((cochran_n - 1) / N))
        where:
            - cochran_n is the result of the previous formula
            - N is the population size
    Returns
    -------
    A dataframe reporting the counts in each stratum and the counts
    for the final sampled dataframe.
    '''
    population = len(df)
    size = __smpl_size(population, size)
    tmp = df[strata]
    tmp['size'] = 1
    tmp_grpd = tmp.groupby(strata).count().reset_index()
    tmp_grpd['samp_size'] = round(size/population * tmp_grpd['size']).astype(int)
    return tmp_grpd
def __smpl_size(population, size):
    '''
    A function to compute the sample size. If not informed, a sampling
    size will be calculated using Cochran's adjusted sampling formula:
        cochran_n = (Z**2 * p * q) / e**2
    where:
        - Z is the z-value. In this case we use 1.96, representing 95%
        - p is the estimated proportion of the population which has an
          attribute. In this case we use 0.5
        - q is 1 - p
        - e is the margin of error
    This formula is adjusted as follows:
        adjusted_cochran = cochran_n / (1 + ((cochran_n - 1) / N))
    where:
        - cochran_n is the result of the previous formula
        - N is the population size
    Parameters
    ----------
    :population: population size
    :size: sample size (default = None)
    Returns
    -------
    Calculated sample size to be used in the functions:
        - stratified_sample
        - stratified_sample_report
    '''
    if size is None:
        cochran_n = round(((1.96)**2 * 0.5 * 0.5) / 0.02**2)
        n = round(cochran_n / (1 + ((cochran_n - 1) / population)))
    elif size >= 0 and size < 1:
        n = round(population * size)
    elif size < 0:
        raise ValueError('Parameter "size" must be an integer or a proportion between 0 and 0.99.')
    elif size >= 1:
        n = size
    return n
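To make the Cochran branch concrete, here is a worked example with a hypothetical population of 10,000 (Z, p, q, and e are the values hard-coded above):

# cochran_n        = (1.96**2 * 0.5 * 0.5) / 0.02**2    = 2401
# adjusted_cochran = 2401 / (1 + (2401 - 1) / 10000)    ≈ 1936
print(__smpl_size(population=10000, size=None))  # -> 1936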
(Anything that I have forgotten to mention that you feel is important to understanding the problem, please say so and I will edit it in.)
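Not part of the original post, but worth noting as a hedged direction: the traceback dies inside the numexpr compile call made by df.query, so one candidate fix is a sampler that never builds and compiles a query string per stratum. A minimal sketch under that assumption (stratified_sample_groupby is a hypothetical replacement, not the poster's code):

def stratified_sample_groupby(df, strata, size, seed=None):
    # groupby + per-group sample gives the same proportionate allocation
    # without df.query/numexpr ever parsing an expression string
    frac = size / len(df)
    return (df.groupby(strata, group_keys=False)
              .apply(lambda g: g.sample(n=round(len(g) * frac), random_state=seed))
              .reset_index(drop=True))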

Broadcast error in scipy but the arrays are the same shape

I am trying to use the scipy statistical functions,
prior = scipy.stats.truncnorm.pdf(x, a, b,
                                  loc=loc, scale=scale)
I'm getting the following error,
<ipython-input-32-9c0390b95343> in prior(values1, values2)
69 # line 6352 return _norm_pdf(x) / self._delta
70 prior = scipy.stats.truncnorm.pdf(x, a, b,
---> 71 loc=loc, scale=scale)
72
/opt/anaconda3/envs/pymc3/lib/python3.6/site-packages/scipy/stats/_distn_infrastructure.py in pdf(self, x, *args, **kwds)
1669 goodargs = argsreduce(cond, *((x,)+args+(scale,)))
1670 scale, goodargs = goodargs[-1], goodargs[:-1]
-> 1671 place(output, cond, self._pdf(*goodargs) / scale)
1672 if output.ndim == 0:
1673 return output[()]
/opt/anaconda3/envs/pymc3/lib/python3.6/site-packages/scipy/stats/_continuous_distns.py in _pdf(self, x, a, b)
6350
6351 def _pdf(self, x, a, b):
-> 6352 return _norm_pdf(x) / self._delta
6353
6354 def _logpdf(self, x, a, b):
ValueError: operands could not be broadcast together with shapes (4084,) (4100,)
But the arrays are all the same shape. When I put print statements in right before the call to truncnorm.pdf,
print(x.shape, a.shape, b.shape, loc.shape, scale.shape)
prior = scipy.stats.truncnorm.pdf(x, a, b,
                                  loc=loc, scale=scale)
I get,
(4100,) (4100,) (4100,) (4100,) (4100,)
confirming that.
I followed the process of calculating the pdf of the truncated normal in the _continuous_distns.py file to try to recreate it for myself and see if something was happening to mask one of the arrays internally,
import scipy.special as sc

def norm_cdf(x):
    return sc.ndtr(x)

def norm_sf(x):
    return norm_cdf(-x)

nb = norm_cdf(b)
na = norm_cdf(a)
sb = norm_sf(b)
sa = norm_sf(a)

# lines 6345-6347, defining self._delta
print(np.where(a > 0,
               -(sb - sa),
               nb - na).shape)

# line 162: return np.exp(-x**2/2.0) / _norm_pdf_C
# defining _norm_pdf(x)
norm_pdf_C = np.sqrt(2*np.pi)
print((np.exp(-x**2/2.0) / norm_pdf_C).shape)
I get
(4100,)
(4100,)
which are correct and the same shape. So I'm at a loss as to what the problem is. I'd appreciate any suggestions.
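One hedged diagnostic, based on how scipy's generic pdf wrapper works: it drops positions that fail the argument or support checks (via argsreduce) before calling _pdf, so the arrays reaching _pdf can be shorter than the inputs. The names below are the question's own variables; the specific conditions are my assumption about what gets filtered:

import numpy as np

# If scipy filtered out 16 entries (4100 - 4084), they should show up here:
z = (x - loc) / scale                      # standardized values
bad = ~((scale > 0) & (a < b) & (z >= a) & (z <= b))
print(np.sum(bad))                         # expect 16 if this is the cause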

" ArityMismatch: Adding expressions with non-matching form arguments () vs ('v_1',) " using FEniCS

I want to solve a continuum mechanics problem with FEniCS. I apply pressure and take the weight into account. But when I add the thermoelasticity component, it no longer works.
Here is my code:
from dolfin import *
from fenics import *
from ufl import nabla_div
from ufl import as_tensor
import matplotlib.pyplot as plt
import numpy as np

E = Constant(100*10**9)
nu = Constant(0.3)
Lg = 0.01; W = 0.2
mu = E/(2+2*nu)
rho = Constant(2200)
delta = W/Lg
gamma = 0.4*delta**2
beta = 8
lambda_ = (E*nu)/((1+nu)*(1-2*nu))
alpha = 1.2*(10**(-8))
deltaT = Constant(50)
Kt = E*alpha*deltaT/(1-2*nu)
g = 9.81
tol = 1E-14

# Create mesh and define function space
mesh = RectangleMesh(Point(-2., 0.), Point(2., 10.), 80, 200)
V = VectorFunctionSpace(mesh, "P", 1)

# Define boundary condition
def clamped_boundary(x, on_boundary):
    return on_boundary and x[1] < tol

class UpFace(SubDomain):
    def inside(self, x, on_boundary):
        return on_boundary and (x[1] > 10 - tol)

ueN = UpFace()
boundaries = MeshFunction("size_t", mesh, mesh.topology().dim()-1, 0)
ueN.mark(boundaries, 1)
ds = Measure("ds", domain=mesh, subdomain_data=boundaries)
bc = DirichletBC(V, Constant((0, 0)), clamped_boundary)

def epsilon(u):
    return 0.5*(nabla_grad(u) + nabla_grad(u).T)

def sigma(u):
    return (lambda_*nabla_div(u) - Kt)*Identity(d) + (2*mu)*epsilon(u)

# Define variational problem
u = TrialFunction(V)
d = u.geometric_dimension()  # space dimension
v = TestFunction(V)
f = Constant((0, -rho*g))
T = Constant((0, 0))
Pr = Constant((0, -2*10**9))
a = inner(sigma(u), epsilon(v))*dx
L = dot(f, v)*dx + dot(T, v)*ds + dot(Pr, v)*ds(1)

# Compute solution
u = Function(V)
solve(a == L, u, bc)

# Plot solution
plot(u, mode="displacement", color="red")
plt.colorbar(plot(u))
I get this error message:
---------------------------------------------------------------------------
ArityMismatch Traceback (most recent call last)
<ipython-input-54-805d7c5b704f> in <module>
17 # Compute solution
18 u = Function(V)
---> 19 solve(a == L, u, bc)
20
21 # Plot solution
/usr/lib/python3/dist-packages/dolfin/fem/solving.py in solve(*args, **kwargs)
218 # tolerance)
219 elif isinstance(args[0], ufl.classes.Equation):
--> 220 _solve_varproblem(*args, **kwargs)
221
222 # Default case, just call the wrapped C++ solve function
/usr/lib/python3/dist-packages/dolfin/fem/solving.py in _solve_varproblem(*args, **kwargs)
240 # Create problem
241 problem = LinearVariationalProblem(eq.lhs, eq.rhs, u, bcs,
--> 242 form_compiler_parameters=form_compiler_parameters)
243
244 # Create solver and call solve
/usr/lib/python3/dist-packages/dolfin/fem/problem.py in __init__(self, a, L, u, bcs, form_compiler_parameters)
54 else:
55 L = Form(L, form_compiler_parameters=form_compiler_parameters)
---> 56 a = Form(a, form_compiler_parameters=form_compiler_parameters)
57
58 # Initialize C++ base class
/usr/lib/python3/dist-packages/dolfin/fem/form.py in __init__(self, form, **kwargs)
42
43 ufc_form = ffc_jit(form, form_compiler_parameters=form_compiler_parameters,
---> 44 mpi_comm=mesh.mpi_comm())
45 ufc_form = cpp.fem.make_ufc_form(ufc_form[0])
46
/usr/lib/python3/dist-packages/dolfin/jit/jit.py in mpi_jit(*args, **kwargs)
45 # Just call JIT compiler when running in serial
46 if MPI.size(mpi_comm) == 1:
---> 47 return local_jit(*args, **kwargs)
48
49 # Default status (0 == ok, 1 == fail)
/usr/lib/python3/dist-packages/dolfin/jit/jit.py in ffc_jit(ufl_form, form_compiler_parameters)
95 p.update(dict(parameters["form_compiler"]))
96 p.update(form_compiler_parameters or {})
---> 97 return ffc.jit(ufl_form, parameters=p)
98
99
/usr/lib/python3/dist-packages/ffc/jitcompiler.py in jit(ufl_object, parameters, indirect)
215
216 # Inspect cache and generate+build if necessary
--> 217 module = jit_build(ufl_object, module_name, parameters)
218
219 # Raise exception on failure to build or import module
/usr/lib/python3/dist-packages/ffc/jitcompiler.py in jit_build(ufl_object, module_name, parameters)
131 name=module_name,
132 params=params,
--> 133 generate=jit_generate)
134 return module
135
/usr/lib/python3/dist-packages/dijitso/jit.py in jit(jitable, name, params, generate, send, receive, wait)
163 elif generate:
164 # 1) Generate source code
--> 165 header, source, dependencies = generate(jitable, name, signature, params["generator"])
166 # Ensure we got unicode from generate
167 header = as_unicode(header)
/usr/lib/python3/dist-packages/ffc/jitcompiler.py in jit_generate(ufl_object, module_name, signature, parameters)
64
65 code_h, code_c, dependent_ufl_objects = compile_object(ufl_object,
---> 66 prefix=module_name, parameters=parameters, jit=True)
67
68 # Jit compile dependent objects separately,
/usr/lib/python3/dist-packages/ffc/compiler.py in compile_form(forms, object_names, prefix, parameters, jit)
141 """This function generates UFC code for a given UFL form or list of UFL forms."""
142 return compile_ufl_objects(forms, "form", object_names,
--> 143 prefix, parameters, jit)
144
145
/usr/lib/python3/dist-packages/ffc/compiler.py in compile_ufl_objects(ufl_objects, kind, object_names, prefix, parameters, jit)
183 # Stage 1: analysis
184 cpu_time = time()
--> 185 analysis = analyze_ufl_objects(ufl_objects, kind, parameters)
186 _print_timing(1, time() - cpu_time)
187
/usr/lib/python3/dist-packages/ffc/analysis.py in analyze_ufl_objects(ufl_objects, kind, parameters)
88 # Analyze forms
89 form_datas = tuple(_analyze_form(form, parameters)
---> 90 for form in forms)
91
92 # Extract unique elements accross all forms
/usr/lib/python3/dist-packages/ffc/analysis.py in <genexpr>(.0)
88 # Analyze forms
89 form_datas = tuple(_analyze_form(form, parameters)
---> 90 for form in forms)
91
92 # Extract unique elements accross all forms
/usr/lib/python3/dist-packages/ffc/analysis.py in _analyze_form(form, parameters)
172 do_apply_geometry_lowering=True,
173 preserve_geometry_types=(Jacobian,),
--> 174 do_apply_restrictions=True)
175 elif r == "tsfc":
176 try:
/usr/lib/python3/dist-packages/ufl/algorithms/compute_form_data.py in compute_form_data(form, do_apply_function_pullbacks, do_apply_integral_scaling, do_apply_geometry_lowering, preserve_geometry_types, do_apply_default_restrictions, do_apply_restrictions, do_estimate_degrees, do_append_everywhere_integrals, complex_mode)
416 preprocessed_form = remove_complex_nodes(preprocessed_form)
417
--> 418 check_form_arity(preprocessed_form, self.original_form.arguments(), complex_mode) # Currently testing how fast this is
419
420 # TODO: This member is used by unit tests, change the tests to
/usr/lib/python3/dist-packages/ufl/algorithms/check_arities.py in check_form_arity(form, arguments, complex_mode)
175 def check_form_arity(form, arguments, complex_mode=False):
176 for itg in form.integrals():
--> 177 check_integrand_arity(itg.integrand(), arguments, complex_mode)
/usr/lib/python3/dist-packages/ufl/algorithms/check_arities.py in check_integrand_arity(expr, arguments, complex_mode)
157 key=lambda x: (x.number(), x.part())))
158 rules = ArityChecker(arguments)
--> 159 arg_tuples = map_expr_dag(rules, expr, compress=False)
160 args = tuple(a[0] for a in arg_tuples)
161 if args != arguments:
/usr/lib/python3/dist-packages/ufl/corealg/map_dag.py in map_expr_dag(function, expression, compress)
35 Return the result of the final function call.
36 """
---> 37 result, = map_expr_dags(function, [expression], compress=compress)
38 return result
39
/usr/lib/python3/dist-packages/ufl/corealg/map_dag.py in map_expr_dags(function, expressions, compress)
84 r = handlers[v._ufl_typecode_](v)
85 else:
---> 86 r = handlers[v._ufl_typecode_](v, *[vcache[u] for u in v.ufl_operands])
87
88 # Optionally check if r is in rcache, a memory optimization
/usr/lib/python3/dist-packages/ufl/algorithms/check_arities.py in sum(self, o, a, b)
46 def sum(self, o, a, b):
47 if a != b:
---> 48 raise ArityMismatch("Adding expressions with non-matching form arguments {0} vs {1}.".format(_afmt(a), _afmt(b)))
49 return a
50
ArityMismatch: Adding expressions with non-matching form arguments () vs ('v_1',).
When I write this (removing the Kt from sigma(u)):
def sigma(u):
    return (lambda_*nabla_div(u))*Identity(d) + (2*mu)*epsilon(u)
It works perfectly.
On this page (Click here), they solve the same kind of problem, and it works on my computer.
Do you know how to fix it?
I had exactly the same question, and a colleague of mine figured it out for me. As there is no answer given here, I will try to leave some directions to guide others to the solution. I do not have a lot of expertise yet, so please consider that my use of terminology might be a little off.
The FEniCS error somewhat misled me into thinking the mistake is in the definition of the stress term sigma. It is not exactly there. The right-hand side and the left-hand side passed to the solve function are not defined correctly (as also shown at the very top of the error output). The term Kt*Identity(d) in the stress function sigma does not depend on the trial function u; it is only multiplied by the test function v later (via epsilon(v)). Therefore it has to go into the L of the equation passed to the solver.
Beneath the link that you shared, the script uses the rhs and lhs functions to correctly split the equation into a and L.
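A minimal sketch of that split, using the names from the question's code (my reading of the fix, not a verified script from the answerer): keep Kt inside sigma, collect the whole equation into one form F, and let UFL's lhs/rhs sort the terms by whether they contain the trial function:

# Every term goes into F; lhs(F) keeps the parts bilinear in (u, v), while
# rhs(F) collects the parts that only contain v, so Kt*Identity(d) paired
# with epsilon(v) automatically moves to the right-hand side.
F = inner(sigma(u), epsilon(v))*dx - dot(f, v)*dx - dot(T, v)*ds - dot(Pr, v)*ds(1)
a, L = lhs(F), rhs(F)

u = Function(V)
solve(a == L, u, bc)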

Why am I getting a memory error with Python statsmodels?

Hi, I'm getting a memory error running a Tweedie GLM with statsmodels. I've looked at Python statsmodels: memory error but there is no answer on that post.
The computer I'm running this on has 64 GB of RAM and eight processors. The shape of the data is (722214, 47).
Below is my code:
formula = 'pp_log ~ C(atfault_model) + C(channel_model) + C(CLded_model) + C(credit_model_52778) + \
C(credit_model_c6) + C(package_model) + C(ds_fp_paid_in_full) + C(ds_pn_prior_insurance) + \
C(ds_ip_advanced_purchase) + C(credit_model_c5) + C(ds_ad_affinity) + C(ds_ak_alliance) + \
C(ds_ly_loyalty_discount) + C(ds_mo_multipolicy) + C(ds_pf_performance) + C(majorvio_model) + \
C(driver_age_model):C(marital_status_model) + C(minorvio_model) + C(multi_unit_model) + \
C(unit_drv_exp_model) + C(Vintiles) + C(safety_course_model) + C(instructor_course_model) + \
C(RATING_CLASS_CODE_MODEL) + C(class_model):C(v_age_model) + C(class_model):C(cc_model)'
y, x = patsy.dmatrices(formula, train, return_type = 'dataframe')
weights = train['coll_eu']
lost_cost_model = smf.GLM(y, x-1, family = sm.families.Tweedie(link = sm.families.links.log, var_power = 1.5), weights = weights)
lost_cost_results = lost_cost_model.fit()
Additional Information:
The memory error is thrown at the following line:
lost_cost_results = lost_cost_model.fit()
Below is the traceback:
MemoryError                               Traceback (most recent call last)
<ipython-input-...> in <module>
----> 1 lost_cost_results = lost_cost_model.fit()

C:\ProgramData\Anaconda3\lib\site-packages\statsmodels\genmod\generalized_linear_model.py in fit(self, start_params, maxiter, method, tol, scale, cov_type, cov_kwds, use_t, full_output, disp, max_start_irls, **kwargs)
   1010             return self._fit_irls(start_params=start_params, maxiter=maxiter,
   1011                                   tol=tol, scale=scale, cov_type=cov_type,
-> 1012                                   cov_kwds=cov_kwds, use_t=use_t, **kwargs)
   1013         else:
   1014             self._optim_hessian = kwargs.get('optim_hessian')

C:\ProgramData\Anaconda3\lib\site-packages\statsmodels\genmod\generalized_linear_model.py in _fit_irls(self, start_params, maxiter, tol, scale, cov_type, cov_kwds, use_t, **kwargs)
   1131                                  wlsendog,
   1132                                  wlsexog,
-> 1133                                  self.weights).fit(method=wls_method)
   1134             lin_pred = np.dot(self.exog, wls_results.params)
   1135             lin_pred += self._offset_exposure

C:\ProgramData\Anaconda3\lib\site-packages\statsmodels\regression\_tools.py in __init__(self, endog, exog, weights)
     47             self.wexog = w_half * exog
     48         else:
---> 49             self.wexog = w_half[:, None] * exog
     50
     51     def fit(self, method='pinv'):

MemoryError:
Add 2:
In the insurance industry, it's standard practice to bring everything in as a categorical variable. Then, once decisions are made on how to smooth out each relativity, change what needs to be changed over to a numerical type and fit a polynomial or spline or whatever.
Once I used what variables I could as numerical, it ran fine, in only 3 minutes. Case closed.
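For anyone wondering why that helps, here is a small illustration of the design-matrix blow-up (a toy sketch; the column counts are just what patsy produces for this hypothetical frame):

import numpy as np
import pandas as pd
import patsy

df = pd.DataFrame({"y": np.random.rand(5), "x": [1, 2, 3, 4, 5]})
wide = patsy.dmatrix("C(x)", df)  # one dummy column per level: intercept + 4
slim = patsy.dmatrix("x", df)     # a single numeric column: intercept + 1
print(wide.shape, slim.shape)     # (5, 5) (5, 2)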
