Can't iterate over multiprocessing.managers.DictProxy through pytest-parallel, but it works fine in plain Python.
I found this issue reported at https://bugs.python.org/issue9733, but since managers.py is part of the standard library and read-only for me, I cannot make changes there. Has anybody faced this issue before? How can I resolve it?
test_run.py
from multiprocessing import Process, Manager

def f(d, l):
    d[1] = '1'
    d['2'] = 2
    d[0.25] = None
    l.reverse()

if __name__ == '__main__':
    with Manager() as manager:
        d = manager.dict()
        l = manager.list(range(10))

        p = Process(target=f, args=(d, l))
        p.start()
        p.join()

        print(d)
        print(l)
If you run this directly with Python, it works:
(venv) [tivo@localhost src]$ python test_run.py
{0.25: None, 1: '1', '2': 2}
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
(venv) [tivo@localhost src]$
EDITED:
If you run the following code with pytest, it passes:
from multiprocessing import Process, Manager

def f(d, l):
    d[1] = '1'
    d['2'] = 2
    d[0.25] = None
    l.reverse()

def test_f():
    d, l = {}, []
    d[1] = '1'
    d['2'] = 2
    d[0.25] = None
    print(d)

    with Manager() as manager:
        d = manager.dict()
        l = manager.list(range(10))
        l.reverse()
        print(l)

        p = Process(target=f, args=(d, l))
        p.start()
        p.join()
(venv) [tivo@localhost src]$ pytest -v -s test_run.py
collecting ... [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
{0.25: None, 1: '1', '2': 2}
collected 1 item
test_run.py::test_f {0.25: None, 1: '1', '2': 2}
PASSED
(venv) [tivo@localhost src]$
But if you run it through pytest together with the pytest-parallel package, it throws an error:
(venv) [tivo@localhost src]$ pytest -v -s --tests-per-worker auto --workers auto test_run.py
===================================================================== test session starts ======================================================================
platform linux -- Python 3.4.4, pytest-4.5.0, py-1.8.0, pluggy-0.11.0 -- /home/tivo/workspace/ServicePortal/autotestscripts/CAT/scripts/ServerQE/brat/venv/bin/python3
cachedir: .pytest_cache
rootdir: /home/tivo/workspace/ServicePortal/autotestscripts/CAT/scripts/ServerQE/brat/src, inifile: pytest.ini
plugins: xdist-1.28.0, remotedata-0.3.1, pipeline-0.3.0, parallel-0.0.9, forked-1.0.2, flake8-1.0.4, cov-2.7.1
collecting ... [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
{0.25: None, 1: '1', '2': 2}
collected 1 item
pytest-parallel: 2 workers (processes), 0 test per worker (thread)
Traceback (most recent call last):
File "/home/tivo/workspace/ServicePortal/autotestscripts/CAT/scripts/ServerQE/brat/venv/bin/pytest", line 10, in <module>
sys.exit(main())
File "/home/tivo/workspace/ServicePortal/autotestscripts/CAT/scripts/ServerQE/brat/venv/lib/python3.4/site-packages/_pytest/config/__init__.py", line 79, in main
return config.hook.pytest_cmdline_main(config=config)
File "/home/tivo/workspace/ServicePortal/autotestscripts/CAT/scripts/ServerQE/brat/venv/lib/python3.4/site-packages/pluggy/hooks.py", line 289, in __call__
return self._hookexec(self, self.get_hookimpls(), kwargs)
File "/home/tivo/workspace/ServicePortal/autotestscripts/CAT/scripts/ServerQE/brat/venv/lib/python3.4/site-packages/pluggy/manager.py", line 68, in _hookexec
return self._inner_hookexec(hook, methods, kwargs)
File "/home/tivo/workspace/ServicePortal/autotestscripts/CAT/scripts/ServerQE/brat/venv/lib/python3.4/site-packages/pluggy/manager.py", line 62, in <lambda>
firstresult=hook.spec.opts.get("firstresult") if hook.spec else False,
File "/home/tivo/workspace/ServicePortal/autotestscripts/CAT/scripts/ServerQE/brat/venv/lib/python3.4/site-packages/pluggy/callers.py", line 208, in _multicall
return outcome.get_result()
File "/home/tivo/workspace/ServicePortal/autotestscripts/CAT/scripts/ServerQE/brat/venv/lib/python3.4/site-packages/pluggy/callers.py", line 80, in get_result
raise ex[1].with_traceback(ex[2])
File "/home/tivo/workspace/ServicePortal/autotestscripts/CAT/scripts/ServerQE/brat/venv/lib/python3.4/site-packages/pluggy/callers.py", line 187, in _multicall
res = hook_impl.function(*args)
File "/home/tivo/workspace/ServicePortal/autotestscripts/CAT/scripts/ServerQE/brat/venv/lib/python3.4/site-packages/_pytest/main.py", line 242, in pytest_cmdline_main
return wrap_session(config, _main)
File "/home/tivo/workspace/ServicePortal/autotestscripts/CAT/scripts/ServerQE/brat/venv/lib/python3.4/site-packages/_pytest/main.py", line 235, in wrap_session
session=session, exitstatus=session.exitstatus
File "/home/tivo/workspace/ServicePortal/autotestscripts/CAT/scripts/ServerQE/brat/venv/lib/python3.4/site-packages/pluggy/hooks.py", line 289, in __call__
return self._hookexec(self, self.get_hookimpls(), kwargs)
File "/home/tivo/workspace/ServicePortal/autotestscripts/CAT/scripts/ServerQE/brat/venv/lib/python3.4/site-packages/pluggy/manager.py", line 68, in _hookexec
return self._inner_hookexec(hook, methods, kwargs)
File "/home/tivo/workspace/ServicePortal/autotestscripts/CAT/scripts/ServerQE/brat/venv/lib/python3.4/site-packages/pluggy/manager.py", line 62, in <lambda>
firstresult=hook.spec.opts.get("firstresult") if hook.spec else False,
File "/home/tivo/workspace/ServicePortal/autotestscripts/CAT/scripts/ServerQE/brat/venv/lib/python3.4/site-packages/pluggy/callers.py", line 203, in _multicall
gen.send(outcome)
File "/home/tivo/workspace/ServicePortal/autotestscripts/CAT/scripts/ServerQE/brat/venv/lib/python3.4/site-packages/_pytest/terminal.py", line 678, in pytest_sessionfinish
self.summary_stats()
File "/home/tivo/workspace/ServicePortal/autotestscripts/CAT/scripts/ServerQE/brat/venv/lib/python3.4/site-packages/_pytest/terminal.py", line 876, in summary_stats
(line, color) = build_summary_stats_line(self.stats)
File "/home/tivo/workspace/ServicePortal/autotestscripts/CAT/scripts/ServerQE/brat/venv/lib/python3.4/site-packages/_pytest/terminal.py", line 1034, in build_summary_stats_line
for found_type in stats:
File "<string>", line 2, in __getitem__
File "/usr/local/lib/python3.4/multiprocessing/managers.py", line 747, in _callmethod
raise convert_to_error(kind, result)
KeyError: 0
(venv) [tivo@localhost src]$
I have the following packages installed:
pytest-parallel==0.0.9
pytest-pipeline==0.3.0
Q. What workaround can I apply to get the above code to PASS without error logs? The issue is that the results do not tell me how many test cases PASSED.
Why does it report pytest-parallel: 2 workers (processes), 0 tests per worker (thread)? I have provided only one test function there!
HINT:
If I add the flag --workers 1, the above error does not appear, but it usually fails my scripts, so I am forced to use --tests-per-worker 1 along with it. But then there is no parallelism at all!
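The root cause matches the bug report above: in Python 3.4, a DictProxy does not implement __iter__, so when pytest-parallel's managed stats dict hits for found_type in stats:, Python falls back to the legacy __getitem__(0) iteration protocol, and that is exactly the KeyError: 0 in the traceback. A minimal sketch of the safe iteration patterns (a workaround for your own code, not a fix for pytest-parallel's internals):

# Hedged sketch: avoid iterating the DictProxy itself (bpo-9733);
# go through its proxied methods, or materialize a plain dict first.
from multiprocessing import Manager

with Manager() as manager:
    d = manager.dict({1: '1', '2': 2, 0.25: None})

    for key in d.keys():      # keys() is a proxied method call, works on 3.4
        print(key, d[key])

    plain = dict(d)           # one-time copy into a regular dict
    for key in plain:
        print(key, plain[key])

Since the failure happens inside pytest's summary code rather than inside your test, a pragmatic alternative may be pytest-xdist, which is already in your plugins list: pytest -n auto test_run.py gives process-level parallelism without going through pytest-parallel.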
Related
I am just learning about multiprocessing in Python, and when I try to run the code I get the errors below.
Code
from multiprocessing import Process, cpu_count
from time import perf_counter

def counter1(num):
    count = 0
    while count < num:
        count += 1

def main():
    t1_start = perf_counter()
    processes = []
    for _ in range(4):
        p = Process(target=counter1, args=(10000,))
        p.start()
        processes.append(p)
    for process in processes:
        process.join()
    t1_stop = perf_counter()
    print("Elapsed time: {}".format(t1_stop - t1_start))

if __name__ == '__main__':
    main()
Problems
Traceback (most recent call last):
File "C:\Users\Женя\AppData\Local\Programs\Python\Python310\lib\code.py", line 90, in runcode
exec(code, self.locals)
File "<input>", line 1, in <module>
File "C:\Program Files\JetBrains\PyCharm 2021.3.3\plugins\python\helpers\pydev\_pydev_bundle\pydev_umd.py", line 198, in runfile
pydev_imports.execfile(filename, global_vars, local_vars) # execute the script
File "C:\Program Files\JetBrains\PyCharm 2021.3.3\plugins\python\helpers\pydev\_pydev_imps\_pydev_execfile.py", line 18, in execfile
exec(compile(contents+"\n", file, 'exec'), glob, loc)
File "C:/Users/Женя/PycharmProjects/test_loria/mp.py", line 54, in <module>
main()
File "C:/Users/Женя/PycharmProjects/test_loria/mp.py", line 43, in main
p.start()
File "C:\Users\Женя\AppData\Local\Programs\Python\Python310\lib\multiprocessing\process.py", line 121, in start
self._popen = self._Popen(self)
File "C:\Users\Женя\AppData\Local\Programs\Python\Python310\lib\multiprocessing\context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\Users\Женя\AppData\Local\Programs\Python\Python310\lib\multiprocessing\context.py", line 327, in _Popen
return Popen(process_obj)
File "C:\Users\Женя\AppData\Local\Programs\Python\Python310\lib\multiprocessing\popen_spawn_win32.py", line 93, in __init__
reduction.dump(process_obj, to_child)
File "C:\Users\Женя\AppData\Local\Programs\Python\Python310\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
_pickle.PicklingError: Can't pickle <function counter1 at 0x000001FBC61EC040>: attribute lookup counter1 on __main__ failed
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Users\Женя\AppData\Local\Programs\Python\Python310\lib\multiprocessing\spawn.py", line 116, in spawn_main
exitcode = _main(fd, parent_sentinel)
File "C:\Users\Женя\AppData\Local\Programs\Python\Python310\lib\multiprocessing\spawn.py", line 126, in _main
self = reduction.pickle.load(from_parent)
EOFError: Ran out of input
I have no idea what is going on, because if I run the same code from another file, everything works fine.
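What is happening: on Windows, multiprocessing starts children with the spawn method, which pickles the Process target by name and re-imports the main module in the child. Your traceback shows the code being executed through PyCharm's interactive console (the exec(code, self.locals) frames), so counter1 lives in a __main__ that the child process cannot re-import, and pickling fails. A minimal sketch of the usual fix, assuming a separate file workers.py (a hypothetical name) that holds the worker so the child can import it:

# workers.py -- hypothetical module; must be importable by the child process
def counter1(num):
    count = 0
    while count < num:
        count += 1

# main.py -- run as a script (Run in PyCharm), not via the interactive console
from multiprocessing import Process
from time import perf_counter
from workers import counter1

def main():
    t1_start = perf_counter()
    processes = [Process(target=counter1, args=(10000,)) for _ in range(4)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()
    print("Elapsed time: {}".format(perf_counter() - t1_start))

if __name__ == '__main__':
    main()

Running the original file directly instead of sending it to the console avoids the problem for the same reason, which matches your observation that the same code works "in another file".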
After around 3 iterations, the code below produces the error: W tensorflow/core/framework/op_kernel.cc:1745] OP_REQUIRES failed at spacetobatch_op.cc:219 : INVALID_ARGUMENT: padded_shape[1]=106 is not divisible by block_shape[1]=6
This error doesn't always occur (explanation below); could someone please help?
Note: img_batch is a SINGLE-element list containing the path of just ONE image. (The same error also appeared when using a TWO-element list, where each element is the path of ONE image.)
If I rearrange images_path, i.e. use sorted(images_path), the error somehow no longer appears and all iterations complete. Thinking this might be a memory issue, I tried K.clear_session(), but that didn't help.
if image_batch_size is not None:
    chunked_image_list = list(divide_chunks(images_path, image_batch_size))
else:
    chunked_image_list = images_path

img_counter = 0
for img_batch in chunked_image_list:
    images = img_batch
    if not isinstance(images, np.ndarray):
        images = [tools.read(image) for image in images]
    images = [
        tools.resize_image(image, max_scale=scale, max_size=max_size)
        for image in images
    ]
    max_height, max_width = np.array(
        [image.shape[:2] for image, scale in images]
    ).max(axis=0)
    scales = [scale for _, scale in images]
    images = np.array(
        [
            tools.pad(image, width=max_width, height=max_height)
            for image, _ in images
        ]
    )
    images = [compute_input(tools.read(image)) for image in images]
    box_groups, TScore, TScoreOrig, LMap, LMapOrig, ncomp, lbl, sts, box_scores = getBoxes(
        self.model.predict(np.array(images), **kwargs),
        detection_threshold=detection_threshold,
        text_threshold=text_threshold,
        link_threshold=link_threshold,
        size_threshold=size_threshold,
    )
Error Details:
W tensorflow/core/framework/op_kernel.cc:1745] OP_REQUIRES failed at spacetobatch_op.cc:219 : INVALID_ARGUMENT: padded_shape[1]=106 is not divisible by block_shape[1]=6
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
--> 865 self.model.predict(np.array(images), **kwargs),
866 detection_threshold=detection_threshold,
867 text_threshold=text_threshold,
/opt/conda/lib/python3.7/site-packages/keras/utils/traceback_utils.py in error_handler(*args, **kwargs)
65 except Exception as e: # pylint: disable=broad-except
66 filtered_tb = _process_traceback_frames(e.__traceback__)
---> 67 raise e.with_traceback(filtered_tb) from None
68 finally:
69 del filtered_tb
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
57 ctx.ensure_initialized()
58 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
---> 59 inputs, attrs, num_outputs)
60 except core._NotOkStatusException as e:
61 if name is not None:
InvalidArgumentError: padded_shape[1]=106 is not divisible by block_shape[1]=6
[[node model_5/basenet.slice5.1/Conv2D/SpaceToBatchND
(defined at /opt/conda/lib/python3.7/site-packages/keras/layers/convolutional.py:238)
]] [Op:__inference_predict_function_29936]
Errors may have originated from an input operation.
Input Source operations connected to node model_5/basenet.slice5.1/Conv2D/SpaceToBatchND:
In[0] model_5/basenet.slice5.0/MaxPool (defined at /opt/conda/lib/python3.7/site-packages/keras/layers/pooling.py:362)
In[1] model_5/basenet.slice5.1/Conv2D/SpaceToBatchND/block_shape:
In[2] model_5/basenet.slice5.1/Conv2D/concat/concat:
Operation defined at: (most recent call last)
>>> File "/opt/conda/lib/python3.7/runpy.py", line 193, in _run_module_as_main
>>> "__main__", mod_spec)
>>>
>>> File "/opt/conda/lib/python3.7/runpy.py", line 85, in _run_code
>>> exec(code, run_globals)
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py", line 16, in <module>
>>> app.launch_new_instance()
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/traitlets/config/application.py", line 846, in launch_instance
>>> app.start()
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/ipykernel/kernelapp.py", line 677, in start
>>> self.io_loop.start()
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/tornado/platform/asyncio.py", line 199, in start
>>> self.asyncio_loop.run_forever()
>>>
>>> File "/opt/conda/lib/python3.7/asyncio/base_events.py", line 541, in run_forever
>>> self._run_once()
>>>
>>> File "/opt/conda/lib/python3.7/asyncio/base_events.py", line 1786, in _run_once
>>> handle._run()
>>>
>>> File "/opt/conda/lib/python3.7/asyncio/events.py", line 88, in _run
>>> self._context.run(self._callback, *self._args)
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/ipykernel/kernelbase.py", line 457, in dispatch_queue
>>> await self.process_one()
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/ipykernel/kernelbase.py", line 446, in process_one
>>> await dispatch(*args)
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/ipykernel/kernelbase.py", line 353, in dispatch_shell
>>> await result
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/ipykernel/kernelbase.py", line 648, in execute_request
>>> reply_content = await reply_content
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/ipykernel/ipkernel.py", line 353, in do_execute
>>> res = shell.run_cell(code, store_history=store_history, silent=silent)
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/ipykernel/zmqshell.py", line 533, in run_cell
>>> return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 2915, in run_cell
>>> raw_cell, store_history, silent, shell_futures)
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 2960, in _run_cell
>>> return runner(coro)
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/IPython/core/async_helpers.py", line 78, in _pseudo_sync_runner
>>> coro.send(None)
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3186, in run_cell_async
>>> interactivity=interactivity, compiler=compiler, result=result)
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3377, in run_ast_nodes
>>> if (await self.run_code(code, result, async_=asy)):
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3457, in run_code
>>> exec(code_obj, self.user_global_ns, self.user_ns)
>>>
>>> File "/tmp/ipykernel_17724/3483212994.py", line 3, in <module>
>>> detector1, TScore, TScoreOrig, LMap, LMapOrig, ncomp, labels, stats, box_score = detection_custom.Detector(weights='clovaai_general',load_from_torch=True).detect(images_path=image_list,detection_threshold=0.7 , text_threshold = 0.4, link_threshold=0.3, size_threshold=0,scale=20, image_batch_size = 1, data_df = data, result_folder = result_folder, **kwargs)
>>>
>>> File "/home/jupyter/detection_custom.py", line 865, in detect
>>> self.model.predict(np.array(images), **kwargs),
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/keras/utils/traceback_utils.py", line 64, in error_handler
>>> return fn(*args, **kwargs)
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/keras/engine/training.py", line 1789, in predict
>>> tmp_batch_outputs = self.predict_function(iterator)
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/keras/engine/training.py", line 1621, in predict_function
>>> return step_function(self, iterator)
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/keras/engine/training.py", line 1611, in step_function
>>> outputs = model.distribute_strategy.run(run_step, args=(data,))
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/keras/engine/training.py", line 1604, in run_step
>>> outputs = model.predict_step(data)
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/keras/engine/training.py", line 1572, in predict_step
>>> return self(x, training=False)
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/keras/utils/traceback_utils.py", line 64, in error_handler
>>> return fn(*args, **kwargs)
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/keras/engine/base_layer.py", line 1083, in __call__
>>> outputs = call_fn(inputs, *args, **kwargs)
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/keras/utils/traceback_utils.py", line 92, in error_handler
>>> return fn(*args, **kwargs)
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/keras/engine/functional.py", line 452, in call
>>> inputs, training=training, mask=mask)
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/keras/engine/functional.py", line 589, in _run_internal_graph
>>> outputs = node.layer(*args, **kwargs)
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/keras/utils/traceback_utils.py", line 64, in error_handler
>>> return fn(*args, **kwargs)
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/keras/engine/base_layer.py", line 1083, in __call__
>>> outputs = call_fn(inputs, *args, **kwargs)
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/keras/utils/traceback_utils.py", line 92, in error_handler
>>> return fn(*args, **kwargs)
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/keras/layers/convolutional.py", line 246, in call
>>> outputs = self.convolution_op(inputs, self.kernel)
>>>
>>> File "/opt/conda/lib/python3.7/site-packages/keras/layers/convolutional.py", line 238, in convolution_op
>>> name=self.__class__.__name__)
>>>
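The failing SpaceToBatchND node belongs to a dilated convolution, whose block size must divide the padded spatial dimensions of its input; an image whose width works out to 106 cannot be split into blocks of 6. That also explains why sorted(images_path) "fixes" it: reordering changes which images get batched together and therefore which padded sizes the model sees. A common workaround is to pad every image up to the next multiple of a fixed base before calling model.predict. This is a sketch, not keras' or keras-ocr's own API: pad_to_multiple is a hypothetical helper, and base=32 is an assumption you should derive from the largest block_shape/dilation in your model.

import numpy as np

def pad_to_multiple(image, base=32):
    # Hypothetical helper: zero-pad height and width up to the next
    # multiple of `base` so dilated convolutions see divisible dims.
    h, w = image.shape[:2]
    pad_h = (base - h % base) % base
    pad_w = (base - w % base) % base
    return np.pad(image, ((0, pad_h), (0, pad_w), (0, 0)), mode='constant')

You would apply it just before prediction, e.g. images = np.array([pad_to_multiple(img) for img in images]) ahead of self.model.predict(...).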
The following code works fine:
[process_data(item, data_frame_list[item]) for item in data_frame_list if data_frame_list[item].shape[0] > 5]
I'm trying to convert this code to run in parallel:
pool_obj = multiprocessing.Pool()
[pool_obj.map(process_data,item, data_frame_list[item]) for item in data_frame_list if data_frame_list[item].shape[0] > 5]
This results in the following errors:
Traceback (most recent call last):
File "/home/pyuser/PycharmProjects/project_sample/testyard_2.py", line 425, in <module>
[pool_obj.map(process_data,item, data_frame_list[item]) for item in data_frame_list if data_frame_list[item].shape[0] > 5]
File "/home/pyuser/PycharmProjects/project_sample/testyard_2.py", line 425, in <listcomp>
[pool_obj.map(process_data,item, data_frame_list[item]) for item in data_frame_list if data_frame_list[item].shape[0] > 5]
File "/usr/lib/python3.8/multiprocessing/pool.py", line 364, in map
return self._map_async(func, iterable, mapstar, chunksize).get()
File "/usr/lib/python3.8/multiprocessing/pool.py", line 485, in _map_async
result = MapResult(self, chunksize, len(iterable), callback,
File "/usr/lib/python3.8/multiprocessing/pool.py", line 797, in __init__
if chunksize <= 0:
File "/home/pyuser/PycharmProjects/project_sample/venv/lib/python3.8/site-packages/pandas/core/ops/common.py", line 69, in new_method
return method(self, other)
File "/home/pyuser/PycharmProjects/project_sample/venv/lib/python3.8/site-packages/pandas/core/arraylike.py", line 44, in __le__
return self._cmp_method(other, operator.le)
File "/home/pyuser/PycharmProjects/project_sample/venv/lib/python3.8/site-packages/pandas/core/frame.py", line 6849, in _cmp_method
new_data = self._dispatch_frame_op(other, op, axis=axis)
File "/home/pyuser/PycharmProjects/project_sample/venv/lib/python3.8/site-packages/pandas/core/frame.py", line 6888, in _dispatch_frame_op
bm = self._mgr.apply(array_op, right=right)
File "/home/pyuser/PycharmProjects/project_sample/venv/lib/python3.8/site-packages/pandas/core/internals/managers.py", line 325, in apply
applied = b.apply(f, **kwargs)
File "/home/pyuser/PycharmProjects/project_sample/venv/lib/python3.8/site-packages/pandas/core/internals/blocks.py", line 382, in apply
result = func(self.values, **kwargs)
File "/home/pyuser/PycharmProjects/project_sample/venv/lib/python3.8/site-packages/pandas/core/ops/array_ops.py", line 284, in comparison_op
res_values = comp_method_OBJECT_ARRAY(op, lvalues, rvalues)
File "/home/pyuser/PycharmProjects/project_sample/venv/lib/python3.8/site-packages/pandas/core/ops/array_ops.py", line 73, in comp_method_OBJECT_ARRAY
result = libops.scalar_compare(x.ravel(), y, op)
File "pandas/_libs/ops.pyx", line 107, in pandas._libs.ops.scalar_compare
TypeError: '<=' not supported between instances of 'str' and 'int'
I'm not able to work out what is incorrect with what I've done. Could I please request some guidance?
I used a different library that is easier to use. All is working now:
from joblib import Parallel, delayed
import multiprocessing

Parallel(n_jobs=multiprocessing.cpu_count())(
    delayed(process_data)(item, data_frame_list[item])
    for item in data_frame_list
    if data_frame_list[item].shape[0] > 5
)
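For reference, the standard library can do the same fan-out. Pool.map passes a single argument per call, and its third positional parameter is chunksize, so the original pool_obj.map(process_data, item, data_frame_list[item]) was handing a DataFrame to chunksize, which is why the traceback dies on if chunksize <= 0 with a pandas comparison error. Pool.starmap unpacks argument tuples instead. A sketch, assuming process_data and data_frame_list as defined in the question:

import multiprocessing

if __name__ == '__main__':
    # Build (key, frame) argument tuples, keeping the same filter as before.
    args = [(item, data_frame_list[item])
            for item in data_frame_list
            if data_frame_list[item].shape[0] > 5]
    with multiprocessing.Pool() as pool_obj:
        # starmap unpacks each tuple into process_data(item, frame)
        results = pool_obj.starmap(process_data, args)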
Using cffi, I'm trying to call a Rust function from Python code.
Here is the Python code:
from cffi import FFI

def rust(solution):
    ffi = FFI()
    lib = ffi.dlopen("./libnorm.so")
    ffi.cdef('float norm(float**)')
    return lib.norm(solution)

solution = [[1, 1], [1, 2], [1, 3], [1, 4], [1, 5], [1, 6]]
print(rust(solution))
and this is the Rust code:
#![crate_type = "dylib"]

#[no_mangle]
pub extern fn norm(array: Vec<Vec<f64>>) -> f64 {
    return array.len() as f64
}
I compiled the lib with:
rustc --crate-type=dylib norm.rs
But when I run my Python script, I get this stack trace:
Traceback (most recent call last):
File "/usr/lib64/python3.5/site-packages/cffi/cparser.py", line 260, in _parse
ast = _get_parser().parse(csource)
File "/usr/lib/python3.5/site-packages/pycparser/c_parser.py", line 151, in parse
debug=debuglevel)
File "/usr/lib/python3.5/site-packages/pycparser/ply/yacc.py", line 331, in parse
return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc)
File "/usr/lib/python3.5/site-packages/pycparser/ply/yacc.py", line 1181, in parseopt_notrack
tok = call_errorfunc(self.errorfunc, errtoken, self)
File "/usr/lib/python3.5/site-packages/pycparser/ply/yacc.py", line 193, in call_errorfunc
r = errorfunc(token)
File "/usr/lib/python3.5/site-packages/pycparser/c_parser.py", line 1723, in p_error
self._parse_error('At end of input', self.clex.filename)
File "/usr/lib/python3.5/site-packages/pycparser/plyparser.py", line 55, in _parse_error
raise ParseError("%s: %s" % (coord, msg))
pycparser.plyparser.ParseError: : At end of input
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "main.py", line 35, in <module>
print(rust(solution))
File "main.py", line 28, in rust
ffi.cdef('float norm(float**)')
File "/usr/lib64/python3.5/site-packages/cffi/api.py", line 105, in cdef
self._cdef(csource, override=override, packed=packed)
File "/usr/lib64/python3.5/site-packages/cffi/api.py", line 119, in _cdef
self._parser.parse(csource, override=override, **options)
File "/usr/lib64/python3.5/site-packages/cffi/cparser.py", line 299, in parse
self._internal_parse(csource)
File "/usr/lib64/python3.5/site-packages/cffi/cparser.py", line 304, in _internal_parse
ast, macros, csource = self._parse(csource)
File "/usr/lib64/python3.5/site-packages/cffi/cparser.py", line 262, in _parse
self.convert_pycparser_error(e, csource)
File "/usr/lib64/python3.5/site-packages/cffi/cparser.py", line 291, in convert_pycparser_error
raise api.CDefError(msg)
cffi.api.CDefError: parse error
: At end of input
Am I missing something?
I tried several versions of ffi.cdef('float norm(float**)'), like ffi.cdef('float norm(Vec< Vec<float>>)') or ffi.cdef('float norm(float[][])'), but none of them work either.
Thanks in advance for any replies.
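Two things are going on here. First, ffi.cdef() only parses C declarations, so Rust types like Vec<Vec<f64>> can never appear there (that is the pycparser parse error). Second, a Rust fn taking Vec is not safe to call across the C ABI anyway; the usual pattern is to export a pointer-plus-length function. A minimal sketch of the Python side, assuming the Rust library is rebuilt with a C-ABI signature as shown in the comment (the double in the cdef matches the f64 element type in your Rust code, not the float from your original cdef):

# Assumes the Rust side is recompiled as a C-ABI cdylib, e.g.:
#   #![crate_type = "cdylib"]
#   #[no_mangle]
#   pub extern "C" fn norm(ptr: *const f64, len: usize) -> f64 { ... }
from cffi import FFI

def rust(solution):
    ffi = FFI()
    lib = ffi.dlopen("./libnorm.so")
    ffi.cdef("double norm(const double *ptr, size_t len);")
    flat = [float(x) for row in solution for x in row]  # flatten the 2-D list
    buf = ffi.new("double[]", flat)                     # C array cffi can pass
    return lib.norm(buf, len(flat))

solution = [[1, 1], [1, 2], [1, 3], [1, 4], [1, 5], [1, 6]]
print(rust(solution))

On the Rust side the function would then index the slice it reconstructs from ptr and len; passing the row count separately, or flattening as above, is a design choice you would need to agree on between both sides.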
I am updating a Pandas DataFrame.
The script looks up a product. If the product is already in the DataFrame, it updates its columns by accumulating the new values.
If the product is not there, it appends a new row with the product's values.
Code
for m in range(0, len(product_sales_price)):
    if exact_match(str(sales_record[n-1]), str(product_sales_price[m])) == True:
        total_product_daily_sales = counter * product_sales_price[m+1]
        '''
        print(total_product_daily_sales)
        '''
        total_product_daily_net_profit = total_product_daily_sales * .1
        print(counter)
        print(product_sales_price[m+1])
        print(total_product_daily_sales)
        print(total_product_daily_net_profit)
        print(m)
        print(product_sales_price[m])
        if (product_revenue_and_net_profit_df.ix[:, 0] == product_sales_price[m]).any() == True:
            product_revenue_and_net_profit_df.ix[:, :][
                product_revenue_and_net_profit_df.ix[:, 0] == product_sales_price[m]
            ] = [
                product_revenue_and_net_profit_df.ix[:, 0][product_revenue_and_net_profit_df.ix[:, 0] == product_sales_price[m]],
                product_revenue_and_net_profit_df.ix[:, 1][product_revenue_and_net_profit_df.ix[:, 0] == product_sales_price[m]] + counter,
                product_revenue_and_net_profit_df.ix[:, 2][product_revenue_and_net_profit_df.ix[:, 0] == product_sales_price[m]] + total_product_daily_sales,
                product_revenue_and_net_profit_df.ix[:, 3][product_revenue_and_net_profit_df.ix[:, 0] == product_sales_price[m]] + total_product_daily_net_profit,
            ]
        else:
            product_revenue_and_net_profit_df.ix[(product_revenue_and_net_profit_df.shape[0] + 1), :] = [
                product_sales_price[m],
                counter,
                total_product_daily_sales,
                total_product_daily_net_profit,
            ]
Run Time
sale_frequency time (in seconds):
1
423.44
423.44
42.344
0
Bushwacker Dodge Pocket Style Fender Flare Set of 4
Traceback (most recent call last):
File "32\scriptStarter.py", line 120, in <module>
File "C:\Python Projects\Amazon-Sales\amazon_analysis.py", line 162, in <module>
print (timeit.timeit(fn + "()", "from __main__ import "+fn, number=1))
File "C:\Users\onthego\Anaconda3\lib\timeit.py", line 219, in timeit
return Timer(stmt, setup, timer).timeit(number)
File "C:\Users\onthego\Anaconda3\lib\timeit.py", line 184, in timeit
timing = self.inner(it, self.timer)
File "<timeit-src>", line 6, in inner
File "C:\Python Projects\Amazon-Sales\amazon_analysis.py", line 91, in sale_frequency
m])]+total_product_daily_net_profit]
File "C:\Users\onthego\Anaconda3\lib\site-packages\pandas\core\frame.py", line 2122, in __setitem__
self._setitem_array(key, value)
File "C:\Users\onthego\Anaconda3\lib\site-packages\pandas\core\frame.py", line 2142, in _setitem_array
self.ix._setitem_with_indexer(indexer, value)
File "C:\Users\onthego\Anaconda3\lib\site-packages\pandas\core\indexing.py", line 448, in _setitem_with_indexer
elif np.array(value).ndim == 2:
File "C:\Users\onthego\Anaconda3\lib\site-packages\pandas\core\series.py", line 521, in __getitem__
result = self.index.get_value(self, key)
File "C:\Users\onthego\Anaconda3\lib\site-packages\pandas\core\index.py", line 1595, in get_value
return self._engine.get_value(s, k)
File "pandas\index.pyx", line 100, in pandas.index.IndexEngine.get_value (pandas\index.c:3113)
File "pandas\index.pyx", line 108, in pandas.index.IndexEngine.get_value (pandas\index.c:2844)
File "pandas\index.pyx", line 154, in pandas.index.IndexEngine.get_loc (pandas\index.c:3704)
File "pandas\hashtable.pyx", line 375, in pandas.hashtable.Int64HashTable.get_item (pandas\hashtable.c:7224)
File "pandas\hashtable.pyx", line 381, in pandas.hashtable.Int64HashTable.get_item (pandas\hashtable.c:7162)
KeyError: 0
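As an aside, the .ix indexer that both the code and the traceback go through was deprecated in pandas 0.20 and later removed, and mixing label and positional access is a frequent source of exactly this kind of KeyError. The same "update or append" step reads more clearly with a boolean mask and .loc. A sketch, assuming the four columns are product, count, sales, and net profit in that order, and that the frame has a default RangeIndex:

def upsert_product(df, product, counter, sales, net_profit):
    # Accumulate into the matching row if the product already exists,
    # otherwise append a new row at the end (assumes a RangeIndex).
    mask = df.iloc[:, 0] == product
    if mask.any():
        df.loc[mask, df.columns[1]] += counter
        df.loc[mask, df.columns[2]] += sales
        df.loc[mask, df.columns[3]] += net_profit
    else:
        df.loc[len(df)] = [product, counter, sales, net_profit]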