Cannot delete from datastore emulator - python-3.x

I have this simple piece of code:
from google.cloud import datastore
import requests

ds_c = datastore.Client(_http=requests.Session)
for entity in ds_c.query(kind='Kind').fetch():
    ds_c.delete(entity.key)
I am getting:
google.api_core.exceptions.ResourceExhausted: 429 Received message larger than max (4207799 vs. 4194304)
Full stack trace:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/dist-packages/google/api_core/grpc_helpers.py", line 57, in error_remapped_callable
return callable_(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/grpc/_channel.py", line 826, in __call__
return _end_unary_response_blocking(state, call, False, None)
File "/usr/local/lib/python3.7/dist-packages/grpc/_channel.py", line 729, in _end_unary_response_blocking
raise _InactiveRpcError(state)
grpc._channel._InactiveRpcError: <_InactiveRpcError of RPC that terminated with:
status = StatusCode.RESOURCE_EXHAUSTED
details = "Received message larger than max (4207799 vs. 4194304)"
debug_error_string = "{"created":"#1589308786.798030838","description":"Received message larger than max (4207799 vs. 4194304)","file":"src/core/ext/filters/message_size/message_size_filter.cc","file_line":191,"grpc_status":8}"
>
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/vagrant/.local/bin/fab", line 8, in <module>
sys.exit(program.run())
File "/home/vagrant/.local/lib/python3.7/site-packages/invoke/program.py", line 384, in run
self.execute()
File "/home/vagrant/.local/lib/python3.7/site-packages/invoke/program.py", line 566, in execute
executor.execute(*self.tasks)
File "/home/vagrant/.local/lib/python3.7/site-packages/invoke/executor.py", line 129, in execute
result = call.task(*args, **call.kwargs)
File "/home/vagrant/.local/lib/python3.7/site-packages/invoke/tasks.py", line 127, in __call__
result = self.body(*args, **kwargs)
File "/vagrant/cyclone/fabfile.py", line 34, in clean_test_datastore
for entity in ds_c.query(kind=kind).fetch():
File "/usr/local/lib/python3.7/dist-packages/google/api_core/page_iterator.py", line 212, in _items_iter
for page in self._page_iter(increment=False):
File "/usr/local/lib/python3.7/dist-packages/google/api_core/page_iterator.py", line 249, in _page_iter
page = self._next_page()
File "/usr/local/lib/python3.7/dist-packages/google/cloud/datastore/query.py", line 537, in _next_page
self._query.project, partition_id, read_options, query=query_pb
File "/usr/local/lib/python3.7/dist-packages/google/cloud/datastore_v1/gapic/datastore_client.py", line 384, in run_query
request, retry=retry, timeout=timeout, metadata=metadata
File "/usr/local/lib/python3.7/dist-packages/google/api_core/gapic_v1/method.py", line 143, in __call__
return wrapped_func(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/google/api_core/retry.py", line 286, in retry_wrapped_func
on_error=on_error,
File "/usr/local/lib/python3.7/dist-packages/google/api_core/retry.py", line 184, in retry_target
return target()
File "/usr/local/lib/python3.7/dist-packages/google/api_core/timeout.py", line 214, in func_with_timeout
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/google/api_core/grpc_helpers.py", line 59, in error_remapped_callable
six.raise_from(exceptions.from_grpc_error(exc), exc)
File "<string>", line 3, in raise_from
google.api_core.exceptions.ResourceExhausted: 429 Received message larger than max (4207799 vs. 4194304)

The easiest workaround is to fetch only the keys you are looking for, since the error comes from a response page exceeding gRPC's 4 MB message limit. Note that keys_only() modifies the query in place rather than returning it, e.g.:
query = ds_c.query(kind='Kind')
query.keys_only()
for entity in query.fetch():
It is also a good idea to add a limit to the fetch to reduce the size of each response, e.g. query.fetch(limit=500).
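Putting both suggestions together, a minimal sketch of a key-only, batched delete loop could look like this (delete_multi and the 500-key batch size are my additions, not something stated in the original answer):
from google.cloud import datastore

# Minimal sketch (not part of the original answer): delete a whole kind in
# key-only batches so no single response approaches the 4 MB gRPC cap.
ds_c = datastore.Client()
query = ds_c.query(kind='Kind')
query.keys_only()  # only keys are returned, keeping each page small

while True:
    keys = [entity.key for entity in query.fetch(limit=500)]
    if not keys:
        break
    ds_c.delete_multi(keys)  # delete up to 500 entities per call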

Related

Azure-quantum: problems with jobs submission

I use pyquil with Azure Quantum and submit jobs with the run_batch method of the AzureQuantumComputer class. Batches of up to 10 circuits work fine, but larger batches fail with the error below.
Traceback (most recent call last):
File "C:\Users\Enter\anaconda3\envs\qiskit_env\lib\site-packages\msrest\serialization.py", line 1436, in _deserialize
found_value = key_extractor(attr, attr_desc, data)
File "C:\Users\Enter\anaconda3\envs\qiskit_env\lib\site-packages\msrest\serialization.py", line 1180, in rest_key_extractor
return working_data.get(key)
AttributeError: 'str' object has no attribute 'get'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\Enter\anaconda3\envs\qiskit_env\lib\site-packages\msrest\serialization.py", line 1509, in failsafe_deserialize
return self(target_obj, data, content_type=content_type)
File "C:\Users\Enter\anaconda3\envs\qiskit_env\lib\site-packages\msrest\serialization.py", line 1376, in __call__
return self._deserialize(target_obj, data)
File "C:\Users\Enter\anaconda3\envs\qiskit_env\lib\site-packages\msrest\serialization.py", line 1454, in _deserialize
raise_with_traceback(DeserializationError, msg, err)
File "C:\Users\Enter\anaconda3\envs\qiskit_env\lib\site-packages\msrest\exceptions.py", line 51, in raise_with_traceback
raise error.with_traceback(exc_traceback)
File "C:\Users\Enter\anaconda3\envs\qiskit_env\lib\site-packages\msrest\serialization.py", line 1436, in _deserialize
found_value = key_extractor(attr, attr_desc, data)
File "C:\Users\Enter\anaconda3\envs\qiskit_env\lib\site-packages\msrest\serialization.py", line 1180, in rest_key_extractor
return working_data.get(key)
azure.core.exceptions.DeserializationError: ("Unable to deserialize to object: type, AttributeError: 'str' object has no attribute 'get'", AttributeError("'str' object has no attribute 'get'"))
Traceback (most recent call last):
File "C:\Users\Enter\PycharmProjects\QREM_pipline_development\pyquil_experiments.py", line 209, in <module>
unprocessed_results_now = pyquil_utilities.run_batches_parametric(backend_name=backend_name,
File "C:\Users\Enter\PycharmProjects\QREM_SECRET_DEVELOPMENT_LOC\backends_support\pyquil\pyquil_utilities.py", line 415, in run_batches_parametric
results = backend_instance.run_batch(executable,
File "C:\Users\Enter\anaconda3\envs\qiskit_env\lib\site-packages\pyquil_for_azure_quantum\__init__.py", line 141, in run_batch
return qam.run_batch(executable, memory_map)
File "C:\Users\Enter\anaconda3\envs\qiskit_env\lib\site-packages\pyquil_for_azure_quantum\__init__.py", line 336, in run_batch
job = self._target.submit(
File "C:\Users\Enter\anaconda3\envs\qiskit_env\lib\site-packages\azure\quantum\target\rigetti\target.py", line 183, in submit
return super().submit(input_data, name, input_params, **kwargs)
File "C:\Users\Enter\anaconda3\envs\qiskit_env\lib\site-packages\azure\quantum\target\target.py", line 141, in submit
return Job.from_input_data(
File "C:\Users\Enter\anaconda3\envs\qiskit_env\lib\site-packages\azure\quantum\job\base_job.py", line 117, in from_input_data
return cls.from_storage_uri(
File "C:\Users\Enter\anaconda3\envs\qiskit_env\lib\site-packages\azure\quantum\job\base_job.py", line 207, in from_storage_uri
job.submit()
File "C:\Users\Enter\anaconda3\envs\qiskit_env\lib\site-packages\azure\quantum\job\job.py", line 45, in submit
job = self.workspace.submit_job(self)
File "C:\Users\Enter\anaconda3\envs\qiskit_env\lib\site-packages\azure\quantum\workspace.py", line 265, in submit_job
details = client.create(
File "C:\Users\Enter\anaconda3\envs\qiskit_env\lib\site-packages\azure\core\tracing\decorator.py", line 78, in wrapper_use_tracer
return func(*args, **kwargs)
File "C:\Users\Enter\anaconda3\envs\qiskit_env\lib\site-packages\azure\quantum\_client\operations\_jobs_operations.py", line 387, in create
raise HttpResponseError(response=response, model=error)
azure.core.exceptions.HttpResponseError: Operation returned an invalid status 'Forbidden'
Content:
403 Forbidden
Microsoft-Azure-Application-Gateway/v2
I tried running different circuits; the failure seems to depend only on the number of circuits in a batch, not on the structure of any circuit. Programs are compiled to native Quil locally.
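Since batches of up to 10 circuits do go through, one possible workaround is to split a large parametric batch into smaller sub-batches and submit them separately. The sketch below is only my illustration under assumptions: chunk_size=10 and the {name: list-of-values} layout of memory_map are guesses, and run_batch(executable, memory_map) is taken from the traceback rather than from verified documentation.
def run_in_chunks(qam, executable, memory_map, chunk_size=10):
    # Hypothetical helper: submit at most chunk_size parameter settings per run_batch call.
    names = list(memory_map)
    total = len(memory_map[names[0]]) if names else 0
    results = []
    for start in range(0, total, chunk_size):
        chunk = {name: values[start:start + chunk_size]
                 for name, values in memory_map.items()}
        results.append(qam.run_batch(executable, chunk))
    return results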

Error when using joblib in python with undetected chromedriver

When I use (self.links is a list of strings)
Parallel(n_jobs=2)(delayed(self.buybysize)(link) for link in self.links)
with this function
def buybysize(self, link):
    browser = self.browser()
    # other commented stuff
def browser(self):
    options = uc.ChromeOptions()
    options.user_data_dir = self.user_data_dir
    options.add_argument(self.add_argument)
    driver = uc.Chrome(options=options)
    return driver
I get the error:
joblib.externals.loky.process_executor._RemoteTraceback:
Traceback (most recent call last):
File "/home/Me/PycharmProjects/zalando_buy/venv/lib/python3.8/site-packages/joblib/externals/loky/process_executor.py", line 436, in _process_worker
r = call_item()
File "/home/Me/PycharmProjects/zalando_buy/venv/lib/python3.8/site-packages/joblib/externals/loky/process_executor.py", line 288, in __call__
return self.fn(*self.args, **self.kwargs)
File "/home/Me/PycharmProjects/zalando_buy/venv/lib/python3.8/site-packages/joblib/_parallel_backends.py", line 595, in __call__
return self.func(*args, **kwargs)
File "/home/Me/PycharmProjects/zalando_buy/venv/lib/python3.8/site-packages/joblib/parallel.py", line 262, in __call__
return [func(*args, **kwargs)
File "/home/Me/PycharmProjects/zalando_buy/venv/lib/python3.8/site-packages/joblib/parallel.py", line 262, in <listcomp>
return [func(*args, **kwargs)
File "/home/Me/PycharmProjects/zalando_buy/Zalando.py", line 91, in buybysize
browser = self.browser()
File "/home/Me/PycharmProjects/zalando_buy/Zalando.py", line 38, in browser
driver = uc.Chrome(options=options)
File "/home/Me/PycharmProjects/zalando_buy/venv/lib/python3.8/site-packages/undetected_chromedriver/__init__.py", line 388, in __init__
self.browser_pid = start_detached(
File "/home/Me/PycharmProjects/zalando_buy/venv/lib/python3.8/site-packages/undetected_chromedriver/dprocess.py", line 30, in start_detached
multiprocessing.Process(
File "/usr/lib/python3.8/multiprocessing/process.py", line 121, in start
self._popen = self._Popen(self)
File "/usr/lib/python3.8/multiprocessing/context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "/home/Me/PycharmProjects/zalando_buy/venv/lib/python3.8/site-packages/joblib/externals/loky/backend/process.py", line 39, in _Popen
return Popen(process_obj)
File "/home/Me/PycharmProjects/zalando_buy/venv/lib/python3.8/site-packages/joblib/externals/loky/backend/popen_loky_posix.py", line 52, in __init__
self._launch(process_obj)
File "/home/Me/PycharmProjects/zalando_buy/venv/lib/python3.8/site-packages/joblib/externals/loky/backend/popen_loky_posix.py", line 157, in _launch
pid = fork_exec(cmd_python, self._fds, env=process_obj.env)
AttributeError: 'Process' object has no attribute 'env'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/Me/PycharmProjects/zalando_buy/Start.py", line 4, in <module>
class Start:
File "/home/Me/PycharmProjects/zalando_buy/Start.py", line 7, in Start
zalando.startshopping()
File "/home/Me/PycharmProjects/zalando_buy/Zalando.py", line 42, in startshopping
self.openlinks()
File "/home/Me/PycharmProjects/zalando_buy/Zalando.py", line 50, in openlinks
Parallel(n_jobs=2)(delayed(self.buybysize)(link) for link in self.links)
File "/home/Me/PycharmProjects/zalando_buy/venv/lib/python3.8/site-packages/joblib/parallel.py", line 1056, in __call__
self.retrieve()
File "/home/Me/PycharmProjects/zalando_buy/venv/lib/python3.8/site-packages/joblib/parallel.py", line 935, in retrieve
self._output.extend(job.get(timeout=self.timeout))
File "/home/Me/PycharmProjects/zalando_buy/venv/lib/python3.8/site-packages/joblib/_parallel_backends.py", line 542, in wrap_future_result
return future.result(timeout=timeout)
File "/usr/lib/python3.8/concurrent/futures/_base.py", line 444, in result
return self.__get_result()
File "/usr/lib/python3.8/concurrent/futures/_base.py", line 389, in __get_result
raise self._exception
AttributeError: 'Process' object has no attribute 'env'
Process finished with exit code 1
To me it looks like there are instabilities because undetected_chromedriver may already use multiprocessing itself, but isn't there any way I can open multiple browsers with UC and process each iteration in parallel?
Edit: I debugged, and the error appears when this line is executed:
driver = uc.Chrome(options=options)
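One thing worth trying (my sketch, not an answer from the original thread): since the crash happens when undetected_chromedriver spawns its own process from inside a loky worker, joblib's threading backend avoids the nested process launch. prefer="threads" is a standard joblib option, but whether it resolves this particular conflict is an assumption.
from joblib import Parallel, delayed

# Sketch: run the per-link work in threads instead of loky worker processes,
# so uc.Chrome() is not started from inside a joblib child process.
Parallel(n_jobs=2, prefer="threads")(delayed(self.buybysize)(link) for link in self.links)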

Error on odoo 12 after installation and db creation

I have to start using Odoo 12 at my job, but I can't get it working; I have already spent 5 days searching Google for an answer.
I would be very grateful if you could help me with some of your wisdom :)
After I install Odoo on my computer and visit localhost:8069 for the first time, it asks me to create my database. But after I do this it doesn't load the login page; instead it gives me a 500 Internal Server Error, with this console log every time I refresh the page:
2020-10-12 18:31:41,068 21425 ERROR prueba werkzeug: Error on request:
Traceback (most recent call last):
File "/opt/odoo/odoo12/entvirt/lib/python3.8/site-packages/werkzeug/serving.py", line 205, in run_wsgi
execute(self.server.app)
File "/opt/odoo/odoo12/entvirt/lib/python3.8/site-packages/werkzeug/serving.py", line 193, in execute
application_iter = app(environ, start_response)
File "/opt/odoo/odoo12/entvirt/src/odoo/service/server.py", line 434, in app
return self.app(e, s)
File "/opt/odoo/odoo12/entvirt/src/odoo/service/wsgi_server.py", line 142, in application
return application_unproxied(environ, start_response)
File "/opt/odoo/odoo12/entvirt/src/odoo/service/wsgi_server.py", line 117, in application_unproxied
result = odoo.http.root(environ, start_response)
File "/opt/odoo/odoo12/entvirt/src/odoo/http.py", line 1320, in __call__
return self.dispatch(environ, start_response)
File "/opt/odoo/odoo12/entvirt/src/odoo/http.py", line 1293, in __call__
return self.app(environ, start_wrapped)
File "/opt/odoo/odoo12/entvirt/lib/python3.8/site-packages/werkzeug/wsgi.py", line 599, in __call__
return self.app(environ, start_response)
File "/opt/odoo/odoo12/entvirt/src/odoo/http.py", line 1488, in dispatch
result = ir_http._dispatch()
File "/opt/odoo/odoo12/entvirt/src/addons/web_editor/models/ir_http.py", line 22, in _dispatch
return super(IrHttp, cls)._dispatch()
File "/opt/odoo/odoo12/entvirt/src/odoo/addons/base/models/ir_http.py", line 212, in _dispatch
return cls._handle_exception(e)
File "/opt/odoo/odoo12/entvirt/src/odoo/addons/base/models/ir_http.py", line 182, in _handle_exception
return request._handle_exception(exception)
File "/opt/odoo/odoo12/entvirt/src/odoo/http.py", line 776, in _handle_exception
return super(HttpRequest, self)._handle_exception(exception)
File "/opt/odoo/odoo12/entvirt/src/odoo/http.py", line 314, in _handle_exception
raise pycompat.reraise(type(exception), exception, sys.exc_info()[2])
File "/opt/odoo/odoo12/entvirt/src/odoo/tools/pycompat.py", line 87, in reraise
raise value
File "/opt/odoo/odoo12/entvirt/src/odoo/addons/base/models/ir_http.py", line 208, in _dispatch
result = request.dispatch()
File "/opt/odoo/odoo12/entvirt/src/odoo/http.py", line 835, in dispatch
r = self._call_function(**self.params)
File "/opt/odoo/odoo12/entvirt/src/odoo/http.py", line 346, in _call_function
return checked_call(self.db, *args, **kwargs)
File "/opt/odoo/odoo12/entvirt/src/odoo/service/model.py", line 98, in wrapper
return f(dbname, *args, **kwargs)
File "/opt/odoo/odoo12/entvirt/src/odoo/http.py", line 342, in checked_call
result.flatten()
File "/opt/odoo/odoo12/entvirt/src/odoo/http.py", line 1270, in flatten
self.response.append(self.render())
File "/opt/odoo/odoo12/entvirt/src/odoo/http.py", line 1263, in render
return env["ir.ui.view"].render_template(self.template, self.qcontext)
File "/opt/odoo/odoo12/entvirt/src/odoo/addons/base/models/ir_ui_view.py", line 1324, in render_template
return self.browse(self.get_view_id(template)).render(values, engine)
File "/opt/odoo/odoo12/entvirt/src/addons/web_editor/models/ir_ui_view.py", line 29, in render
return super(IrUiView, self).render(values=values, engine=engine, minimal_qcontext=minimal_qcontext)
File "/opt/odoo/odoo12/entvirt/src/odoo/addons/base/models/ir_ui_view.py", line 1333, in render
return self.env[engine].render(self.id, qcontext)
File "/opt/odoo/odoo12/entvirt/src/odoo/addons/base/models/ir_qweb.py", line 59, in render
result = super(IrQWeb, self).render(id_or_xml_id, values=values, **context)
File "/opt/odoo/odoo12/entvirt/src/odoo/addons/base/models/qweb.py", line 275, in render
self.compile(template, options)(self, body.append, values or {})
File "<decorator-gen-54>", line 2, in compile
File "/opt/odoo/odoo12/entvirt/src/odoo/tools/cache.py", line 93, in lookup
value = d[key] = self.method(*args, **kwargs)
File "/opt/odoo/odoo12/entvirt/src/odoo/addons/base/models/ir_qweb.py", line 114, in compile
return super(IrQWeb, self).compile(id_or_xml_id, options=options)
File "/opt/odoo/odoo12/entvirt/src/odoo/addons/base/models/qweb.py", line 338, in compile
raise QWebException("Error when compiling AST", e, path, node and etree.tostring(node[0], encoding='unicode'), name)
odoo.addons.base.models.qweb.QWebException: Name node can't be used with 'None' constant
Traceback (most recent call last):
File "/opt/odoo/odoo12/entvirt/src/odoo/tools/cache.py", line 88, in lookup
r = d[key]
File "/opt/odoo/odoo12/entvirt/src/odoo/tools/func.py", line 69, in wrapper
return func(self, *args, **kwargs)
File "/opt/odoo/odoo12/entvirt/src/odoo/tools/lru.py", line 44, in __getitem__
a = self.d[obj].me
KeyError: ('ir.qweb', <function IrQWeb.compile at 0x7f3b32b84280>, 173, ('en_US', None, None, None, None, None))
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/opt/odoo/odoo12/entvirt/src/odoo/addons/base/models/qweb.py", line 330, in compile
unsafe_eval(compile(astmod, '<template>', 'exec'), ns)
ValueError: Name node can't be used with 'None' constant
Error when compiling AST
ValueError: Name node can't be used with 'None' constant
Template: 173
Path: /templates/t/t/form/input[2]
Node: <input type="hidden" name="redirect" t-att-value="redirect"/>
This is a problem with Python 3.8.5. Try applying this fix: https://github.com/odoo/odoo/pull/55305/commits/5baf0f2130b8d27d50aa60b54d68a5fc57b127a0
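For context, a minimal reproduction sketch (my illustration, not part of the linked patch): the error message comes from an AST that contains an ast.Name node whose id is 'None', which Python 3.8's compiler rejects.
import ast

# On Python 3.8+, compiling an ast.Name whose id is 'None' raises
# ValueError: Name node can't be used with 'None' constant
tree = ast.Expression(body=ast.Name(id='None', ctx=ast.Load()))
ast.fix_missing_locations(tree)
compile(tree, '<template>', 'eval')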

python-telegram-bot doesn't work on Python 3.x

I'm trying to make a program that uses python-telegram-bot, and I need to use Python 3 for that. But when I try to launch it using Python 3, I get some error I don't quite understand. The same happens when I use the built-in examples. Could somebody explain what it means? The precise output of the program follows.
2016-06-29 06:17:44,260 - telegram.ext.updater - ERROR - unhandled exception
Traceback (most recent call last):
File "/usr/local/lib/python3.4/dist-packages/telegram/ext/updater.py", line 105, in _thread_wrapper
target(*args, **kwargs)
File "/usr/local/lib/python3.4/dist-packages/telegram/ext/updater.py", line 216, in _start_polling
self._bootstrap(bootstrap_retries, clean=clean, webhook_url='')
File "/usr/local/lib/python3.4/dist-packages/telegram/ext/updater.py", line 320, in _bootstrap
self.bot.setWebhook(webhook_url=webhook_url, certificate=cert)
File "/usr/local/lib/python3.4/dist-packages/telegram/bot.py", line 121, in decorator
result = func(self, *args, **kwargs)
File "/usr/local/lib/python3.4/dist-packages/telegram/bot.py", line 1263, in setWebhook
result = request.post(url, data, timeout=kwargs.get('timeout'))
File "/usr/local/lib/python3.4/dist-packages/telegram/utils/request.py", line 174, in post
headers={'Content-Type': 'application/json'})
File "/usr/local/lib/python3.4/dist-packages/telegram/utils/request.py", line 100, in _request_wrapper
resp = _get_con_pool().request(*args, **kwargs)
File "/usr/lib/python3/dist-packages/urllib3/request.py", line 72, in request
**urlopen_kw)
File "/usr/lib/python3/dist-packages/urllib3/request.py", line 135, in request_encode_body
**urlopen_kw)
TypeError: urlopen() got multiple values for keyword argument 'body'
Exception in thread updater:
Traceback (most recent call last):
File "/usr/lib/python3.4/threading.py", line 920, in _bootstrap_inner
self.run()
File "/usr/lib/python3.4/threading.py", line 868, in run
self._target(*self._args, **self._kwargs)
File "/usr/local/lib/python3.4/dist-packages/telegram/ext/updater.py", line 105, in _thread_wrapper
target(*args, **kwargs)
File "/usr/local/lib/python3.4/dist-packages/telegram/ext/updater.py", line 216, in _start_polling
self._bootstrap(bootstrap_retries, clean=clean, webhook_url='')
File "/usr/local/lib/python3.4/dist-packages/telegram/ext/updater.py", line 320, in _bootstrap
self.bot.setWebhook(webhook_url=webhook_url, certificate=cert)
File "/usr/local/lib/python3.4/dist-packages/telegram/bot.py", line 121, in decorator
result = func(self, *args, **kwargs)
File "/usr/local/lib/python3.4/dist-packages/telegram/bot.py", line 1263, in setWebhook
result = request.post(url, data, timeout=kwargs.get('timeout'))
File "/usr/local/lib/python3.4/dist-packages/telegram/utils/request.py", line 174, in post
headers={'Content-Type': 'application/json'})
File "/usr/local/lib/python3.4/dist-packages/telegram/utils/request.py", line 100, in _request_wrapper
resp = _get_con_pool().request(*args, **kwargs)
File "/usr/lib/python3/dist-packages/urllib3/request.py", line 72, in request
**urlopen_kw)
File "/usr/lib/python3/dist-packages/urllib3/request.py", line 135, in request_encode_body
**urlopen_kw)
TypeError: urlopen() got multiple values for keyword argument 'body'
2016-06-29 06:17:45,248 - telegram.ext.dispatcher - CRITICAL - stopping due to exception in another thread
For this to work, you need urllib3 >= 1.10; install or upgrade it via your package manager.
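As a quick sanity check (my sketch), you can print the installed urllib3 version; the traceback above shows the error coming from the distribution's own copy in /usr/lib/python3/dist-packages/urllib3.
import urllib3

# python-telegram-bot needs urllib3 >= 1.10; older distribution packages
# trigger the duplicate 'body' keyword error shown above.
print(urllib3.__version__)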

SyntaxError raised when using joblib with lxml on python 3.5

I am trying to parallelize the task of correcting text in many documents with Python, so I naturally found joblib. I want each task to correct a given document. Here is the structure of the code:
if __name__ == '__main__':
    lexicon = build_compact_lexicon()

    from joblib import Parallel, delayed
    import multiprocessing

    num_cores = multiprocessing.cpu_count()
    results = Parallel(n_jobs=num_cores)(delayed(find_errors)('GDL', i, 1, lexicon) for i in range(1798, 1820))
I am using the function find_errors, summarized here:
def find_errors(newspaper, year, month, lexicon):
    # parse the input newspaper text data using the etree parser from lxml
    # detect errors in the text
    return found_errors_type1, found_errors_type2, found_errors_type3
This raises a few errors:
multiprocessing.pool.RemoteTraceback:
"""
Traceback (most recent call last):
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/site-packages/joblib/parallel.py", line 130, in __call__
return self.func(*args, **kwargs)
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/site-packages/joblib/parallel.py", line 72, in __call__
return [func(*args, **kwargs) for func, args, kwargs in self.items]
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/site-packages/joblib/parallel.py", line 72, in <listcomp>
return [func(*args, **kwargs) for func, args, kwargs in self.items]
File "hellowordParallel.py", line 85, in find_errors
tree = etree.parse(xml_file_path)
File "src/lxml/lxml.etree.pyx", line 3427, in lxml.etree.parse (src/lxml/lxml.etree.c:79801)
File "src/lxml/parser.pxi", line 1805, in lxml.etree._parseDocument (src/lxml/lxml.etree.c:116293)
TypeError: cannot parse from 'NoneType'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/tokenize.py", line 392, in find_cookie
line_string = line.decode('utf-8')
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xb0 in position 24: invalid start byte
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/multiprocessing/pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/site-packages/joblib/parallel.py", line 139, in __call__
tb_offset=1)
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/site-packages/joblib/format_stack.py", line 373, in format_exc
frames = format_records(records)
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/site-packages/joblib/format_stack.py", line 274, in format_records
for token in generate_tokens(linereader):
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/tokenize.py", line 514, in _tokenize
line = readline()
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/site-packages/joblib/format_stack.py", line 265, in linereader
line = getline(file, lnum[0])
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/linecache.py", line 16, in getline
lines = getlines(filename, module_globals)
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/linecache.py", line 47, in getlines
return updatecache(filename, module_globals)
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/linecache.py", line 136, in updatecache
with tokenize.open(fullname) as fp:
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/tokenize.py", line 456, in open
encoding, lines = detect_encoding(buffer.readline)
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/tokenize.py", line 433, in detect_encoding
encoding = find_cookie(first)
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/tokenize.py", line 397, in find_cookie
raise SyntaxError(msg)
File "<string>", line None
SyntaxError: invalid or missing encoding declaration for '/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/site-packages/lxml/etree.so'
"""
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "hellowordParallel.py", line 160, in <module>
results = Parallel(n_jobs=num_cores)(delayed(find_errors)('GDL', i, 1, lexicon) for i in range(1798, 1820))
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/site-packages/joblib/parallel.py", line 810, in __call__
self.retrieve()
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/site-packages/joblib/parallel.py", line 727, in retrieve
self._output.extend(job.get())
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/multiprocessing/pool.py", line 608, in get
raise self._value
SyntaxError: invalid or missing encoding declaration for '/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/site-packages/lxml/etree.so'
I don't understand whether this is due to something configuration-related or whether my function simply doesn't fit a parallel implementation... (I guess it should...)
Has this happened to any of you before?
I hope my question is clear and that there is enough information for someone to help me!
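A debugging sketch (my suggestion, not from the original question): the final SyntaxError is only joblib's traceback formatter failing on lxml's compiled etree.so; the underlying failure is the earlier TypeError: cannot parse from 'NoneType', i.e. etree.parse() received None instead of a file path. Running a single task outside Parallel shows that original error with a normal traceback; build_compact_lexicon and find_errors are the question's own functions.
# Sketch: run one task sequentially so the real lxml error surfaces directly.
if __name__ == '__main__':
    lexicon = build_compact_lexicon()
    print(find_errors('GDL', 1798, 1, lexicon))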

Resources