I'm trying to run LUKE for inference on multiple GPUs using DataParallel, but I'm encountering an error that I can't seem to resolve. Can you help?
Here is my code:
from transformers import LukeTokenizer, LukeForEntitySpanClassification
import torch
import torch.nn as nn
import pickle
from tqdm import tqdm

luke_model = LukeForEntitySpanClassification.from_pretrained("studio-ousia/luke-large-finetuned-conll-2003")

# Get the inputs (type: transformers.tokenization_utils_base.BatchEncoding)
inputs = []
for i in tqdm(range(10)):
    input_filepath = df["input_filepath"].iloc[i]
    with open(input_filepath, 'rb') as handle:
        inputs.append(pickle.load(handle))

device_ids = [0, 1, 2, 3]
model = torch.nn.DataParallel(luke_model)
model.to("cuda")
replicas = nn.parallel.replicate(model, device_ids)
inputs_dp = nn.parallel.scatter(inputs[:4], device_ids)
outputs = nn.parallel.parallel_apply(replicas, inputs_dp)
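For comparison, I believe the standard pattern is to skip the manual replicate/scatter and just call the wrapped model, letting DataParallel split the batch across the GPUs internally. A minimal sketch of what I mean (untested; it assumes each pickled BatchEncoding holds plain 'pt' tensors that can be unpacked into keyword arguments):

dp_model = torch.nn.DataParallel(luke_model, device_ids=[0, 1, 2, 3]).to("cuda")

# BatchEncoding itself has no .size(), so unpack it into plain tensors first
batch = {k: v.to("cuda") for k, v in inputs[0].items()}

with torch.no_grad():
    outputs = dp_model(**batch)  # DataParallel scatters the kwargs itself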
The error I get with my code above is:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<command-1863732679336681> in <module>
21
22 inputs_dp = nn.parallel.scatter(inputs[:4], device_ids)
---> 23 outputs = nn.parallel.parallel_apply(replicas, inputs_dp)
/databricks/python/lib/python3.8/site-packages/torch/nn/parallel/parallel_apply.py in parallel_apply(modules, inputs, kwargs_tup, devices)
84 output = results[i]
85 if isinstance(output, ExceptionWrapper):
---> 86 output.reraise()
87 outputs.append(output)
88 return outputs
/databricks/python/lib/python3.8/site-packages/torch/_utils.py in reraise(self)
432 # instantiate since we don't know how to
433 raise RuntimeError(msg) from None
--> 434 raise exception
435
436
AttributeError: Caught AttributeError in replica 0 on device 0.
Original Traceback (most recent call last):
File "/databricks/python/lib/python3.8/site-packages/torch/nn/parallel/parallel_apply.py", line 61, in _worker
output = module(*input, **kwargs)
File "/databricks/python/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/databricks/python/lib/python3.8/site-packages/torch/nn/parallel/data_parallel.py", line 168, in forward
outputs = self.parallel_apply(replicas, inputs, kwargs)
File "/databricks/python/lib/python3.8/site-packages/torch/nn/parallel/data_parallel.py", line 178, in parallel_apply
return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
File "/databricks/python/lib/python3.8/site-packages/torch/nn/parallel/parallel_apply.py", line 86, in parallel_apply
output.reraise()
File "/databricks/python/lib/python3.8/site-packages/torch/_utils.py", line 434, in reraise
raise exception
AttributeError: Caught AttributeError in replica 0 on device 0.
Original Traceback (most recent call last):
File "/databricks/python/lib/python3.8/site-packages/transformers/tokenization_utils_base.py", line 250, in __getattr__
return self.data[item]
KeyError: 'size'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/databricks/python/lib/python3.8/site-packages/torch/nn/parallel/parallel_apply.py", line 61, in _worker
output = module(*input, **kwargs)
File "/databricks/python/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/databricks/python/lib/python3.8/site-packages/transformers/models/luke/modeling_luke.py", line 1583, in forward
outputs = self.luke(
File "/databricks/python/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/databricks/python/lib/python3.8/site-packages/transformers/models/luke/modeling_luke.py", line 977, in forward
input_shape = input_ids.size()
File "/databricks/python/lib/python3.8/site-packages/transformers/tokenization_utils_base.py", line 252, in __getattr__
raise AttributeError
AttributeError
I tried adding:
class MyDataParallel(nn.DataParallel):
    def __getattr__(self, name):
        return getattr(self.module, name)
But I get: RecursionError: maximum recursion depth exceeded while calling a Python object
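In case it matters, I suspect the recursion comes from __getattr__ intercepting the lookup of self.module itself. A variant that defers to nn.Module's own attribute lookup first should avoid that (untested sketch):

import torch.nn as nn

class MyDataParallel(nn.DataParallel):
    def __getattr__(self, name):
        try:
            # nn.Module.__getattr__ resolves 'module' from self._modules,
            # so this call does not come back through this method
            return super().__getattr__(name)
        except AttributeError:
            return getattr(self.module, name)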
Thanks in advance!
The code runs for several iterations and then throws the following error.
My Dataset:
import torch
from torch import load
from torch.utils.data import Dataset


class AudioDataset(Dataset):
    'Characterizes a dataset for PyTorch'

    def __init__(self, input_feature_paths, target_feature_folder) -> None:
        self.input_feature_paths = input_feature_paths
        self.target_feature_folder = target_feature_folder

    def __len__(self):
        return len(self.input_feature_paths)

    def __getitem__(self, index):
        input_feature_path = self.input_feature_paths[index]
        input_feature = load(input_feature_path, map_location='cpu')
        target_feature_path = self.target_feature_folder / input_feature_path.parts[-1]
        target_feature = load(target_feature_path, map_location='cpu')
        return input_feature.to(dtype=torch.float64), target_feature.to(dtype=torch.float64)
I set the dtype to torch.float64 because it throws the same error when writing to the TensorBoard summary writer.
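For context, the loader is built roughly like this (a minimal sketch; the paths, batch size, and worker count are illustrative, not my real values):

from pathlib import Path
from torch.utils.data import DataLoader

# Illustrative setup; the traceback below mentions "worker process 4",
# so the real run uses at least 5 workers.
input_feature_paths = sorted(Path('features/input').glob('*.pt'))
dataset = AudioDataset(input_feature_paths, Path('features/target'))
train_loader = DataLoader(dataset, batch_size=16, shuffle=True, num_workers=8)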
Error Stack
Traceback (most recent call last):
File "student_audio_feature_extractor.py", line 178, in <module>
train(dt, input_frame)
File "student_audio_feature_extractor.py", line 164, in train
model, train_loss = train_step(model, train_loader, optimizer, criterion)
File "student_audio_feature_extractor.py", line 80, in train_step
for input_feature, target_feature in train_loader:
File "/home/visge/miniconda3/envs/zk_torch/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 628, in __next__
data = self._next_data()
File "/home/visge/miniconda3/envs/zk_torch/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 1313, in _next_data
return self._process_data(data)
File "/home/visge/miniconda3/envs/zk_torch/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 1359, in _process_data
data.reraise()
File "/home/visge/miniconda3/envs/zk_torch/lib/python3.8/site-packages/torch/_utils.py", line 543, in reraise
raise exception
RuntimeError: Caught RuntimeError in DataLoader worker process 4.
Original Traceback (most recent call last):
File "/home/visge/miniconda3/envs/zk_torch/lib/python3.8/site-packages/torch/utils/data/_utils/worker.py", line 302, in _worker_loop
data = fetcher.fetch(index)
File "/home/visge/miniconda3/envs/zk_torch/lib/python3.8/site-packages/torch/utils/data/_utils/fetch.py", line 61, in fetch
return self.collate_fn(data)
File "/home/visge/miniconda3/envs/zk_torch/lib/python3.8/site-packages/torch/utils/data/_utils/collate.py", line 265, in default_collate
return collate(batch, collate_fn_map=default_collate_fn_map)
File "/home/visge/miniconda3/envs/zk_torch/lib/python3.8/site-packages/torch/utils/data/_utils/collate.py", line 143, in collate
return [collate(samples, collate_fn_map=collate_fn_map) for samples in transposed] # Backwards compatibility.
File "/home/visge/miniconda3/envs/zk_torch/lib/python3.8/site-packages/torch/utils/data/_utils/collate.py", line 143, in <listcomp>
return [collate(samples, collate_fn_map=collate_fn_map) for samples in transposed] # Backwards compatibility.
File "/home/visge/miniconda3/envs/zk_torch/lib/python3.8/site-packages/torch/utils/data/_utils/collate.py", line 120, in collate
return collate_fn_map[elem_type](batch, collate_fn_map=collate_fn_map)
File "/home/visge/miniconda3/envs/zk_torch/lib/python3.8/site-packages/torch/utils/data/_utils/collate.py", line 162, in collate_tensor_fn
out = elem.new(storage).resize_(len(batch), *list(elem.size()))
RuntimeError: Trying to resize storage that is not resizable
I had a tensor of shape [] (a 0-d tensor); that's why it throws this error. I changed it and it works now.
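For anyone hitting the same thing, a minimal sketch of the fix, assuming (as in my case) the culprit is a 0-d tensor of shape []:

import torch

def ensure_batchable(t: torch.Tensor) -> torch.Tensor:
    # A shape-[] tensor can't be stacked consistently with the rest of
    # the batch by default_collate; give it an explicit leading dimension.
    return t.unsqueeze(0) if t.dim() == 0 else t

# In AudioDataset.__getitem__:
#     return ensure_batchable(input_feature), ensure_batchable(target_feature)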
I'm having this problem, which I'm unable to solve. Has anyone had the same issue?
I'm using Windows 10 and TensorFlow 2.
This is the command that I ran:
python test.py --model_architecture ds_cnn --model_size_info 5 64 10 4 2 2 64 3 3 1 1 64 3 3 1 1 64 3 3 1 1 64 3 3 1 1 --dct_coefficient_count 10 --window_size_ms 40 --window_stride_ms 20 --checkpoint ../Pretrained_models/DS_CNN/DS_CNN_S/ckpt/ds_cnn_0.94_ckpt
Untarring speech_commands_v0.02.tar.gz...
Running testing on validation set...Traceback (most recent call last):
File "C:\Users\x\anaconda3\envs\newenvt\lib\site-packages\tensorflow\python\util\dispatch.py",
line 206, in wrapper
return target(*args, **kwargs)
File "C:\Users\x\anaconda3\envs\newenvt\lib\site-packages\tensorflow\python\ops\math_ops.py",
line 1838, in tensor_not_equals
return gen_math_ops.not_equal(self, other, incompatible_shape_error=False)
File "C:\Users\x\anaconda3\envs\newenvt\lib\site-
packages\tensorflow\python\ops\gen_math_ops.py", line 6573, in not_equal
ctx=_ctx)
File "C:\Users\x\anaconda3\envs\newenvt\lib\site-
packages\tensorflow\python\ops\gen_math_ops.py", line 6601, in not_equal_eager_fallback
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [])
File "C:\Users\x\anaconda3\envs\newenvt\lib\site-packages\tensorflow\python\eager\execute.py",
line 280, in args_to_matching_eager
ret = [ops.convert_to_tensor(t, dtype, ctx=ctx) for t in l]
File "C:\Users\x\anaconda3\envs\newenvt\lib\site-packages\tensorflow\python\eager\execute.py",
line 280, in
ret = [ops.convert_to_tensor(t, dtype, ctx=ctx) for t in l]
File "C:\Users\x\anaconda3\envs\newenvt\lib\site-
packages\tensorflow\python\profiler\trace.py", line 163, in wrapped
return func(*args, **kwargs)
File "C:\Users\x\anaconda3\envs\newenvt\lib\site-packages\tensorflow\python\framework\ops.py",
line 1566, in convert_to_tensor
ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
File "C:\Users\x\anaconda3\envs\newenvt\lib\site-
packages\tensorflow\python\framework\constant_op.py", line 339, in
_constant_tensor_conversion_function
return constant(v, dtype=dtype, name=name)
File "C:\Users\x\anaconda3\envs\newenvt\lib\site-
packages\tensorflow\python\framework\constant_op.py", line 265, in constant
allow_broadcast=True)
File "C:\Users\x\anaconda3\envs\newenvt\lib\site-
packages\tensorflow\python\framework\constant_op.py", line 276, in _constant_impl
return _constant_eager_impl(ctx, value, dtype, shape, verify_shape)
File "C:\Users\x\anaconda3\envs\newenvt\lib\site-
packages\tensorflow\python\framework\constant_op.py", line 301, in _constant_eager_impl
t = convert_to_eager_tensor(value, ctx, type)
File "C:\Users\x\anaconda3\envs\newenvt\lib\site-
packages\tensorflow\python\framework\constant_op.py", line 98, in convert_to_eager_tensor
return ops.EagerTensor(value, ctx.device_name, type)
ValueError: TypeError: object of type 'RaggedTensor' has no len()
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "test.py", line 182, in <module>
test()
File "test.py", line 48, in test
val_data = audio_processor.get_data(audio_processor.Modes.VALIDATION).batch(FLAGS.batch_size)
File "C:\Users\x\Dropbox\Documents\x\Coding\KWS\tflu-kws-cortex-m\Training\data.py", line 190, in get_data
use_background = (self.background_data != []) and (mode == AudioProcessor.Modes.TRAINING)
File "C:\Users\x\anaconda3\envs\newenvt\lib\site-packages\tensorflow\python\util\dispatch.py", line 210, in wrapper
result = dispatch(wrapper, args, kwargs)
File "C:\Users\x\anaconda3\envs\newenvt\lib\site-packages\tensorflow\python\util\dispatch.py", line 122, in dispatch
result = dispatcher.handle(args, kwargs)
File "C:\Users\x\anaconda3\envs\newenvt\lib\site-packages\tensorflow\python\ops\ragged\ragged_dispatch.py", line 219, in handle
ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(y))
File "C:\Users\x\anaconda3\envs\newenvt\lib\site-packages\tensorflow\python\ops\ragged\ragged_tensor_shape.py", line 470, in broadcast_dynamic_shape
shape_x = shape_x.broadcast_dimension(axis, shape_y.dimension_size(axis))
File "C:\Users\x\anaconda3\envs\newenvt\lib\site-packages\tensorflow\python\ops\ragged\ragged_tensor_shape.py", line 351, in broadcast_dimension
condition, data=broadcast_err, summarize=10)
File "C:\Users\x\anaconda3\envs\newenvt\lib\site-packages\tensorflow\python\util\dispatch.py", line 206, in wrapper
return target(*args, **kwargs)
File "C:\Users\x\anaconda3\envs\newenvt\lib\site-packages\tensorflow\python\util\tf_should_use.py", line 247, in wrapped
return _add_should_use_warning(fn(*args, **kwargs),
File "C:\Users\x\anaconda3\envs\newenvt\lib\site-packages\tensorflow\python\ops\control_flow_ops.py", line 164, in Assert
(condition, "\n".join(data_str)))
tensorflow.python.framework.errors_impl.InvalidArgumentError: Expected 'tf.Tensor(False, shape=(), dtype=bool)' to be true. Summarized data: b'Unable to broadcast: dimension size mismatch in dimension'
1
b'lengths='
0
b'dim_size='
1522930, 988891, 980062, 960000, 978488, 960000
Thank you
The issue was TensorFlow version compatibility: downgrading from TensorFlow 2.5 to 2.3 fixes the problem.
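If it helps, a quick sanity check that the downgrade took effect (e.g. after pip install tensorflow==2.3):

import tensorflow as tf

# Should print a 2.3.x version string after the downgrade.
print(tf.__version__)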
This is a case of using Keras ImageDataGenerator with .flow_from_directory and wrapping it with tf.data.Dataset.from_generator(...). The dataset fails on any attempt to iterate through it.
Error summary:
InvalidArgumentError: TypeError: endswith first arg must be bytes or a tuple of bytes, not str
Code snippet:
import tensorflow as tf  # version 2.1.0

DATA_URL = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'
flowers_root_path = tf.keras.utils.get_file(origin=DATA_URL, fname='flower_photos', untar=True)

img_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255, rotation_range=20)
gen = img_gen.flow_from_directory(flowers_root_path)

ds = tf.data.Dataset.from_generator(
    # lambda: gen,  # this works
    img_gen.flow_from_directory, args=[flowers_root_path],  # this failed
    output_types=(tf.float32, tf.float32),
    output_shapes=([32, 256, 256, 3], [32, 5])
)

it = iter(ds)
batch = next(it)
print(batch)
Using "lambda: gen" looks ok. Any idea why?
Full Stack trace:
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/eager/context.py in execution_mode(mode)
1896 ctx.executor = executor_new
-> 1897 yield
1898 finally:
10 frames
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/data/ops/iterator_ops.py in _next_internal(self)
658 output_types=self._flat_output_types,
--> 659 output_shapes=self._flat_output_shapes)
660
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/gen_dataset_ops.py in iterator_get_next_sync(iterator, output_types, output_shapes, name)
2478 except _core._NotOkStatusException as e:
-> 2479 _ops.raise_from_not_ok_status(e, name)
2480 # Add nodes to the TensorFlow graph.
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/ops.py in raise_from_not_ok_status(e, name)
6605 # pylint: disable=protected-access
-> 6606 six.raise_from(core._status_to_exception(e.code, message), None)
6607 # pylint: enable=protected-access
/usr/local/lib/python3.6/dist-packages/six.py in raise_from(value, from_value)
InvalidArgumentError: TypeError: endswith first arg must be bytes or a tuple of bytes, not str
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/data/ops/dataset_ops.py", line 673, in get_iterator
return self._iterators[iterator_id]
KeyError: 0
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/script_ops.py", line 236, in __call__
ret = func(*args)
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/data/ops/dataset_ops.py", line 789, in generator_py_func
values = next(generator_state.get_iterator(iterator_id))
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/data/ops/dataset_ops.py", line 675, in get_iterator
iterator = iter(self._generator(*self._args.pop(iterator_id)))
File "/usr/local/lib/python3.6/dist-packages/keras_preprocessing/image/image_data_generator.py", line 540, in flow_from_directory
interpolation=interpolation
File "/usr/local/lib/python3.6/dist-packages/keras_preprocessing/image/directory_iterator.py", line 126, in __init__
classes, filenames = res.get()
File "/usr/lib/python3.6/multiprocessing/pool.py", line 644, in get
raise self._value
File "/usr/lib/python3.6/multiprocessing/pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "/usr/local/lib/python3.6/dist-packages/keras_preprocessing/image/utils.py", line 216, in _list_valid_filenames_in_directory
for root, fname in valid_files:
File "/usr/local/lib/python3.6/dist-packages/keras_preprocessing/image/utils.py", line 172, in _iter_valid_files
if fname.lower().endswith('.tiff'):
TypeError: endswith first arg must be bytes or a tuple of bytes, not str
[[{{node PyFunc}}]] [Op:IteratorGetNextSync]
During handling of the above exception, another exception occurred:
InvalidArgumentError Traceback (most recent call last)
<ipython-input-56-a2623f5ab104> in <module>()
1 it = iter(ds)
----> 2 batch = next(it)
3 print(batch)
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/data/ops/iterator_ops.py in __next__(self)
628
629 def __next__(self): # For Python 3 compatibility
--> 630 return self.next()
631
632 def _next_internal(self):
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/data/ops/iterator_ops.py in next(self)
672 """Returns a nested structure of `Tensor`s containing the next element."""
673 try:
--> 674 return self._next_internal()
675 except errors.OutOfRangeError:
676 raise StopIteration
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/data/ops/iterator_ops.py in _next_internal(self)
663 return self._element_spec._from_compatible_tensor_list(ret) # pylint: disable=protected-access
664 except AttributeError:
--> 665 return structure.from_compatible_tensor_list(self._element_spec, ret)
666
667 @property
/usr/lib/python3.6/contextlib.py in __exit__(self, type, value, traceback)
97 value = type()
98 try:
---> 99 self.gen.throw(type, value, traceback)
100 except StopIteration as exc:
101 # Suppress StopIteration *unless* it's the same exception that
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/eager/context.py in execution_mode(mode)
1898 finally:
1899 ctx.executor = executor_old
-> 1900 executor_new.wait()
1901
1902
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/eager/executor.py in wait(self)
65 def wait(self):
66 """Waits for ops dispatched in this executor to finish."""
---> 67 pywrap_tensorflow.TFE_ExecutorWaitForAllPendingNodes(self._handle)
68
69 def clear_error(self):
InvalidArgumentError: TypeError: endswith first arg must be bytes or a tuple of bytes, not str
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/data/ops/dataset_ops.py", line 673, in get_iterator
return self._iterators[iterator_id]
KeyError: 0
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/script_ops.py", line 236, in __call__
ret = func(*args)
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/data/ops/dataset_ops.py", line 789, in generator_py_func
values = next(generator_state.get_iterator(iterator_id))
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/data/ops/dataset_ops.py", line 675, in get_iterator
iterator = iter(self._generator(*self._args.pop(iterator_id)))
File "/usr/local/lib/python3.6/dist-packages/keras_preprocessing/image/image_data_generator.py", line 540, in flow_from_directory
interpolation=interpolation
File "/usr/local/lib/python3.6/dist-packages/keras_preprocessing/image/directory_iterator.py", line 126, in __init__
classes, filenames = res.get()
File "/usr/lib/python3.6/multiprocessing/pool.py", line 644, in get
raise self._value
File "/usr/lib/python3.6/multiprocessing/pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "/usr/local/lib/python3.6/dist-packages/keras_preprocessing/image/utils.py", line 216, in _list_valid_filenames_in_directory
for root, fname in valid_files:
File "/usr/local/lib/python3.6/dist-packages/keras_preprocessing/image/utils.py", line 172, in _iter_valid_files
if fname.lower().endswith('.tiff'):
TypeError: endswith first arg must be bytes or a tuple of bytes, not str
[[{{node PyFunc}}]]
As per this Stack Overflow answer, you can make your code work by replacing
gen = img_gen.flow_from_directory(flowers_root_path)
with
def Gen():
    gen = img_gen.flow_from_directory(flowers_root_path)
    for (x, y) in gen:
        yield (x, y)
The underlying problem is that arguments passed through args=[...] are converted to tensors and handed to the generator function as bytes, so flow_from_directory receives a bytes path and keras_preprocessing's str-based filename checks (like endswith('.tiff')) fail; the wrapper above keeps the path as a plain Python str.
Complete working code is shown below:
import tensorflow as tf  # version 2.1.0

DATA_URL = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'
flowers_root_path = tf.keras.utils.get_file(origin=DATA_URL, fname='flower_photos', untar=True)

img_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255, rotation_range=20)

def Gen():
    gen = img_gen.flow_from_directory(flowers_root_path)
    for (x, y) in gen:
        yield (x, y)

ds = tf.data.Dataset.from_generator(
    Gen, output_types=(tf.float32, tf.float32), output_shapes=([32, 256, 256, 3], [32, 5]))

it = iter(ds)
batch = next(it)
print(batch)
Also, please find the GitHub Gist with the working code.
Running Keras gives me the following error:
Using TensorFlow backend.
ERROR:root:Internal Python error in the inspect module.
Below is the traceback from this internal error.
ERROR:root:Internal Python error in the inspect module.
Below is the traceback from this internal error.
Traceback (most recent call last):
File "c:\users\chetan garg\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\pywrap_tensorflow.py", line 58, in <module>
from tensorflow.python.pywrap_tensorflow_internal import *
File "c:\users\chetan garg\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\pywrap_tensorflow_internal.py", line 28, in <module>
_pywrap_tensorflow_internal = swig_import_helper()
File "c:\users\chetan garg\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\pywrap_tensorflow_internal.py", line 24, in swig_import_helper
_mod = imp.load_module('_pywrap_tensorflow_internal', fp, pathname, description)
File "c:\users\chetan garg\appdata\local\programs\python\python36\lib\imp.py", line 243, in load_module
return load_dynamic(name, filename, file)
File "c:\users\chetan garg\appdata\local\programs\python\python36\lib\imp.py", line 343, in load_dynamic
return _load(spec)
ImportError: DLL load failed: The specified module could not be found.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "c:\users\chetan garg\appdata\local\programs\python\python36\lib\site-packages\IPython\core\interactiveshell.py", line 2034, in showtraceback
stb = value._render_traceback_()
AttributeError: 'ImportError' object has no attribute '_render_traceback_'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "c:\users\chetan garg\appdata\local\programs\python\python36\lib\site-packages\IPython\core\interactiveshell.py", line 3242, in run_ast_nodes
if (await self.run_code(code, result, async_=asy)):
File "c:\users\chetan garg\appdata\local\programs\python\python36\lib\site-packages\IPython\core\interactiveshell.py", line 3336, in run_code
self.showtraceback(running_compiled_code=True)
File "c:\users\chetan garg\appdata\local\programs\python\python36\lib\site-packages\IPython\core\interactiveshell.py", line 2037, in showtraceback
value, tb, tb_offset=tb_offset)
File "c:\users\chetan garg\appdata\local\programs\python\python36\lib\site-packages\IPython\core\ultratb.py", line 1418, in structured_traceback
self, etype, value, tb, tb_offset, number_of_lines_of_context)
File "c:\users\chetan garg\appdata\local\programs\python\python36\lib\site-packages\IPython\core\ultratb.py", line 1318, in structured_traceback
self, etype, value, tb, tb_offset, number_of_lines_of_context
File "c:\users\chetan garg\appdata\local\programs\python\python36\lib\site-packages\IPython\core\ultratb.py", line 1186, in structured_traceback
formatted_exceptions += self.prepare_chained_exception_message(evalue.__cause__)
TypeError: must be str, not list
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "c:\users\chetan garg\appdata\local\programs\python\python36\lib\site-packages\IPython\core\interactiveshell.py", line 2034, in showtraceback
stb = value._render_traceback_()
AttributeError: 'TypeError' object has no attribute '_render_traceback_'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "c:\users\chetan garg\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\pywrap_tensorflow.py", line 58, in <module>
from tensorflow.python.pywrap_tensorflow_internal import *
File "c:\users\chetan garg\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\pywrap_tensorflow_internal.py", line 28, in <module>
_pywrap_tensorflow_internal = swig_import_helper()
File "c:\users\chetan garg\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\pywrap_tensorflow_internal.py", line 24, in swig_import_helper
_mod = imp.load_module('_pywrap_tensorflow_internal', fp, pathname, description)
File "c:\users\chetan garg\appdata\local\programs\python\python36\lib\imp.py", line 243, in load_module
return load_dynamic(name, filename, file)
File "c:\users\chetan garg\appdata\local\programs\python\python36\lib\imp.py", line 343, in load_dynamic
return _load(spec)
ImportError: DLL load failed: The specified module could not be found.
Failed to load the native TensorFlow runtime.
See https://www.tensorflow.org/install/errors
for some common reasons and solutions. Include the entire stack trace
above this error message when asking for help.
---------------------------------------------------------------------------
ImportError Traceback (most recent call last)
c:\users\chetan garg\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\pywrap_tensorflow.py in <module>
57
---> 58 from tensorflow.python.pywrap_tensorflow_internal import *
59 from tensorflow.python.pywrap_tensorflow_internal import __version__
c:\users\chetan garg\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\pywrap_tensorflow_internal.py in <module>
27 return _mod
---> 28 _pywrap_tensorflow_internal = swig_import_helper()
29 del swig_import_helper
c:\users\chetan garg\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\pywrap_tensorflow_internal.py in swig_import_helper()
23 try:
---> 24 _mod = imp.load_module('_pywrap_tensorflow_internal', fp, pathname, description)
25 finally:
c:\users\chetan garg\appdata\local\programs\python\python36\lib\imp.py in load_module(name, file, filename, details)
242 else:
--> 243 return load_dynamic(name, filename, file)
244 elif type_ == PKG_DIRECTORY:
c:\users\chetan garg\appdata\local\programs\python\python36\lib\imp.py in load_dynamic(name, path, file)
342 name=name, loader=loader, origin=path)
--> 343 return _load(spec)
344
ImportError: DLL load failed: The specified module could not be found.
During handling of the above exception, another exception occurred:
AttributeError Traceback (most recent call last)
c:\users\chetan garg\appdata\local\programs\python\python36\lib\site-packages\IPython\core\interactiveshell.py in showtraceback(self, exc_tuple, filename, tb_offset, exception_only, running_compiled_code)
2033 # in the engines. This should return a list of strings.
-> 2034 stb = value._render_traceback_()
2035 except Exception:
AttributeError: 'ImportError' object has no attribute '_render_traceback_'
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
c:\users\chetan garg\appdata\local\programs\python\python36\lib\site-packages\IPython\core\interactiveshell.py in run_code(self, code_obj, result, async_)
3334 if result is not None:
3335 result.error_in_exec = sys.exc_info()[1]
-> 3336 self.showtraceback(running_compiled_code=True)
3337 else:
3338 outflag = False
c:\users\chetan garg\appdata\local\programs\python\python36\lib\site-packages\IPython\core\interactiveshell.py in showtraceback(self, exc_tuple, filename, tb_offset, exception_only, running_compiled_code)
2035 except Exception:
2036 stb = self.InteractiveTB.structured_traceback(etype,
-> 2037 value, tb, tb_offset=tb_offset)
2038
2039 self._showtraceback(etype, value, stb)
c:\users\chetan garg\appdata\local\programs\python\python36\lib\site-packages\IPython\core\ultratb.py in structured_traceback(self, etype, value, tb, tb_offset, number_of_lines_of_context)
1416 self.tb = tb
1417 return FormattedTB.structured_traceback(
-> 1418 self, etype, value, tb, tb_offset, number_of_lines_of_context)
1419
1420
c:\users\chetan garg\appdata\local\programs\python\python36\lib\site-packages\IPython\core\ultratb.py in structured_traceback(self, etype, value, tb, tb_offset, number_of_lines_of_context)
1316 # Verbose modes need a full traceback
1317 return VerboseTB.structured_traceback(
-> 1318 self, etype, value, tb, tb_offset, number_of_lines_of_context
1319 )
1320 elif mode == 'Minimal':
c:\users\chetan garg\appdata\local\programs\python\python36\lib\site-packages\IPython\core\ultratb.py in structured_traceback(self, etype, evalue, etb, tb_offset, number_of_lines_of_context)
1184 exception = self.get_parts_of_chained_exception(evalue)
1185 if exception:
-> 1186 formatted_exceptions += self.prepare_chained_exception_message(evalue.__cause__)
1187 etype, evalue, etb = exception
1188 else:
TypeError: must be str, not list
Try uninstalling and reinstalling TensorFlow and Keras. If you use conda:
conda uninstall tensorflow
conda uninstall keras
conda install tensorflow
conda install keras
Next time, it is better to provide the code you ran, not just the error; that way, people can help you better.
I am trying to parallelize the task of correcting text across many documents with Python, so I naturally found joblib. I want each task to correct one document. Here is the structure of the code:
if __name__ == '__main__':
    lexicon = build_compact_lexicon()

    from joblib import Parallel, delayed
    import multiprocessing

    num_cores = multiprocessing.cpu_count()
    results = Parallel(n_jobs=num_cores)(
        delayed(find_errors)('GDL', i, 1, lexicon) for i in range(1798, 1820))
I am using the function find_errors, summarized here:
def find_errors(newspaper, year, month, lexicon):
    # parse the input newspaper text data using the etree parser from lxml
    # detect errors in the text
    return found_errors_type1, found_errors_type2, found_errors_type3
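For reference, the parsing step that the inner traceback points at looks roughly like this (a simplified sketch; build_xml_path is a hypothetical stand-in for my real path construction):

from pathlib import Path
from lxml import etree

def build_xml_path(newspaper, year, month):
    # Hypothetical helper; returns None when no file matches, which is
    # exactly what makes etree.parse raise "cannot parse from 'NoneType'".
    p = Path('data') / newspaper / str(year) / '{:02d}.xml'.format(month)
    return str(p) if p.exists() else None

def parse_issue(newspaper, year, month):
    xml_file_path = build_xml_path(newspaper, year, month)
    tree = etree.parse(xml_file_path)  # line 85 in the traceback below
    return tree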
Running the Parallel call above raises a few errors:
multiprocessing.pool.RemoteTraceback:
"""
Traceback (most recent call last):
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/site-packages/joblib/parallel.py", line 130, in __call__
return self.func(*args, **kwargs)
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/site-packages/joblib/parallel.py", line 72, in __call__
return [func(*args, **kwargs) for func, args, kwargs in self.items]
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/site-packages/joblib/parallel.py", line 72, in <listcomp>
return [func(*args, **kwargs) for func, args, kwargs in self.items]
File "hellowordParallel.py", line 85, in find_errors
tree = etree.parse(xml_file_path)
File "src/lxml/lxml.etree.pyx", line 3427, in lxml.etree.parse (src/lxml/lxml.etree.c:79801)
File "src/lxml/parser.pxi", line 1805, in lxml.etree._parseDocument (src/lxml/lxml.etree.c:116293)
TypeError: cannot parse from 'NoneType'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/tokenize.py", line 392, in find_cookie
line_string = line.decode('utf-8')
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xb0 in position 24: invalid start byte
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/multiprocessing/pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/site-packages/joblib/parallel.py", line 139, in __call__
tb_offset=1)
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/site-packages/joblib/format_stack.py", line 373, in format_exc
frames = format_records(records)
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/site-packages/joblib/format_stack.py", line 274, in format_records
for token in generate_tokens(linereader):
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/tokenize.py", line 514, in _tokenize
line = readline()
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/site-packages/joblib/format_stack.py", line 265, in linereader
line = getline(file, lnum[0])
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/linecache.py", line 16, in getline
lines = getlines(filename, module_globals)
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/linecache.py", line 47, in getlines
return updatecache(filename, module_globals)
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/linecache.py", line 136, in updatecache
with tokenize.open(fullname) as fp:
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/tokenize.py", line 456, in open
encoding, lines = detect_encoding(buffer.readline)
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/tokenize.py", line 433, in detect_encoding
encoding = find_cookie(first)
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/tokenize.py", line 397, in find_cookie
raise SyntaxError(msg)
File "<string>", line None
SyntaxError: invalid or missing encoding declaration for '/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/site-packages/lxml/etree.so'
"""
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "hellowordParallel.py", line 160, in <module>
results = Parallel(n_jobs=num_cores)(delayed(find_errors)('GDL', i, 1, lexicon) for i in range(1798, 1820))
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/site-packages/joblib/parallel.py", line 810, in __call__
self.retrieve()
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/site-packages/joblib/parallel.py", line 727, in retrieve
self._output.extend(job.get())
File "/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/multiprocessing/pool.py", line 608, in get
raise self._value
SyntaxError: invalid or missing encoding declaration for '/home/mbl/anaconda3/envs/OCR_Correction/lib/python3.5/site-packages/lxml/etree.so'
I don't understand whether this is due to something configuration-related or whether my function just isn't suited to a parallel implementation... (I'd guess it should be.)
Has this happened to any of you before?
I hope my question is clear and there is enough information for someone to help!