Pandas files not found - python-3.x

I can't understand why pandas can't find my fer2013.csv. I have this code:
class DataManager(object):
    """Class for loading fer2013 emotion classification dataset or
    imdb gender classification dataset."""
    def __init__(self, dataset_name='imdb', dataset_path=None, image_size=(48, 48)):
        self.dataset_name = dataset_name
        self.dataset_path = dataset_path
        self.image_size = image_size
        if self.dataset_path != None:
            self.dataset_path = dataset_path
        elif self.dataset_name == 'imdb':
            self.dataset_path = '../datasets/imdb_crop/imdb.mat'
        elif self.dataset_name == 'fer2013':
            self.dataset_path = '../datasets/fer2013/fer2013.csv'
        elif self.dataset_name == 'KDEF':
            self.dataset_path = '../datasets/KDEF/'
        else:
            raise Exception('Incorrect dataset name, please input imdb or fer2013')
I get this error:
Traceback (most recent call last):
File "train_model.py", line 55, in <module>
faces, emotions = data_loader.get_data()
File "C:\Users\devel\Documents\Proyectos\erecog\utils\datasets.py", line 31, in get_data
ground_truth_data = self._load_fer2013()
File "C:\Users\devel\Documents\Proyectos\erecog\utils\datasets.py", line 57, in _load_fer2013
data = pd.read_csv(self.dataset_path)
File "C:\Users\devel\Documents\Proyectos\erecog\env\lib\site-packages\pandas\io\parsers.py", line 676, in parser_f
return _read(filepath_or_buffer, kwds)
File "C:\Users\devel\Documents\Proyectos\erecog\env\lib\site-packages\pandas\io\parsers.py", line 448, in _read
parser = TextFileReader(fp_or_buf, **kwds)
File "C:\Users\devel\Documents\Proyectos\erecog\env\lib\site-packages\pandas\io\parsers.py", line 880, in __init__
self._make_engine(self.engine)
File "C:\Users\devel\Documents\Proyectos\erecog\env\lib\site-packages\pandas\io\parsers.py", line 1114, in _make_engine
self._engine = CParserWrapper(self.f, **self.options)
File "C:\Users\devel\Documents\Proyectos\erecog\env\lib\site-packages\pandas\io\parsers.py", line 1891, in __init__
self._reader = parsers.TextReader(src, **kwds)
File "pandas\_libs\parsers.pyx", line 374, in pandas._libs.parsers.TextReader.__cinit__
File "pandas\_libs\parsers.pyx", line 674, in pandas._libs.parsers.TextReader._setup_parser_source
FileNotFoundError: [Errno 2] File ../datasets/fer2013/fer2013.csv does not exist: '../datasets/fer2013/fer2013.csv'
I have that file in that folder; I don't know why it can't find my file.

I solved this error. Thanks to @DYZ I could see the directory the process was working in: the process was running outside the 'utils' folder. I changed the path to './datasets/fer2013/fer2013.csv', with the dot '.' before the '/'.
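For anyone hitting the same thing: relative paths like '../datasets/...' are resolved against the process's current working directory, not against the file that defines the class. A minimal sketch of how to check and avoid this (the directory layout assumed below is inferred from the paths in the question):
import os
from pathlib import Path

# Relative paths are resolved from here, wherever the script was launched:
print(os.getcwd())

# More robust: anchor the path to this source file instead of the CWD.
# Assumes datasets/ sits one level above the folder holding this file.
DATASETS_DIR = Path(__file__).resolve().parent.parent / 'datasets'
fer2013_path = DATASETS_DIR / 'fer2013' / 'fer2013.csv'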

Related

BERTopic: pop from empty list IndexError while Inferencing

I have trained a BERTopic model on Colab, and now that I am trying to use it locally I get this IndexError:
IndexError: Failed in nopython mode pipeline (step: analyzing bytecode)
pop from empty list
The code I used is:
from bertopic import BERTopic  # import added; the original snippet omitted it
from sentence_transformers import SentenceTransformer

sentence_model = SentenceTransformer('KBLab/sentence-bert-swedish-cased')
model = BERTopic.load('bertopic_model')
text = "my text here for example"
text = [text]
embeddings = sentence_model.encode(text)
topic, _ = model.transform(text, embeddings)
The last line gives me the error. Notably, the same code works just fine on Colab; I'm not sure what's going on locally. My numba and other related libraries are up to date, same as on Colab.
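Since the failure happens inside numba's bytecode analysis, one diagnostic worth trying (a suggestion, not part of the original post) is to print the exact interpreter and library versions in both environments; numba compiles Python bytecode, so even a minor Python version difference between Colab and the local machine can trigger this:
import sys
from importlib.metadata import version  # Python 3.8+; use pkg_resources on older interpreters

# Run this in both environments and diff the output.
print("python:", sys.version)
for pkg in ("numba", "umap-learn", "scikit-learn", "bertopic"):
    print(f"{pkg:14s}:", version(pkg))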
Full Traceback:
Traceback (most recent call last):
File "/home/vaibhav/.local/lib/python3.10/site-packages/flask/app.py", line 2525, in wsgi_app
response = self.full_dispatch_request()
File "/home/vaibhav/.local/lib/python3.10/site-packages/flask/app.py", line 1822, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/vaibhav/.local/lib/python3.10/site-packages/flask/app.py", line 1820, in full_dispatch_request
rv = self.dispatch_request()
File "/home/vaibhav/.local/lib/python3.10/site-packages/flask/app.py", line 1796, in dispatch_request
return self.ensure_sync(self.view_functions[rule.endpoint])(**view_args)
File "app.py", line 20, in reference_prediction
preds = data_process(input_api)
File "data_process.py", line 63, in data_process
topic, _ = topic_model_mi.transform(text, embeddings)
File "/home/vaibhav/.local/lib/python3.10/site-packages/bertopic/_bertopic.py", line 423, in transform
umap_embeddings = self.umap_model.transform(embeddings)
File "/home/vaibhav/.local/lib/python3.10/site-packages/umap/umap_.py", line 2859, in transform
dmat = pairwise_distances(
File "/home/vaibhav/.local/lib/python3.10/site-packages/sklearn/metrics/pairwise.py", line 2022, in pairwise_distances
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
File "/home/vaibhav/.local/lib/python3.10/site-packages/sklearn/metrics/pairwise.py", line 1563, in _parallel_pairwise
return func(X, Y, **kwds)
File "/home/vaibhav/.local/lib/python3.10/site-packages/sklearn/metrics/pairwise.py", line 1607, in _pairwise_callable
out[i, j] = metric(X[i], Y[j], **kwds)
File "/home/vaibhav/.local/lib/python3.10/site-packages/numba/core/dispatcher.py", line 487, in _compile_for_args
raise e
File "/home/vaibhav/.local/lib/python3.10/site-packages/numba/core/dispatcher.py", line 420, in _compile_for_args
return_val = self.compile(tuple(argtypes))
File "/home/vaibhav/.local/lib/python3.10/site-packages/numba/core/dispatcher.py", line 965, in compile
cres = self._compiler.compile(args, return_type)
File "/home/vaibhav/.local/lib/python3.10/site-packages/numba/core/dispatcher.py", line 125, in compile
status, retval = self._compile_cached(args, return_type)
File "/home/vaibhav/.local/lib/python3.10/site-packages/numba/core/dispatcher.py", line 139, in _compile_cached
retval = self._compile_core(args, return_type)
File "/home/vaibhav/.local/lib/python3.10/site-packages/numba/core/dispatcher.py", line 152, in _compile_core
cres = compiler.compile_extra(self.targetdescr.typing_context,
File "/home/vaibhav/.local/lib/python3.10/site-packages/numba/core/compiler.py", line 716, in compile_extra
return pipeline.compile_extra(func)
File "/home/vaibhav/.local/lib/python3.10/site-packages/numba/core/compiler.py", line 452, in compile_extra
return self._compile_bytecode()
File "/home/vaibhav/.local/lib/python3.10/site-packages/numba/core/compiler.py", line 520, in _compile_bytecode
return self._compile_core()
File "/home/vaibhav/.local/lib/python3.10/site-packages/numba/core/compiler.py", line 499, in _compile_core
raise e
File "/home/vaibhav/.local/lib/python3.10/site-packages/numba/core/compiler.py", line 486, in _compile_core
pm.run(self.state)
File "/home/vaibhav/.local/lib/python3.10/site-packages/numba/core/compiler_machinery.py", line 368, in run
raise patched_exception
File "/home/vaibhav/.local/lib/python3.10/site-packages/numba/core/compiler_machinery.py", line 356, in run
self._runPass(idx, pass_inst, state)
File "/home/vaibhav/.local/lib/python3.10/site-packages/numba/core/compiler_lock.py", line 35, in _acquire_compile_lock
return func(*args, **kwargs)
File "/home/vaibhav/.local/lib/python3.10/site-packages/numba/core/compiler_machinery.py", line 311, in _runPass
mutated |= check(pss.run_pass, internal_state)
File "/home/vaibhav/.local/lib/python3.10/site-packages/numba/core/compiler_machinery.py", line 273, in check
mangled = func(compiler_state)
File "/home/vaibhav/.local/lib/python3.10/site-packages/numba/core/untyped_passes.py", line 86, in run_pass
func_ir = interp.interpret(bc)
File "/home/vaibhav/.local/lib/python3.10/site-packages/numba/core/interpreter.py", line 1321, in interpret
flow.run()
File "/home/vaibhav/.local/lib/python3.10/site-packages/numba/core/byteflow.py", line 107, in run
runner.dispatch(state)
File "/home/vaibhav/.local/lib/python3.10/site-packages/numba/core/byteflow.py", line 282, in dispatch
fn(state, inst)
File "/home/vaibhav/.local/lib/python3.10/site-packages/numba/core/byteflow.py", line 1061, in _binaryop
rhs = state.pop()
File "/home/vaibhav/.local/lib/python3.10/site-packages/numba/core/byteflow.py", line 1344, in pop
return self._stack.pop()
IndexError: Failed in nopython mode pipeline (step: analyzing bytecode)
pop from empty list

Is it possible to iterate through a Tensor in graph mode?

I am trying to integrate aleju's imgaug into the TFOD API. I noticed that you cannot iterate through Tensors in graph mode. I looked for a solution and tried many suggestions, but none of them worked for my case. Do you know any workaround?
import imgaug.augmenters as iaa
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
from tensorflow.python.framework.ops import EagerTensor
import tensorflow.compat.v1 as tf
import numpy as np

augseq = iaa.Sequential([
    # augmentation options
], random_order=True)

@tf.function
def augment(image, boxes):
    image_np = image.numpy().astype(np.uint8) if type(image) == EagerTensor else image
    boxes_np = boxes.numpy() if type(boxes) == EagerTensor else boxes
    width, height, _ = image_np.shape
    bbs = []
    for i in range(len(boxes_np)):
        box = boxes_np[i]
        ymin, xmin, ymax, xmax = box.numpy()  # the line the traceback points at
        bbs.append(BoundingBox(
            x1=xmin*width, y1=ymin*height,
            x2=xmax*width, y2=ymax*height,))
    bbs = BoundingBoxesOnImage(bbs, shape=image_np.shape)
    image_aug, bbs_aug = augseq(image=image_np, bounding_boxes=bbs)  # float np.ndarray
    bbs_aug = bbs_aug.remove_out_of_image().clip_out_of_image()
    boxes_aug = []
    for bb in bbs_aug:
        boxes_aug.append([bb.y1/height, bb.x1/width, bb.y2/height, bb.x2/width])
    boxes_aug = np.array(boxes_aug)
    return image_aug, boxes_aug
Stack Trace:
Traceback (most recent call last):
File "/content/models/research/object_detection/model_main_tf2.py", line 115, in <module>
tf.compat.v1.app.run()
File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/platform/app.py", line 40, in run
_run(main=main, argv=argv, flags_parser=_parse_flags_tolerate_undef)
File "/usr/local/lib/python3.7/dist-packages/absl/app.py", line 303, in run
_run_main(main, args)
File "/usr/local/lib/python3.7/dist-packages/absl/app.py", line 251, in _run_main
sys.exit(main(argv))
File "/content/models/research/object_detection/model_main_tf2.py", line 112, in main
record_summaries=FLAGS.record_summaries)
File "/usr/local/lib/python3.7/dist-packages/object_detection/model_lib_v2.py", line 558, in train_loop
train_dataset_fn)
File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/util/deprecation.py", line 348, in new_func
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/distribute_lib.py", line 1199, in experimental_distribute_datasets_from_function
return self.distribute_datasets_from_function(dataset_fn, options)
File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/distribute_lib.py", line 1191, in distribute_datasets_from_function
dataset_fn, options)
File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/tpu_strategy.py", line 979, in _distribute_datasets_from_function
options=options)
File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/input_lib.py", line 181, in get_distributed_datasets_from_function
build=build,
File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/input_lib.py", line 1618, in __init__
self.build()
File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/input_lib.py", line 1639, in build
self._input_contexts, self._input_workers, self._dataset_fn))
File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/input_lib.py", line 2350, in _create_datasets_from_function_with_input_context
dataset = dataset_fn(ctx)
File "/usr/local/lib/python3.7/dist-packages/object_detection/model_lib_v2.py", line 553, in train_dataset_fn
input_context=input_context)
File "/usr/local/lib/python3.7/dist-packages/object_detection/inputs.py", line 906, in train_input
reduce_to_frame_fn=reduce_to_frame_fn)
File "/usr/local/lib/python3.7/dist-packages/object_detection/builders/dataset_builder.py", line 258, in build
batch_size, input_reader_config)
File "/usr/local/lib/python3.7/dist-packages/object_detection/builders/dataset_builder.py", line 237, in dataset_map_fn
fn_to_map, num_parallel_calls=num_parallel_calls)
File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/util/deprecation.py", line 348, in new_func
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/data/ops/dataset_ops.py", line 3886, in map_with_legacy_function
use_legacy_function=True))
File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/data/ops/dataset_ops.py", line 5505, in __init__
use_legacy_function=use_legacy_function)
File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/data/ops/dataset_ops.py", line 4540, in __init__
self._function.add_to_graph(ops.get_default_graph())
File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/function.py", line 544, in add_to_graph
self._create_definition_if_needed()
File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/function.py", line 380, in _create_definition_if_needed
self._create_definition_if_needed_impl()
File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/function.py", line 407, in _create_definition_if_needed_impl
capture_resource_var_by_value=self._capture_resource_var_by_value)
File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/function.py", line 970, in func_graph_from_py_func
outputs = func(*func_graph.inputs)
File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/data/ops/dataset_ops.py", line 4458, in wrapped_fn
ret = wrapper_helper(*args)
File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/data/ops/dataset_ops.py", line 4440, in wrapper_helper
ret = autograph.tf_convert(self._func, ag_ctx)(*nested_args)
File "/usr/local/lib/python3.7/dist-packages/tensorflow/python/autograph/impl/api.py", line 699, in wrapper
raise e.ag_error_metadata.to_exception(e)
AttributeError: in user code:
File "/usr/local/lib/python3.7/dist-packages/object_detection/inputs.py", line 886, in transform_and_pad_input_data_fn *
tensor_dict = pad_input_data_to_static_shapes(
File "/usr/local/lib/python3.7/dist-packages/object_detection/inputs.py", line 272, in transform_input_data *
out_tensor_dict = data_augmentation_fn(out_tensor_dict)
File "/usr/local/lib/python3.7/dist-packages/object_detection/inputs.py", line 623, in augment_input_data *
tensor_dict = preprocessor.preprocess(
File "/usr/local/lib/python3.7/dist-packages/object_detection/core/preprocessor.py", line 4812, in preprocess *
results = func(*args, **params)
File "/usr/local/lib/python3.7/dist-packages/object_detection/core/preprocessor.py", line 4422, in _adjust_imgaug *
adjusted_image, adjusted_boxes = tf.cast(imgaug_utils.augment(image,boxes), tf.float32)
File "/usr/local/lib/python3.7/dist-packages/object_detection/core/imgaug_utils.py", line 24, in augment *
ymin, xmin, ymax, xmax = box.numpy()
AttributeError: 'Tensor' object has no attribute 'numpy'
Here is what I tried that did not work (see the sketch after this list):
- Enabling eager execution (it is the default in TF 2.x).
- Decorating/not decorating the function with @tf.function.
- Creating a TF session and trying to eval() or run(), which gives:
  InvalidArgumentError: You must feed a value for placeholder tensor 'while/Placeholder' with dtype int32
- Trying on both TPU and CPU.
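One workaround commonly used in this situation (a sketch under assumptions, not confirmed by the original post) is to keep augment() as a plain-numpy function and wrap it with tf.numpy_function, which executes it eagerly even inside a graph-mode dataset pipeline. Inside the wrapped function the inputs arrive as np.ndarray, so box.numpy() must become plain box, and the returned dtypes must match Tout (e.g. cast the boxes to np.float32 before returning):
import tensorflow as tf

def tf_augment(image, boxes):
    # augment() is the numpy-based function above, minus the .numpy() calls.
    image_aug, boxes_aug = tf.numpy_function(
        func=augment,
        inp=[image, boxes],
        Tout=[tf.uint8, tf.float32])
    # numpy_function drops static shape info; restore what is known.
    image_aug.set_shape([None, None, 3])
    boxes_aug.set_shape([None, 4])
    return image_aug, boxes_aug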

Odoo: Error on the second creation of a project deadline

In the Odoo framework, I have all projects created already; now I am stuck on project deadlines. When I want to create a project deadline, the first commit passes successfully, but the second gives me the error below:
Traceback (most recent call last):
File "D:\Odoo 11.0\server\odoo\fields.py", line 936, in __get__
value = record.env.cache.get(record, self)
File "D:\Odoo 11.0\server\odoo\api.py", line 960, in get
value = self._data[field][record.id][key]
KeyError: <odoo.api.Environment object at 0x000000000B34F550>
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "D:\Odoo 11.0\server\odoo\http.py", line 647, in _handle_exception
return super(JsonRequest, self)._handle_exception(exception)
File "D:\Odoo 11.0\server\odoo\http.py", line 307, in _handle_exception
raise pycompat.reraise(type(exception), exception, sys.exc_info()[2])
File "D:\Odoo 11.0\server\odoo\tools\pycompat.py", line 87, in reraise
raise value
File "D:\Odoo 11.0\server\odoo\http.py", line 689, in dispatch
result = self._call_function(**self.params)
File "D:\Odoo 11.0\server\odoo\http.py", line 339, in _call_function
return checked_call(self.db, *args, **kwargs)
File "D:\Odoo 11.0\server\odoo\service\model.py", line 97, in wrapper
return f(dbname, *args, **kwargs)
File "D:\Odoo 11.0\server\odoo\http.py", line 332, in checked_call
result = self.endpoint(*a, **kw)
File "D:\Odoo 11.0\server\odoo\http.py", line 933, in __call__
return self.method(*args, **kw)
File "D:\Odoo 11.0\server\odoo\http.py", line 512, in response_wrap
response = f(*args, **kw)
File "D:\Odoo 11.0\server\odoo\addons\web\controllers\main.py", line 872, in search_read
return self.do_search_read(model, fields, offset, limit, domain, sort)
File "D:\Odoo 11.0\server\odoo\addons\web\controllers\main.py", line 894, in do_search_read
offset=offset or 0, limit=limit or False, order=sort or False)
File "D:\Odoo 11.0\server\odoo\models.py", line 4169, in search_read
result = records.read(fields)
File "D:\Odoo 11.0\server\odoo\models.py", line 2535, in read
values[name] = field.convert_to_read(record[name], record, use_name_get)
File "D:\Odoo 11.0\server\odoo\models.py", line 4688, in __getitem__
return self._fields[key].__get__(self, type(self))
File "D:\Odoo 11.0\server\odoo\fields.py", line 940, in __get__
self.determine_value(record)
File "D:\Odoo 11.0\server\odoo\fields.py", line 1051, in determine_value
self.compute_value(recs)
File "D:\Odoo 11.0\server\odoo\fields.py", line 1007, in compute_value
self._compute_value(records)
File "D:\Odoo 11.0\server\odoo\fields.py", line 998, in _compute_value
getattr(records, self.compute)()
File "D:\Odoo 11.0\server\odoo\addons\dev\models\delais.py", line 24, in _delai_arret
domaine = ('project_ids', '=', self.project_ids).id
File "D:\Odoo 11.0\server\odoo\fields.py", line 934, in __get__
record.ensure_one()
File "D:\Odoo 11.0\server\odoo\models.py", line 4296, in ensure_one
raise ValueError("Expected singleton: %s" % self)
ValueError: Expected singleton: delais.delais(2, 18)
And this is the function that calculates the sum of the mission counts of each project; this is where I get the error. I couldn't figure out where the problem is exactly. Please help me get rid of it; thanks for your support.
def _delai_arret(self):
    domaine = ('project_ids', '=', self.project_ids).id  # line 24, where the traceback points
    dict = self.env['mission.mission'].search_read([domaine], ['nbr_mission'])
    # print(dict)
    if not dict:
        pass
    else:
        somme = 0
        for key in dict:
            # print(key['nbr_mission'])
            somme = somme + key['nbr_mission'] / 30
            # print('la somme est : {somme}')
        self.delai_arr_mois = somme
        # print(somme)
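The ValueError happens because a compute method receives a multi-record recordset (delais.delais(2, 18)) while self.project_ids assumes a single record; in addition, ('project_ids', '=', ...).id takes .id of a tuple, which is not valid. A sketch of the usual shape of the fix (an assumption about the model, since the field definitions are not shown):
def _delai_arret(self):
    # Compute methods must handle multi-record recordsets: loop over self.
    for record in self:
        missions = record.env['mission.mission'].search_read(
            [('project_ids', 'in', record.project_ids.ids)],
            ['nbr_mission'])
        record.delai_arr_mois = sum(m['nbr_mission'] / 30 for m in missions)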

Do I have to restart the Colaboratory runtime every time?

I cannot run my code in Google Colaboratory twice without restarting the runtime. Is there a way to run it without restarting the runtime?
My code uses the TensorD library to compute an approximation of a random 3x4x4 tensor using the CP-ALS algorithm. (This is an example taken from https://github.com/Large-Scale-Tensor-Decomposition/tensorD.)
!git clone https://github.com/Large-Scale-Tensor-Decomposition/tensorD.git
import sys
import time
sys.path.append("/content/tensorD")
from tensorD.factorization.env import Environment
from tensorD.dataproc.provider import Provider
from tensorD.demo.DataGenerator import *
from tensorD.factorization.cp import CP_ALS
# generate a random tensor with shape 3x4x4
t = time.time()
X = synthetic_data_cp([3, 4, 4], 7)
data_provider = Provider()
data_provider.full_tensor = lambda: X
env = Environment(data_provider, summary_path='/tmp/cp_' + '7')
cp = CP_ALS(env)
args = CP_ALS.CP_Args(rank=7, validation_internal=1)
# build CP model with arguments
cp.build_model(args)
# train CP model with a maximum of 50 iterations
cp.train(50)
# obtain factor matrices from trained model
factor_matrices = cp.factors
# obtain scaling vector from trained model
lambdas = cp.lambdas
for matrix in factor_matrices:
    print(matrix)
elapsed = time.time() - t
print(elapsed)
When I run it the first time I have no problem. When I run it again (without restarting the runtime) I obtain:
CP model initial finish
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
1333 try:
-> 1334 return fn(*args)
1335 except errors.OpError as e:
7 frames
InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder' with dtype float and shape [3,4,4]
[[{{node Placeholder}}]]
During handling of the above exception, another exception occurred:
InvalidArgumentError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
1346 pass
1347 message = error_interpolation.interpolate(message, self._graph)
-> 1348 raise type(e)(node_def, op, message)
1349
1350 def _extend_graph(self):
InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder' with dtype float and shape [3,4,4]
[[node Placeholder (defined at /content/tensorD/tensorD/factorization/cp.py:69) ]]
Caused by op 'Placeholder', defined at:
File "/usr/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "/usr/local/lib/python3.6/dist-packages/traitlets/config/application.py", line 658, in launch_instance
app.start()
File "/usr/local/lib/python3.6/dist-packages/ipykernel/kernelapp.py", line 477, in start
ioloop.IOLoop.instance().start()
File "/usr/local/lib/python3.6/dist-packages/tornado/ioloop.py", line 888, in start
handler_func(fd_obj, events)
File "/usr/local/lib/python3.6/dist-packages/tornado/stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/zmq/eventloop/zmqstream.py", line 450, in _handle_events
self._handle_recv()
File "/usr/local/lib/python3.6/dist-packages/zmq/eventloop/zmqstream.py", line 480, in _handle_recv
self._run_callback(callback, msg)
File "/usr/local/lib/python3.6/dist-packages/zmq/eventloop/zmqstream.py", line 432, in _run_callback
callback(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tornado/stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/kernelbase.py", line 283, in dispatcher
return self.dispatch_shell(stream, msg)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/kernelbase.py", line 235, in dispatch_shell
handler(stream, idents, msg)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/kernelbase.py", line 399, in execute_request
user_expressions, allow_stdin)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/ipkernel.py", line 196, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/zmqshell.py", line 533, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py", line 2718, in run_cell
interactivity=interactivity, compiler=compiler, result=result)
File "/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py", line 2822, in run_ast_nodes
if self.run_code(code, result):
File "/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py", line 2882, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-4-4d2250ec007b>", line 21, in <module>
cp.build_model(args)
File "/content/tensorD/tensorD/factorization/cp.py", line 69, in build_model
input_data = tf.placeholder(tf.float32, shape=self._env.full_shape())
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/array_ops.py", line 2077, in placeholder
return gen_array_ops.placeholder(dtype=dtype, shape=shape, name=name)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/gen_array_ops.py", line 5791, in placeholder
"Placeholder", dtype=dtype, shape=shape, name=name)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py", line 788, in _apply_op_helper
op_def=op_def)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/deprecation.py", line 507, in new_func
return func(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py", line 3300, in create_op
op_def=op_def)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py", line 1801, in __init__
self._traceback = tf_stack.extract_stack()
InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'Placeholder' with dtype float and shape [3,4,4]
[[node Placeholder (defined at /content/tensorD/tensorD/factorization/cp.py:69) ]]
Any help will be appreciated!
This seems to mostly be a question about TensorFlow. Do you get what you want outside of Colab? I'm not totally clear about what you are expecting, but import tensorflow as tf; tf.reset_default_graph() at the top of your snippet seems sensible and squelches the error.
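A minimal sketch of that suggestion: TensorD builds TF1-style placeholders into the default graph, so each rerun of the cell adds a second, never-fed Placeholder. Clearing the graph at the top of the cell lets the snippet be rerun without restarting the runtime:
import tensorflow as tf

# Drop the stale graph (and its unfed Placeholder) from the previous
# run before TensorD builds the CP model again.
tf.reset_default_graph()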

How can I convert an .arff file to CSV in PyCharm on a Mac?

I'm new to data analysis and looking for help. I converted an .arff file to CSV using PyCharm, but I am not sure I did it the right way. It is possible, right? If so, how? Also, when I tried this code:
import pandas as pd
data_frame = pd.read_csv("train.csv")
I got this error:
Traceback (most recent call last):
File "/Users/cristinamulas/PycharmProjects/exam/exam_p.py", line 53, in
data_frame = pd.read_csv("train.csv")
File "/Users/cristinamulas/exam/lib/python3.7/site-packages/pandas/io/parsers.py", line 678, in parser_f
return _read(filepath_or_buffer, kwds)
File "/Users/cristinamulas/exam/lib/python3.7/site-packages/pandas/io/parsers.py", line 440, in _read
parser = TextFileReader(filepath_or_buffer, **kwds)
File "/Users/cristinamulas/exam/lib/python3.7/site-packages/pandas/io/parsers.py", line 787, in init
self._make_engine(self.engine)
File "/Users/cristinamulas/exam/lib/python3.7/site-packages/pandas/io/parsers.py", line 1014, in _make_engine
self._engine = CParserWrapper(self.f, **self.options)
File "/Users/cristinamulas/exam/lib/python3.7/site-packages/pandas/io/parsers.py", line 1708, in init
self._reader = parsers.TextReader(src, **kwds)
File "pandas/_libs/parsers.pyx", line 384, in pandas._libs.parsers.TextReader.cinit
File "pandas/_libs/parsers.pyx", line 695, in pandas._libs.parsers.TextReader._setup_parser_source
FileNotFoundError: File b'train.csv' does not exist
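For the conversion itself, here is a minimal sketch of one way to do it using scipy.io.arff (an assumption; the post does not show its conversion code, and 'train.arff' is a hypothetical file name):
import pandas as pd
from scipy.io import arff

# Load the ARFF file; 'train.arff' is a hypothetical name.
data, meta = arff.loadarff("train.arff")
df = pd.DataFrame(data)  # nominal attributes arrive as bytes; decode if needed
df.to_csv("train.csv", index=False)
As for the FileNotFoundError itself: pd.read_csv("train.csv") resolves the name against the current working directory, so either run the script from the folder containing train.csv or pass an absolute path.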
