STAN on Databricks - AttributeError: 'ConsoleBuffer' object has no attribute 'closed' - databricks

Running Stan (PyStan) on Databricks Runtime 8.2 ML throws the following error.
To reproduce, just run the simple example from https://pystan.readthedocs.io/en/latest/
It seems the ConsoleBuffer class doesn't implement the closed attribute. Have others run into this issue? Are there any recommended workarounds? I am currently using a single-node cluster and would ideally prefer not to run this on a local machine.
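For reference, the snippet below is roughly the getting-started example from the PyStan documentation that triggers the error; the Stan program body is paraphrased from the docs, so treat it as a sketch rather than an exact copy.

import stan

schools_code = """
data {
  int<lower=0> J;         // number of schools
  real y[J];              // estimated treatment effects
  real<lower=0> sigma[J]; // standard error of effect estimates
}
parameters {
  real mu;                // population treatment effect
  real<lower=0> tau;      // standard deviation in treatment effects
  vector[J] eta;          // unscaled deviation from mu by school
}
transformed parameters {
  vector[J] theta = mu + tau * eta;
}
model {
  target += normal_lpdf(eta | 0, 1);
  target += normal_lpdf(y | theta, sigma);
}
"""

schools_data = {"J": 8,
                "y": [28, 8, -3, 7, -1, 1, 18, 12],
                "sigma": [15, 10, 16, 11, 9, 11, 10, 18]}

posterior = stan.build(schools_code, data=schools_data)
fit = posterior.sample(num_chains=4, num_samples=1000)
eta = fit["eta"]  # array with shape (8, 4000)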
Stack Trace
AttributeError Traceback (most recent call last)
<command-261559943577864> in <module>
3 "sigma": [15, 10, 16, 11, 9, 11, 10, 18]}
4
----> 5 posterior = stan.build(schools_code, data=schools_data)
6 fit = posterior.sample(num_chains=4, num_samples=1000)
7 eta = fit["eta"] # array with shape (8, 4000)
/databricks/python/lib/python3.8/site-packages/stan/model.py in build(program_code, data, random_seed)
468
469 try:
--> 470 return asyncio.run(go())
471 except KeyboardInterrupt:
472 return # type: ignore
/databricks/python/lib/python3.8/asyncio/runners.py in run(main, debug)
41 events.set_event_loop(loop)
42 loop.set_debug(debug)
---> 43 return loop.run_until_complete(main)
44 finally:
45 try:
/databricks/python/lib/python3.8/asyncio/base_events.py in run_until_complete(self, future)
614 raise RuntimeError('Event loop stopped before Future completed.')
615
--> 616 return future.result()
617
618 def stop(self):
/databricks/python/lib/python3.8/site-packages/stan/model.py in go()
438 async def go():
439 io = ConsoleIO()
--> 440 io.error("<info>Building...</info>")
441 async with stan.common.HttpstanClient() as client:
442 # Check to see if model is in cache.
/databricks/python/lib/python3.8/site-packages/clikit/api/io/io.py in error(self, string, flags)
84 The string is formatted before it is written to the output.
85 """
---> 86 self._error_output.write(string, flags=flags)
87
88 def error_line(self, string, flags=None): # type: (str, Optional[int]) -> None
/databricks/python/lib/python3.8/site-packages/clikit/api/io/output.py in write(self, string, flags, new_line)
59 formatted += "\n"
60
---> 61 self._stream.write(to_str(formatted))
62
63 def write_line(self, string, flags=None): # type: (str, Optional[int]) -> None
/databricks/python/lib/python3.8/site-packages/clikit/io/output_stream/stream_output_stream.py in write(self, string)
19 Writes a string to the stream.
20 """
---> 21 if self.is_closed():
22 raise io.UnsupportedOperation("Cannot write to a closed input.")
23
/databricks/python/lib/python3.8/site-packages/clikit/io/output_stream/stream_output_stream.py in is_closed(self)
114 Returns whether the stream is closed.
115 """
--> 116 return self._stream.closed
AttributeError: 'ConsoleBuffer' object has no attribute 'closed'

After trying some older clusters, I realized that PyStan 3 is a complete rewrite. So one workaround is to go back to pystan==2.19.1.1.
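A minimal sketch of that workaround, assuming a notebook-scoped install and the PyStan 2 API (which compiles and samples differently from PyStan 3):

# install the last PyStan 2 release into the notebook environment
%pip install pystan==2.19.1.1

import pystan

# PyStan 2 compiles the model explicitly, then samples from the compiled model
sm = pystan.StanModel(model_code=schools_code)
fit = sm.sampling(data=schools_data, iter=2000, chains=4)
eta = fit.extract()["eta"]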

Related

Error received when retrieving dataset in fast.ai: TypeError: '<' not supported between instances of 'L' and 'int'

I am following this article on Medium for this contest.
Everything seems fine up to the point where I retrieve the dataset, where I get:
TypeError: '<' not supported between instances of 'L' and 'int'
My code is:
img_pipe = Pipeline([get_filenames, open_ms_tif])
mask_pipe = Pipeline([label_func, partial(open_tif, cls=TensorMask)])
db = DataBlock(blocks=(TransformBlock(img_pipe),
                       TransformBlock(mask_pipe)),
               splitter=RandomSplitter(valid_pct=0.2, seed=42)
               )
ds = db.datasets(source=train_files)
dl = db.dataloaders(source=train_files, bs=4)
train_files is a list of Paths. Here are the first five:
[Path('nasa_rwanda_field_boundary_competition/nasa_rwanda_field_boundary_competition_source_train/nasa_rwanda_field_boundary_competition_source_train_09_2021_08/B01.tif'),
Path('nasa_rwanda_field_boundary_competition/nasa_rwanda_field_boundary_competition_source_train/nasa_rwanda_field_boundary_competition_source_train_39_2021_04/B01.tif'),
Path('nasa_rwanda_field_boundary_competition/nasa_rwanda_field_boundary_competition_source_train/nasa_rwanda_field_boundary_competition_source_train_12_2021_11/B01.tif'),
Path('nasa_rwanda_field_boundary_competition/nasa_rwanda_field_boundary_competition_source_train/nasa_rwanda_field_boundary_competition_source_train_06_2021_10/B01.tif'),
Path('nasa_rwanda_field_boundary_competition/nasa_rwanda_field_boundary_competition_source_train/nasa_rwanda_field_boundary_competition_source_train_08_2021_08/B01.tif')]
The full stack trace is:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Input In [66], in <cell line: 10>()
2 mask_pipe = Pipeline([label_func, partial(open_tif, cls=TensorMask)])
4 db = DataBlock(blocks=(TransformBlock(img_pipe),
5 TransformBlock(mask_pipe)),
6 splitter=RandomSplitter(valid_pct=0.2, seed=42)
7 )
---> 10 ds = db.datasets(source=train_files)
11 dl = db.dataloaders(source=train_files, bs=4)
File /usr/local/lib/python3.9/dist-packages/fastai/data/block.py:147, in DataBlock.datasets(self, source, verbose)
145 splits = (self.splitter or RandomSplitter())(items)
146 pv(f"{len(splits)} datasets of sizes {','.join([str(len(s)) for s in splits])}", verbose)
--> 147 return Datasets(items, tfms=self._combine_type_tfms(), splits=splits, dl_type=self.dl_type, n_inp=self.n_inp, verbose=verbose)
File /usr/local/lib/python3.9/dist-packages/fastai/data/core.py:451, in Datasets.__init__(self, items, tfms, tls, n_inp, dl_type, **kwargs)
442 def __init__(self,
443 items:list=None, # List of items to create `Datasets`
444 tfms:list|Pipeline=None, # List of `Transform`(s) or `Pipeline` to apply
(...)
448 **kwargs
449 ):
450 super().__init__(dl_type=dl_type)
--> 451 self.tls = L(tls if tls else [TfmdLists(items, t, **kwargs) for t in L(ifnone(tfms,[None]))])
452 self.n_inp = ifnone(n_inp, max(1, len(self.tls)-1))
File /usr/local/lib/python3.9/dist-packages/fastai/data/core.py:451, in <listcomp>(.0)
442 def __init__(self,
443 items:list=None, # List of items to create `Datasets`
444 tfms:list|Pipeline=None, # List of `Transform`(s) or `Pipeline` to apply
(...)
448 **kwargs
449 ):
450 super().__init__(dl_type=dl_type)
--> 451 self.tls = L(tls if tls else [TfmdLists(items, t, **kwargs) for t in L(ifnone(tfms,[None]))])
452 self.n_inp = ifnone(n_inp, max(1, len(self.tls)-1))
File /usr/local/lib/python3.9/dist-packages/fastcore/foundation.py:98, in _L_Meta.__call__(cls, x, *args, **kwargs)
96 def __call__(cls, x=None, *args, **kwargs):
97 if not args and not kwargs and x is not None and isinstance(x,cls): return x
---> 98 return super().__call__(x, *args, **kwargs)
File /usr/local/lib/python3.9/dist-packages/fastai/data/core.py:361, in TfmdLists.__init__(self, items, tfms, use_list, do_setup, split_idx, train_setup, splits, types, verbose, dl_type)
359 if isinstance(tfms,TfmdLists): tfms = tfms.tfms
360 if isinstance(tfms,Pipeline): do_setup=False
--> 361 self.tfms = Pipeline(tfms, split_idx=split_idx)
362 store_attr('types,split_idx')
363 if do_setup:
File /usr/local/lib/python3.9/dist-packages/fastcore/transform.py:190, in Pipeline.__init__(self, funcs, split_idx)
188 else:
189 if isinstance(funcs, Transform): funcs = [funcs]
--> 190 self.fs = L(ifnone(funcs,[noop])).map(mk_transform).sorted(key='order')
191 for f in self.fs:
192 name = camel2snake(type(f).__name__)
File /usr/local/lib/python3.9/dist-packages/fastcore/foundation.py:136, in L.sorted(self, key, reverse)
--> 136 def sorted(self, key=None, reverse=False): return self._new(sorted_ex(self, key=key, reverse=reverse))
File /usr/local/lib/python3.9/dist-packages/fastcore/basics.py:619, in sorted_ex(iterable, key, reverse)
617 elif isinstance(key,int): k=itemgetter(key)
618 else: k=key
--> 619 return sorted(iterable, key=k, reverse=reverse)
TypeError: '<' not supported between instances of 'L' and 'int'
I'm not sure what is causing the issue. Let me know if you need more of the code.
I expected the datasets and dataloaders to be created successfully.
I figured it out. It seems TransformBlock does not accept a pre-built Pipeline. I changed
TransformBlock(img_pipe), TransformBlock(mask_pipe)
to
TransformBlock([get_filenames, open_ms_tif]), TransformBlock([label_func, partial(open_tif, cls=TensorMask)])
which removes the Pipeline wrapper.
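Putting it together, a sketch of the corrected setup, reusing the same helper functions from the question:

db = DataBlock(blocks=(TransformBlock([get_filenames, open_ms_tif]),
                       TransformBlock([label_func, partial(open_tif, cls=TensorMask)])),
               splitter=RandomSplitter(valid_pct=0.2, seed=42))
ds = db.datasets(source=train_files)
dl = db.dataloaders(source=train_files, bs=4)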

Running in parallel in Python

I am parallelizing the function train() so that different clients run in parallel, each returning its output as an array of [weight, loss] objects:
import multiprocessing as mp

pool = mp.Pool(mp.cpu_count())
a = [pool.apply_async(train, (model[i], remote_torch[i], train_loader_ptr[i], epoch, args, train_data_length[i], clients[i], wglob)) for i in range(cnum)]
output = [out.get() for out in a]
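For reference, a minimal self-contained sketch of the apply_async pattern used above, with a trivial picklable function standing in for train():

import multiprocessing as mp

def square(x):
    # stand-in for train(); must be a top-level (picklable) function
    return x * x

if __name__ == "__main__":
    pool = mp.Pool(mp.cpu_count())
    async_results = [pool.apply_async(square, (i,)) for i in range(4)]
    output = [r.get() for r in async_results]  # get() blocks until each task finishes
    pool.close()
    pool.join()
    print(output)  # [0, 1, 4, 9]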
However, when I run it, it throws the following error and gets stuck at the output array:
Starting Training
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
Cell In [24], line 11
9 pool = mp.Pool(mp.cpu_count())
10 a = [pool.apply_async(train, (model[i], remote_torch[i], train_loader_ptr[i], epoch, args, train_data_length[i], clients[i], wglob)) for i in range(cnum)]
---> 11 output = [out.get() for out in a]
12 for i in range(cnum):
13 for wlocal, loss in output:
Cell In [24], line 11, in <listcomp>(.0)
9 pool = mp.Pool(mp.cpu_count())
10 a = [pool.apply_async(train, (model[i], remote_torch[i], train_loader_ptr[i], epoch, args, train_data_length[i], clients[i], wglob)) for i in range(cnum)]
---> 11 output = [out.get() for out in a]
12 for i in range(cnum):
13 for wlocal, loss in output:
File ~\anaconda\envs\env\lib\multiprocessing\pool.py:771, in ApplyResult.get(self, timeout)
769 return self._value
770 else:
--> 771 raise self._value
File ~\anaconda\envs\env\lib\multiprocessing\pool.py:537, in Pool._handle_tasks(taskqueue, put, outqueue, pool, cache)
535 break
536 try:
--> 537 put(task)
538 except Exception as e:
539 job, idx = task[:2]
File ~\anaconda\envs\env\lib\multiprocessing\connection.py:206, in _ConnectionBase.send(self, obj)
204 self._check_closed()
205 self._check_writable()
--> 206 self._send_bytes(_ForkingPickler.dumps(obj))
File ~\anaconda\envs\env\lib\multiprocessing\reduction.py:51, in ForkingPickler.dumps(cls, obj, protocol)
48 @classmethod
49 def dumps(cls, obj, protocol=None):
50 buf = io.BytesIO()
---> 51 cls(buf, protocol).dump(obj)
52 return buf.getbuffer()
File stringsource:2, in zmq.backend.cython.socket.Socket.__reduce_cython__()
TypeError: no default __reduce__ due to non-trivial __cinit__
What is happening? Is the output getting the result too early, causing this problem?

Cytoscape: How do you import ABC file types with py2cytoscape's CyREST API?

I have a file of the type:
A B 0.123
A C 0.84
B D 0.52
...
The data are tab-separated; the first and second columns are the nodes, and the third is the associated edge weight.
When trying to import this file into Cytoscape using py2cytoscape, I'm receiving an error:
from py2cytoscape import cyrest
fileName="/Users/96v/Documents/lco/lcoAllAt25/lcoAll25/lcoAll25_top0.041pct_data/lcoAll25_top0.041pct.txt"
cyclient = cyrest.cyclient()
cyclient.network.import_file(dataTypeList='string,string,double',
                             afile=fileName,
                             delimiters='\t',
                             indexColumnSourceInteraction="0",
                             indexColumnTargetInteraction="1",
                             verbose=True)
'http://localhost:1234/v1/commands/network/import file'
TypeError Traceback (most recent call last)
in
----> 1 cyclient.network.import_file(dataTypeList='string,string,double', afile=fileName, delimiters='\t', indexColumnSourceInteraction="0", indexColumnTargetInteraction="1", defaultInteraction="Edge Attribute",verbose=True)
2
~/opt/anaconda3/lib/python3.8/site-packages/py2cytoscape/cyrest/network.py in import_file(self, dataTypeList, defaultInteraction, delimiters, delimitersForDataList, afile, firstRowAsColumnNames, indexColumnSourceInteraction, indexColumnTargetInteraction, indexColumnTypeInteraction, NetworkViewRendererList, RootNetworkList, startLoadRow, TargetColumnList, verbose)
464 afile,firstRowAsColumnNames,indexColumnSourceInteraction,indexColumnTargetInteraction,
465 indexColumnTypeInteraction,NetworkViewRendererList,RootNetworkList,startLoadRow,TargetColumnList])
--> 466 response=api(url=self.__url+"/import file", PARAMS=PARAMS, method="POST", verbose=verbose)
467 return response
468
~/opt/anaconda3/lib/python3.8/site-packages/py2cytoscape/cyrest/base.py in api(namespace, command, PARAMS, body, host, port, version, method, verbose, url, parse_params)
139 sys.stdout.flush()
140 r = requests.post(url = baseurl, json = PARAMS)
--> 141 verbose_=checkresponse(r, verbose=verbose)
142 if (verbose) or (verbose_):
143 verbose=True
~/opt/anaconda3/lib/python3.8/site-packages/py2cytoscape/cyrest/base.py in checkresponse(r, verbose)
43 if 200 <= status < 300:
44 if verbose:
---> 45 print("response status "+status)
46 sys.stdout.flush()
47 res=None
TypeError: can only concatenate str (not "int") to str
The edge weights aren't being recognized, and the documentation isn't very detailed for this function.
Any help would be extremely appreciated!
After looking further at the GUI, I realized:
Columns are not 0-indexed.
verbose has a bug in it (checkresponse concatenates the integer status code to a string, which is the TypeError shown above).
The below code works fine:
from py2cytoscape import cyrest
fileName="pathToFile"
cyclient = cyrest.cyclient()
collection = cyclient.network.import_file(dataTypeList='string,string,double',
                                          afile=fileName,
                                          delimiters='\t',
                                          indexColumnSourceInteraction="1",
                                          indexColumnTargetInteraction="2",
                                          defaultInteraction="interacts with")

Error using tfds.load on TensorFlow Datasets

I was wondering whether TensorFlow Datasets has an issue with the TensorFlow 2.2 release on Windows.
Here is my diagnostic code:
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_datasets as tfds
print("Version: ", tf.__version__)
print("Eager mode: ", tf.executing_eagerly())
print("Hub version: ", hub.__version__)
print("GPU is", "available" if tf.config.experimental.list_physical_devices("GPU") else "NOT AVAILABLE")
Version: 2.2.0
Eager mode: True
Hub version: 0.8.0
GPU is available
I can list the available datasets:
tfds.list_builders()
['abstract_reasoning',
'aeslc',
'aflw2k3d',
'amazon_us_reviews',
'anli',
.
.
.
'xnli',
'xsum',
'yelp_polarity_reviews']
However, I am unable to load any dataset:
imdb, info = tfds.load('imdb_reviews', with_info=True, as_supervised=True)
I receive the following errors:
---------------------------------------------------------------------------
UnimplementedError Traceback (most recent call last)
c:\python37\lib\site-packages\tensorflow_datasets\core\utils\py_utils.py in try_reraise(*args, **kwargs)
398 try:
--> 399 yield
400 except Exception: # pylint: disable=broad-except
c:\python37\lib\site-packages\tensorflow_datasets\core\registered.py in builder(name, **builder_init_kwargs)
243 prefix="Failed to construct dataset {}".format(name)):
--> 244 return builder_cls(name)(**builder_kwargs)
245
c:\python37\lib\site-packages\wrapt\wrappers.py in __call__(self, *args, **kwargs)
602 return self._self_wrapper(self.__wrapped__, self._self_instance,
--> 603 args, kwargs)
604
c:\python37\lib\site-packages\tensorflow_datasets\core\api_utils.py in disallow_positional_args_dec(fn, instance, args, kwargs)
68 _check_required(fn, kwargs)
---> 69 return fn(*args, **kwargs)
70
c:\python37\lib\site-packages\tensorflow_datasets\core\dataset_builder.py in __init__(self, data_dir, config, version)
205 else: # Use the code version (do not restore data)
--> 206 self.info.initialize_from_bucket()
207
c:\python37\lib\site-packages\tensorflow_datasets\core\dataset_info.py in initialize_from_bucket(self)
422 tmp_dir = tempfile.mkdtemp("tfds")
--> 423 data_files = gcs_utils.gcs_dataset_info_files(self.full_name)
424 if not data_files:
c:\python37\lib\site-packages\tensorflow_datasets\core\utils\gcs_utils.py in gcs_dataset_info_files(dataset_dir)
69 """Return paths to GCS files in the given dataset directory."""
---> 70 return gcs_listdir(posixpath.join(GCS_DATASET_INFO_DIR, dataset_dir))
71
c:\python37\lib\site-packages\tensorflow_datasets\core\utils\gcs_utils.py in gcs_listdir(dir_name)
62 root_dir = gcs_path(dir_name)
---> 63 if _is_gcs_disabled or not tf.io.gfile.exists(root_dir):
64 return None
c:\python37\lib\site-packages\tensorflow\python\lib\io\file_io.py in file_exists_v2(path)
266 try:
--> 267 _pywrap_file_io.FileExists(compat.as_bytes(path))
268 except errors.NotFoundError:
UnimplementedError: File system scheme 'gs' not implemented (file: 'gs://tfds-data/dataset_info/imdb_reviews/plain_text/1.0.0')
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
<ipython-input-36-06930b64f980> in <module>
1 #tfds.list_builders()
----> 2 imdb, info = tfds.load('imdb_reviews', with_info=True, as_supervised=True)
c:\python37\lib\site-packages\wrapt\wrappers.py in __call__(self, *args, **kwargs)
562
563 return self._self_wrapper(self.__wrapped__, self._self_instance,
--> 564 args, kwargs)
565
566 class BoundFunctionWrapper(_FunctionWrapperBase):
c:\python37\lib\site-packages\tensorflow_datasets\core\api_utils.py in disallow_positional_args_dec(fn, instance, args, kwargs)
67 _check_no_positional(fn, args, ismethod, allowed=allowed)
68 _check_required(fn, kwargs)
---> 69 return fn(*args, **kwargs)
70
71 return disallow_positional_args_dec(wrapped) # pylint: disable=no-value-for-parameter
c:\python37\lib\site-packages\tensorflow_datasets\core\registered.py in load(name, split, data_dir, batch_size, shuffle_files, download, as_supervised, decoders, read_config, with_info, builder_kwargs, download_and_prepare_kwargs, as_dataset_kwargs, try_gcs)
366 data_dir = constants.DATA_DIR
367
--> 368 dbuilder = builder(name, data_dir=data_dir, **builder_kwargs)
369 if download:
370 download_and_prepare_kwargs = download_and_prepare_kwargs or {}
c:\python37\lib\site-packages\tensorflow_datasets\core\registered.py in builder(name, **builder_init_kwargs)
242 with py_utils.try_reraise(
243 prefix="Failed to construct dataset {}".format(name)):
--> 244 return builder_cls(name)(**builder_kwargs)
245
246
c:\python37\lib\contextlib.py in __exit__(self, type, value, traceback)
128 value = type()
129 try:
--> 130 self.gen.throw(type, value, traceback)
131 except StopIteration as exc:
132 # Suppress StopIteration *unless* it's the same exception that
c:\python37\lib\site-packages\tensorflow_datasets\core\utils\py_utils.py in try_reraise(*args, **kwargs)
399 yield
400 except Exception: # pylint: disable=broad-except
--> 401 reraise(*args, **kwargs)
402
403
c:\python37\lib\site-packages\tensorflow_datasets\core\utils\py_utils.py in reraise(prefix, suffix)
390 suffix = '\n' + suffix if suffix else ''
391 msg = prefix + str(exc_value) + suffix
--> 392 six.reraise(exc_type, exc_type(msg), exc_traceback)
393
394
TypeError: __init__() missing 2 required positional arguments: 'op' and 'message'
Is the library broken? As mentioned, I am on a Windows 10 machine and using JupyterLab.
After I reported the issue on GitHub, the problem was fixed in version 3.2.1.
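A sketch of the corresponding fix on the user side, assuming the upgrade path (the exact version pin is an assumption; anything at or above the fixed release should work):

# pip install --upgrade "tensorflow-datasets>=3.2.1"
import tensorflow_datasets as tfds

imdb, info = tfds.load('imdb_reviews', with_info=True, as_supervised=True)
print(info.splits)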

Getting TypeError: can't pickle SSLContext objects when using Ray

I am experimenting with the Ray library to parallelize some of my functions and get output faster. It works fine on my local machine, but on my cloud instance it shows this error:
TypeError Traceback (most recent call last)
<ipython-input-14-1941686e1604> in <module>
4 # datalist=f1.result()
5
----> 6 datalist_rayval=Customer_Merchant_value_pass.remote(customerlist)
7 #datalist=ray.get(datalist_rayval)
8
~/anaconda3/lib/python3.7/site-packages/ray/remote_function.py in _remote_proxy(*args, **kwargs)
93 @wraps(function)
94 def _remote_proxy(*args, **kwargs):
---> 95 return self._remote(args=args, kwargs=kwargs)
96
97 self.remote = _remote_proxy
~/anaconda3/lib/python3.7/site-packages/ray/remote_function.py in _remote(self, args, kwargs, num_return_vals, is_direct_call, num_cpus, num_gpus, memory, object_store_memory, resources, max_retries)
168 # first driver. This is an argument for repickling the function,
169 # which we do here.
--> 170 self._pickled_function = pickle.dumps(self._function)
171
172 self._function_descriptor = PythonFunctionDescriptor.from_function(
~/anaconda3/lib/python3.7/site-packages/ray/cloudpickle/cloudpickle_fast.py in dumps(obj, protocol, buffer_callback)
70 cp = CloudPickler(file, protocol=protocol,
71 buffer_callback=buffer_callback)
---> 72 cp.dump(obj)
73 return file.getvalue()
74
~/anaconda3/lib/python3.7/site-packages/ray/cloudpickle/cloudpickle_fast.py in dump(self, obj)
615 def dump(self, obj):
616 try:
--> 617 return Pickler.dump(self, obj)
618 except RuntimeError as e:
619 if "recursion" in e.args[0]:
TypeError: can't pickle SSLContext objects
My Ray-decorated code is:
@ray.remote
def Prefer_Attachment_query2(listval):
    customer_wallet=listval[0]
    merchant_wallet=listval[1]
    #print(x,y)
    prefquery="""MATCH (p1:CUSTOMER {WALLETID: '%s'})
    MATCH (p2:MERCHANT {WALLETID: '%s'})
    RETURN gds.alpha.linkprediction.preferentialAttachment(p1, p2,{relationshipQuery: "PAYMENT"}) as score"""%(customer_wallet,merchant_wallet)
    #print(prefquery)
    return prefquery
from timeit import default_timer as timer
import itertools
@ray.remote
def Customer_Merchant_value_pass(text):
    minicustomer=text
    begin=timer()
    sum_val=0
    list_avg_score=[]
    list_category_val=[]
    dict_list=[]
    #Avg_score=0
    with graphdriver.session() as session:
        for i in itertools.islice(minicustomer,len(minicustomer)):
            for key in list_of_unique_merchants:
                print("Here at list_of_unique_merchants customer value is ",i)
                print("BMCC_Code",key)
                valuelist=list_of_unique_merchants[key]
                #print("Uniquelistfor:",key,valuelist)
                for j in valuelist:
                    #print("list len",len(valuelist))
                    #print("Here the iner of value list ",i)
                    #print("--------------------------------")
                    #print([i,j])
                    pref_attach_score_rayvalue=Prefer_Attachment_query2.remote([i,j])
                    pref_attach_score=ray.get(pref_attach_score_rayvalue)
                    #print(pref_attach_score)
                    result=session.run(pref_attach_score)
                    for line in result:
                        #print(line["score"])
                        sum_val=sum_val+line["score"]
                #Avg_score=sum_val/len(valuelist)
                Totalsumval=sum_val
                print("Totalsum",Totalsumval)
                Avg_score=sum_val/len(valuelist)
                print("Avg_score",Avg_score)
                sum_val=0
                list_avg_score.append(Avg_score)
                list_category_val.append(key)
            avg_score_list=list_avg_score
            category_list=list_category_val
            #print("sumval is now",sum_val)
            #print(result)
            max_dictionary =MaxValue_calc(i,category_list,avg_score_list)
            #MaxValue_calc(i,category_list,avg_score_list)
            print("max_dicitionary",max_dictionary)
            dict_list.append(max_dictionary)
            rowlist=dict_list
            print('appended list',rowlist)
            print('process',len(rowlist))
            #dict_list=[]
            list_avg_score=[]
            list_category_val=[]
            #print("rowlist", rowlist)
            #print("list_category_val is now",list_category_val)
            #print("for",i," category AVG scores is now ",category_list)
            #print("list_avg_score is now",list_avg_score)
            #print("for",i," category AVG scores is now ",avg_score_list)
        session.close()
    end=timer()
    print("Total time :",(end-begin))
    return rowlist
datalist_rayval=Customer_Merchant_value_pass.remote(customerlist)
datalist=ray.get(datalist_rayval)
Why am I getting this error, and how can I solve it?
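No verified fix here, but as a hedged sketch: the traceback shows cloudpickle failing while serializing the remote function, and the most likely unpicklable capture is the Neo4j driver (graphdriver), whose connection holds an SSLContext. A common Ray pattern is to create such resources inside the task instead of capturing them from the notebook scope; NEO4J_URI and NEO4J_AUTH below are hypothetical placeholders, and the query is copied from the question.

import ray
from neo4j import GraphDatabase

NEO4J_URI = "bolt://localhost:7687"   # hypothetical placeholder
NEO4J_AUTH = ("neo4j", "password")    # hypothetical placeholder

@ray.remote
def preferential_attachment_scores(customer_wallet, merchant_wallet):
    # Build the driver inside the worker so nothing unpicklable is captured.
    driver = GraphDatabase.driver(NEO4J_URI, auth=NEO4J_AUTH)
    query = """MATCH (p1:CUSTOMER {WALLETID: '%s'})
    MATCH (p2:MERCHANT {WALLETID: '%s'})
    RETURN gds.alpha.linkprediction.preferentialAttachment(p1, p2,{relationshipQuery: "PAYMENT"}) as score""" % (customer_wallet, merchant_wallet)
    with driver.session() as session:
        scores = [record["score"] for record in session.run(query)]
    driver.close()
    return scores

# Hypothetical usage with a single pair of wallet IDs:
# ray.init()
# score_ref = preferential_attachment_scores.remote("CUST_WALLET_ID", "MERCH_WALLET_ID")
# print(ray.get(score_ref))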
