OptionError: "No such keys(s): 'display.height'" - python-3.x

I have been trying to figure this out, but my pandas 'display.height' option is not working and raises OptionError: "No such keys(s): 'display.height'". Here is what I have done:
import pandas as pd
pd.set_option('display.height', 1000)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
The code raises the following error:
C:\Anaconda3\lib\site-packages\pandas\core\config.py in __call__(self, *args, **kwds)
225
226 def __call__(self, *args, **kwds):
--> 227 return self.__func__(*args, **kwds)
228
229 @property
C:\Anaconda3\lib\site-packages\pandas\core\config.py in _set_option(*args, **kwargs)
117
118 for k, v in zip(args[::2], args[1::2]):
--> 119 key = _get_single_key(k, silent)
120
121 o = _get_registered_option(key)
C:\Anaconda3\lib\site-packages\pandas\core\config.py in _get_single_key(pat, silent)
81 if not silent:
82 _warn_if_deprecated(pat)
---> 83 raise OptionError('No such keys(s): {pat!r}'.format(pat=pat))
84 if len(keys) > 1:
85 raise OptionError('Pattern matched multiple keys')
OptionError: "No such keys(s): 'display.height'"
Do you have any suggestions on how to correct this?

According to the pandas 0.15 documentation, display.height is deprecated and you should use display.max_rows instead:
display.height : int
Deprecated. [default: 60] [currently: 15] (Deprecated, use display.max_rows instead.)
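In practice, that means dropping the display.height line and keeping the remaining options. A minimal sketch of the corrected calls, reusing the values from the question:
import pandas as pd

pd.set_option('display.max_rows', 500)      # replaces the deprecated display.height
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)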

Related

Error received when retrieving dataset in fast.ai: TypeError: '<' not supported between instances of 'L' and 'int'

I am following this article on Medium for this contest.
Everything seems fine up to the point where I retrieve the dataset, where I get:
TypeError: '<' not supported between instances of 'L' and 'int'
My code is:
img_pipe = Pipeline([get_filenames, open_ms_tif])
mask_pipe = Pipeline([label_func, partial(open_tif, cls=TensorMask)])
db = DataBlock(blocks=(TransformBlock(img_pipe),
                       TransformBlock(mask_pipe)),
               splitter=RandomSplitter(valid_pct=0.2, seed=42)
               )
ds = db.datasets(source=train_files)
dl = db.dataloaders(source=train_files, bs=4)
train_files is a list of Paths. Here are the first five:
[Path('nasa_rwanda_field_boundary_competition/nasa_rwanda_field_boundary_competition_source_train/nasa_rwanda_field_boundary_competition_source_train_09_2021_08/B01.tif'),
Path('nasa_rwanda_field_boundary_competition/nasa_rwanda_field_boundary_competition_source_train/nasa_rwanda_field_boundary_competition_source_train_39_2021_04/B01.tif'),
Path('nasa_rwanda_field_boundary_competition/nasa_rwanda_field_boundary_competition_source_train/nasa_rwanda_field_boundary_competition_source_train_12_2021_11/B01.tif'),
Path('nasa_rwanda_field_boundary_competition/nasa_rwanda_field_boundary_competition_source_train/nasa_rwanda_field_boundary_competition_source_train_06_2021_10/B01.tif'),
Path('nasa_rwanda_field_boundary_competition/nasa_rwanda_field_boundary_competition_source_train/nasa_rwanda_field_boundary_competition_source_train_08_2021_08/B01.tif')]
The full stack trace is:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Input In [66], in <cell line: 10>()
2 mask_pipe = Pipeline([label_func, partial(open_tif, cls=TensorMask)])
4 db = DataBlock(blocks=(TransformBlock(img_pipe),
5 TransformBlock(mask_pipe)),
6 splitter=RandomSplitter(valid_pct=0.2, seed=42)
7 )
---> 10 ds = db.datasets(source=train_files)
11 dl = db.dataloaders(source=train_files, bs=4)
File /usr/local/lib/python3.9/dist-packages/fastai/data/block.py:147, in DataBlock.datasets(self, source, verbose)
145 splits = (self.splitter or RandomSplitter())(items)
146 pv(f"{len(splits)} datasets of sizes {','.join([str(len(s)) for s in splits])}", verbose)
--> 147 return Datasets(items, tfms=self._combine_type_tfms(), splits=splits, dl_type=self.dl_type, n_inp=self.n_inp, verbose=verbose)
File /usr/local/lib/python3.9/dist-packages/fastai/data/core.py:451, in Datasets.__init__(self, items, tfms, tls, n_inp, dl_type, **kwargs)
442 def __init__(self,
443 items:list=None, # List of items to create `Datasets`
444 tfms:list|Pipeline=None, # List of `Transform`(s) or `Pipeline` to apply
(...)
448 **kwargs
449 ):
450 super().__init__(dl_type=dl_type)
--> 451 self.tls = L(tls if tls else [TfmdLists(items, t, **kwargs) for t in L(ifnone(tfms,[None]))])
452 self.n_inp = ifnone(n_inp, max(1, len(self.tls)-1))
File /usr/local/lib/python3.9/dist-packages/fastai/data/core.py:451, in <listcomp>(.0)
442 def __init__(self,
443 items:list=None, # List of items to create `Datasets`
444 tfms:list|Pipeline=None, # List of `Transform`(s) or `Pipeline` to apply
(...)
448 **kwargs
449 ):
450 super().__init__(dl_type=dl_type)
--> 451 self.tls = L(tls if tls else [TfmdLists(items, t, **kwargs) for t in L(ifnone(tfms,[None]))])
452 self.n_inp = ifnone(n_inp, max(1, len(self.tls)-1))
File /usr/local/lib/python3.9/dist-packages/fastcore/foundation.py:98, in _L_Meta.__call__(cls, x, *args, **kwargs)
96 def __call__(cls, x=None, *args, **kwargs):
97 if not args and not kwargs and x is not None and isinstance(x,cls): return x
---> 98 return super().__call__(x, *args, **kwargs)
File /usr/local/lib/python3.9/dist-packages/fastai/data/core.py:361, in TfmdLists.__init__(self, items, tfms, use_list, do_setup, split_idx, train_setup, splits, types, verbose, dl_type)
359 if isinstance(tfms,TfmdLists): tfms = tfms.tfms
360 if isinstance(tfms,Pipeline): do_setup=False
--> 361 self.tfms = Pipeline(tfms, split_idx=split_idx)
362 store_attr('types,split_idx')
363 if do_setup:
File /usr/local/lib/python3.9/dist-packages/fastcore/transform.py:190, in Pipeline.__init__(self, funcs, split_idx)
188 else:
189 if isinstance(funcs, Transform): funcs = [funcs]
--> 190 self.fs = L(ifnone(funcs,[noop])).map(mk_transform).sorted(key='order')
191 for f in self.fs:
192 name = camel2snake(type(f).__name__)
File /usr/local/lib/python3.9/dist-packages/fastcore/foundation.py:136, in L.sorted(self, key, reverse)
--> 136 def sorted(self, key=None, reverse=False): return self._new(sorted_ex(self, key=key, reverse=reverse))
File /usr/local/lib/python3.9/dist-packages/fastcore/basics.py:619, in sorted_ex(iterable, key, reverse)
617 elif isinstance(key,int): k=itemgetter(key)
618 else: k=key
--> 619 return sorted(iterable, key=k, reverse=reverse)
TypeError: '<' not supported between instances of 'L' and 'int'
I'm not sure what is causing the issue. Let me know if you need more of the code.
I expected the data loader to be created successfully.
I figured it out. It seems TransformBlock does not accept a Pipeline directly. I changed
TransformBlock(img_pipe), TransformBlock(mask_pipe)
to
TransformBlock([get_filenames, open_ms_tif]), TransformBlock([label_func, partial(open_tif, cls=TensorMask)])
which removed the Pipeline wrapper.
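Putting it together, a minimal sketch of the corrected DataBlock, assuming get_filenames, open_ms_tif, label_func, and open_tif are defined as in the question:
db = DataBlock(blocks=(TransformBlock([get_filenames, open_ms_tif]),
                       TransformBlock([label_func, partial(open_tif, cls=TensorMask)])),
               splitter=RandomSplitter(valid_pct=0.2, seed=42))
ds = db.datasets(source=train_files)
dl = db.dataloaders(source=train_files, bs=4)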

Huggingface tokenizer not able to load model after upgrading python to 3.10

I just updated Python to version 3.10.8. Note that I use JupyterLab.
I had to reinstall a lot of packages, but now I get an error when I try to load the tokenizer of a Hugging Face model.
This is my code:
# Import libraries
from transformers import pipeline, AutoTokenizer
# Define checkpoint
model_checkpoint = 'deepset/xlm-roberta-large-squad2'
# Tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
Note that the version of transformers is 4.24.0.
This is the error I get:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
Cell In [3], line 2
1 # Tokenizer
----> 2 tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
File ~/.local/lib/python3.10/site-packages/transformers/models/auto/tokenization_auto.py:637, in AutoTokenizer.from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs)
635 tokenizer_class_py, tokenizer_class_fast = TOKENIZER_MAPPING[type(config)]
636 if tokenizer_class_fast and (use_fast or tokenizer_class_py is None):
--> 637 return tokenizer_class_fast.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
638 else:
639 if tokenizer_class_py is not None:
File ~/.local/lib/python3.10/site-packages/transformers/tokenization_utils_base.py:1777, in PreTrainedTokenizerBase.from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs)
1774 else:
1775 logger.info(f"loading file {file_path} from cache at {resolved_vocab_files[file_id]}")
-> 1777 return cls._from_pretrained(
1778 resolved_vocab_files,
1779 pretrained_model_name_or_path,
1780 init_configuration,
1781 *init_inputs,
1782 use_auth_token=use_auth_token,
1783 cache_dir=cache_dir,
1784 local_files_only=local_files_only,
1785 _commit_hash=commit_hash,
1786 **kwargs,
1787 )
File ~/.local/lib/python3.10/site-packages/transformers/tokenization_utils_base.py:1932, in PreTrainedTokenizerBase._from_pretrained(cls, resolved_vocab_files, pretrained_model_name_or_path, init_configuration, use_auth_token, cache_dir, local_files_only, _commit_hash, *init_inputs, **kwargs)
1930 # Instantiate tokenizer.
1931 try:
-> 1932 tokenizer = cls(*init_inputs, **init_kwargs)
1933 except OSError:
1934 raise OSError(
1935 "Unable to load vocabulary from file. "
1936 "Please check that the provided vocabulary is accessible and not corrupted."
1937 )
File ~/.local/lib/python3.10/site-packages/transformers/models/xlm_roberta/tokenization_xlm_roberta_fast.py:155, in XLMRobertaTokenizerFast.__init__(self, vocab_file, tokenizer_file, bos_token, eos_token, sep_token, cls_token, unk_token, pad_token, mask_token, **kwargs)
139 def __init__(
140 self,
141 vocab_file=None,
(...)
151 ):
152 # Mask token behave like a normal word, i.e. include the space before it
153 mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
--> 155 super().__init__(
156 vocab_file,
157 tokenizer_file=tokenizer_file,
158 bos_token=bos_token,
159 eos_token=eos_token,
160 sep_token=sep_token,
161 cls_token=cls_token,
162 unk_token=unk_token,
163 pad_token=pad_token,
164 mask_token=mask_token,
165 **kwargs,
166 )
168 self.vocab_file = vocab_file
169 self.can_save_slow_tokenizer = False if not self.vocab_file else True
File ~/.local/lib/python3.10/site-packages/transformers/tokenization_utils_fast.py:114, in PreTrainedTokenizerFast.__init__(self, *args, **kwargs)
111 fast_tokenizer = TokenizerFast.from_file(fast_tokenizer_file)
112 elif slow_tokenizer is not None:
113 # We need to convert a slow tokenizer to build the backend
--> 114 fast_tokenizer = convert_slow_tokenizer(slow_tokenizer)
115 elif self.slow_tokenizer_class is not None:
116 # We need to create and convert a slow tokenizer to build the backend
117 slow_tokenizer = self.slow_tokenizer_class(*args, **kwargs)
File ~/.local/lib/python3.10/site-packages/transformers/convert_slow_tokenizer.py:1162, in convert_slow_tokenizer(transformer_tokenizer)
1154 raise ValueError(
1155 f"An instance of tokenizer class {tokenizer_class_name} cannot be converted in a Fast tokenizer instance."
1156 " No converter was found. Currently available slow->fast convertors:"
1157 f" {list(SLOW_TO_FAST_CONVERTERS.keys())}"
1158 )
1160 converter_class = SLOW_TO_FAST_CONVERTERS[tokenizer_class_name]
-> 1162 return converter_class(transformer_tokenizer).converted()
File ~/.local/lib/python3.10/site-packages/transformers/convert_slow_tokenizer.py:438, in SpmConverter.__init__(self, *args)
434 requires_backends(self, "protobuf")
436 super().__init__(*args)
--> 438 from .utils import sentencepiece_model_pb2 as model_pb2
440 m = model_pb2.ModelProto()
441 with open(self.original_tokenizer.vocab_file, "rb") as f:
File ~/.local/lib/python3.10/site-packages/transformers/utils/sentencepiece_model_pb2.py:20
18 from google.protobuf import descriptor as _descriptor
19 from google.protobuf import message as _message
---> 20 from google.protobuf import reflection as _reflection
21 from google.protobuf import symbol_database as _symbol_database
24 # @@protoc_insertion_point(imports)
File /usr/lib/python3/dist-packages/google/protobuf/reflection.py:58
56 from google.protobuf.pyext import cpp_message as message_impl
57 else:
---> 58 from google.protobuf.internal import python_message as message_impl
60 # The type of all Message classes.
61 # Part of the public interface, but normally only used by message factories.
62 GeneratedProtocolMessageType = message_impl.GeneratedProtocolMessageType
File /usr/lib/python3/dist-packages/google/protobuf/internal/python_message.py:69
66 import copyreg as copyreg
68 # We use "as" to avoid name collisions with variables.
---> 69 from google.protobuf.internal import containers
70 from google.protobuf.internal import decoder
71 from google.protobuf.internal import encoder
File /usr/lib/python3/dist-packages/google/protobuf/internal/containers.py:182
177 collections.MutableMapping.register(MutableMapping)
179 else:
180 # In Python 3 we can just use MutableMapping directly, because it defines
181 # __slots__.
--> 182 MutableMapping = collections.MutableMapping
185 class BaseContainer(object):
187 """Base container class."""
AttributeError: module 'collections' has no attribute 'MutableMapping'
I tried several solutions (for example, this and this), but none seem to work.
According to this link, I should change collections.Mapping to collections.abc.Mapping, but I wouldn't know where to do it.
Another possible solution is downgrading Python to 3.9, but I would like to keep that as a last resort.
How can I fix this?
It turned out to be a problem related to the protobuf module. I updated it to the latest version to date (4.21.9).
This changed the error to:
TypeError: Descriptors cannot not be created directly.
If this call came from a _pb2.py file, your generated code is out of date and must be regenerated with protoc >= 3.19.0.
If you cannot immediately regenerate your protos, some other possible workarounds are:
1. Downgrade the protobuf package to 3.20.x or lower.
2. Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python (but this will use pure-Python parsing and will be much slower).
More information: https://developers.google.com/protocol-buffers/docs/news/2022-05-06#python-updates
So I downgraded protobuf to version 3.20.0 and that worked.
For further details, look here.
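For reference, the second workaround quoted in the error message (forcing the pure-Python protobuf implementation) can be tried without changing any package versions. A minimal sketch; the environment variable must be set before transformers imports protobuf, and pure-Python parsing is noticeably slower:
import os
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'python'  # workaround 2 from the error message

from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('deepset/xlm-roberta-large-squad2')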

Error using tfds.load on Tensorflow Dataset

I was wondering whether the TensorFlow 2.2 dataset library has an issue in its Windows release.
Here is my diagnostic code:
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_datasets as tfds
print("Version: ", tf.__version__)
print("Eager mode: ", tf.executing_eagerly())
print("Hub version: ", hub.__version__)
print("GPU is", "available" if tf.config.experimental.list_physical_devices("GPU") else "NOT AVAILABLE")
Version: 2.2.0
Eager mode: True
Hub version: 0.8.0
GPU is available
I can load the list of datasets:
tfds.list_builders()
['abstract_reasoning',
'aeslc',
'aflw2k3d',
'amazon_us_reviews',
'anli',
.
.
.
'xnli',
'xsum',
'yelp_polarity_reviews']
However, I am unable to load any dataset:
imdb, info = tfds.load('imdb_reviews', with_info=True, as_supervised=True)
I receive the following error:
---------------------------------------------------------------------------
UnimplementedError Traceback (most recent call last)
c:\python37\lib\site-packages\tensorflow_datasets\core\utils\py_utils.py in try_reraise(*args, **kwargs)
398 try:
--> 399 yield
400 except Exception: # pylint: disable=broad-except
c:\python37\lib\site-packages\tensorflow_datasets\core\registered.py in builder(name, **builder_init_kwargs)
243 prefix="Failed to construct dataset {}".format(name)):
--> 244 return builder_cls(name)(**builder_kwargs)
245
c:\python37\lib\site-packages\wrapt\wrappers.py in __call__(self, *args, **kwargs)
602 return self._self_wrapper(self.__wrapped__, self._self_instance,
--> 603 args, kwargs)
604
c:\python37\lib\site-packages\tensorflow_datasets\core\api_utils.py in disallow_positional_args_dec(fn, instance, args, kwargs)
68 _check_required(fn, kwargs)
---> 69 return fn(*args, **kwargs)
70
c:\python37\lib\site-packages\tensorflow_datasets\core\dataset_builder.py in __init__(self, data_dir, config, version)
205 else: # Use the code version (do not restore data)
--> 206 self.info.initialize_from_bucket()
207
c:\python37\lib\site-packages\tensorflow_datasets\core\dataset_info.py in initialize_from_bucket(self)
422 tmp_dir = tempfile.mkdtemp("tfds")
--> 423 data_files = gcs_utils.gcs_dataset_info_files(self.full_name)
424 if not data_files:
c:\python37\lib\site-packages\tensorflow_datasets\core\utils\gcs_utils.py in gcs_dataset_info_files(dataset_dir)
69 """Return paths to GCS files in the given dataset directory."""
---> 70 return gcs_listdir(posixpath.join(GCS_DATASET_INFO_DIR, dataset_dir))
71
c:\python37\lib\site-packages\tensorflow_datasets\core\utils\gcs_utils.py in gcs_listdir(dir_name)
62 root_dir = gcs_path(dir_name)
---> 63 if _is_gcs_disabled or not tf.io.gfile.exists(root_dir):
64 return None
c:\python37\lib\site-packages\tensorflow\python\lib\io\file_io.py in file_exists_v2(path)
266 try:
--> 267 _pywrap_file_io.FileExists(compat.as_bytes(path))
268 except errors.NotFoundError:
UnimplementedError: File system scheme 'gs' not implemented (file: 'gs://tfds-data/dataset_info/imdb_reviews/plain_text/1.0.0')
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
<ipython-input-36-06930b64f980> in <module>
1 #tfds.list_builders()
----> 2 imdb, info = tfds.load('imdb_reviews', with_info=True, as_supervised=True)
c:\python37\lib\site-packages\wrapt\wrappers.py in __call__(self, *args, **kwargs)
562
563 return self._self_wrapper(self.__wrapped__, self._self_instance,
--> 564 args, kwargs)
565
566 class BoundFunctionWrapper(_FunctionWrapperBase):
c:\python37\lib\site-packages\tensorflow_datasets\core\api_utils.py in disallow_positional_args_dec(fn, instance, args, kwargs)
67 _check_no_positional(fn, args, ismethod, allowed=allowed)
68 _check_required(fn, kwargs)
---> 69 return fn(*args, **kwargs)
70
71 return disallow_positional_args_dec(wrapped) # pylint: disable=no-value-for-parameter
c:\python37\lib\site-packages\tensorflow_datasets\core\registered.py in load(name, split, data_dir, batch_size, shuffle_files, download, as_supervised, decoders, read_config, with_info, builder_kwargs, download_and_prepare_kwargs, as_dataset_kwargs, try_gcs)
366 data_dir = constants.DATA_DIR
367
--> 368 dbuilder = builder(name, data_dir=data_dir, **builder_kwargs)
369 if download:
370 download_and_prepare_kwargs = download_and_prepare_kwargs or {}
c:\python37\lib\site-packages\tensorflow_datasets\core\registered.py in builder(name, **builder_init_kwargs)
242 with py_utils.try_reraise(
243 prefix="Failed to construct dataset {}".format(name)):
--> 244 return builder_cls(name)(**builder_kwargs)
245
246
c:\python37\lib\contextlib.py in __exit__(self, type, value, traceback)
128 value = type()
129 try:
--> 130 self.gen.throw(type, value, traceback)
131 except StopIteration as exc:
132 # Suppress StopIteration *unless* it's the same exception that
c:\python37\lib\site-packages\tensorflow_datasets\core\utils\py_utils.py in try_reraise(*args, **kwargs)
399 yield
400 except Exception: # pylint: disable=broad-except
--> 401 reraise(*args, **kwargs)
402
403
c:\python37\lib\site-packages\tensorflow_datasets\core\utils\py_utils.py in reraise(prefix, suffix)
390 suffix = '\n' + suffix if suffix else ''
391 msg = prefix + str(exc_value) + suffix
--> 392 six.reraise(exc_type, exc_type(msg), exc_traceback)
393
394
TypeError: __init__() missing 2 required positional arguments: 'op' and 'message'
Is the library broken? As mentioned, I am on a Windows 10 machine and using JupyterLab.
After I reported the issue on GitHub, the problem was fixed in tensorflow-datasets version 3.2.1.
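A minimal check after upgrading, assuming (as noted above) the fix landed in tensorflow-datasets 3.2.1:
import tensorflow_datasets as tfds

print(tfds.__version__)  # expect 3.2.1 or later
imdb, info = tfds.load('imdb_reviews', with_info=True, as_supervised=True)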

folium heatmap with `Object of type 'int64' is not JSON serializable` error

I got the error "Object of type 'int64' is not JSON serializable" when I tried to generate a heatmap with folium.
I am running my Jupyter notebook in Anaconda using Python 3.6, and the version of folium is '0.9.1'.
df_2y_cons_LatLo.dtypes:
Latitude      float64
Longitude     float64
Descriptor      int64
import folium
from folium.plugins import HeatMap

def generateBaseMap(default_location=[40.704652, -73.923688], default_zoom_start=11):
    base_map = folium.Map(location=default_location, control_scale=True, zoom_start=default_zoom_start)
    return base_map

base_map = generateBaseMap()
hm = HeatMap(list(zip(df_2y_cons_LatLo.Latitude.values, df_2y_cons_LatLo.Longitude.values, df_2y_cons_LatLo.Descriptor.values)))
base_map.add_child(hm)
I expected a heatmap to appear, but after I ran the above code it gave me the error mentioned above.
TypeError Traceback (most recent call last)
~\Anaconda3\lib\site-packages\IPython\core\formatters.py in __call__(self, obj)
343 method = get_real_method(obj, self.print_method)
344 if method is not None:
--> 345 return method()
346 return None
347 else:
~\Anaconda3\lib\site-packages\folium\folium.py in _repr_html_(self, **kwargs)
291 self._parent = None
292 else:
--> 293 out = self._parent._repr_html_(**kwargs)
294 return out
295
~\Anaconda3\lib\site-packages\branca\element.py in _repr_html_(self, **kwargs)
326
327 """
--> 328 html = self.render(**kwargs)
329 html = "data:text/html;charset=utf-8;base64," + base64.b64encode(html.encode('utf8')).decode('utf8') # noqa
330
~\Anaconda3\lib\site-packages\branca\element.py in render(self, **kwargs)
319 """Renders the HTML representation of the element."""
320 for name, child in self._children.items():
--> 321 child.render(**kwargs)
322 return self._template.render(this=self, kwargs=kwargs)
323
~\Anaconda3\lib\site-packages\folium\folium.py in render(self, **kwargs)
368 '</style>'), name='map_style')
369
--> 370 super(Map, self).render(**kwargs)
371
372 def fit_bounds(self, bounds, padding_top_left=None,
~\Anaconda3\lib\site-packages\branca\element.py in render(self, **kwargs)
631
632 for name, element in self._children.items():
--> 633 element.render(**kwargs)
~\Anaconda3\lib\site-packages\folium\plugins\heat_map.py in render(self, **kwargs)
79
80 def render(self, **kwargs):
---> 81 super(HeatMap, self).render(**kwargs)
82
83 figure = self.get_root()
~\Anaconda3\lib\site-packages\branca\element.py in render(self, **kwargs)
627 script = self._template.module.__dict__.get('script', None)
628 if script is not None:
--> 629 figure.script.add_child(Element(script(self, kwargs)),
630 name=self.get_name())
631
~\Anaconda3\lib\site-packages\jinja2\runtime.py in __call__(self, *args, **kwargs)
573 (self.name, len(self.arguments)))
574
--> 575 return self._invoke(arguments, autoescape)
576
577 def _invoke(self, arguments, autoescape):
~\Anaconda3\lib\site-packages\jinja2\asyncsupport.py in _invoke(self, arguments, autoescape)
108 def _invoke(self, arguments, autoescape):
109 if not self._environment.is_async:
--> 110 return original_invoke(self, arguments, autoescape)
111 return async_invoke(self, arguments, autoescape)
112 return update_wrapper(_invoke, original_invoke)
~\Anaconda3\lib\site-packages\jinja2\runtime.py in _invoke(self, arguments, autoescape)
577 def _invoke(self, arguments, autoescape):
578 """This method is being swapped out by the async implementation."""
--> 579 rv = self._func(*arguments)
580 if autoescape:
581 rv = Markup(rv)
<template> in macro(l_1_this, l_1_kwargs)
~\Anaconda3\lib\site-packages\jinja2\filters.py in do_tojson(eval_ctx, value, indent)
1076 options = dict(options)
1077 options['indent'] = indent
-> 1078 return htmlsafe_json_dumps(value, dumper=dumper, **options)
1079
1080
~\Anaconda3\lib\site-packages\jinja2\utils.py in htmlsafe_json_dumps(obj, dumper, **kwargs)
563 if dumper is None:
564 dumper = json.dumps
--> 565 rv = dumper(obj, **kwargs) \
566 .replace(u'<', u'\\u003c') \
567 .replace(u'>', u'\\u003e') \
~\Anaconda3\lib\json\__init__.py in dumps(obj, skipkeys, ensure_ascii, check_circular, allow_nan, cls, indent, separators, default, sort_keys, **kw)
236 check_circular=check_circular, allow_nan=allow_nan, indent=indent,
237 separators=separators, default=default, sort_keys=sort_keys,
--> 238 **kw).encode(obj)
239
240
~\Anaconda3\lib\json\encoder.py in encode(self, o)
197 # exceptions aren't as detailed. The list call should be roughly
198 # equivalent to the PySequence_Fast that ''.join() would do.
--> 199 chunks = self.iterencode(o, _one_shot=True)
200 if not isinstance(chunks, (list, tuple)):
201 chunks = list(chunks)
~\Anaconda3\lib\json\encoder.py in iterencode(self, o, _one_shot)
255 self.key_separator, self.item_separator, self.sort_keys,
256 self.skipkeys, _one_shot)
--> 257 return _iterencode(o, 0)
258
259 def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
~\Anaconda3\lib\json\encoder.py in default(self, o)
178 """
179 raise TypeError("Object of type '%s' is not JSON serializable" %
--> 180 o.__class__.__name__)
181
182 def encode(self, o):
TypeError: Object of type 'int64' is not JSON serializable
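The traceback points at NumPy int64 values that folium's Jinja template cannot serialize to JSON. A common workaround for this class of error, not taken from the original thread, is to cast the values to native Python floats before building the HeatMap; a minimal sketch:
hm = HeatMap([[float(lat), float(lon), float(d)]
              for lat, lon, d in zip(df_2y_cons_LatLo.Latitude.values,
                                     df_2y_cons_LatLo.Longitude.values,
                                     df_2y_cons_LatLo.Descriptor.values)])
base_map.add_child(hm)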

pyspark ibmdpy giving ValueError: ('ordinal must be >= 1

I am loading data from Netezza into a DataFrame and then trying to write it to dashDB. I am using ibmdbpy to load the data into dashDB on Bluemix. ibmdbpy requires a pandas DataFrame, so I convert the Spark DataFrame to pandas before loading it into dashDB.
all_disputes_df = sqlContext.read.format('jdbc').options(url='jdbc:netezza://pda1-wall.pok.ibm.com:5480/BACC_PRD_ISCNZ_GAPNZ', user=user, password=password, dbtable='METRICS.AR_EM_D2_02_AGG', driver='org.netezza.Driver').load()
from ibmdbpy import IdaDataBase
idadb = IdaDataBase(dsn='BLUDB', uid='dash107474', pwd='k5TY24AbzFjE')
print("current_schema is %s" % idadb.current_schema)
print("tables %s" % idadb.show_tables())
idadb.as_idadataframe(all_disputes_df.toPandas(), "all_disputes")
I am getting the following traceback.
ValueError Traceback (most recent call last)
<ipython-input-4-63dde713c67b> in <module>()
----> 1 idadb.as_idadataframe(all_disputes_df.toPandas(), "all_disputes")
/home/brente/spark/spark-1.6.1-bin-hadoop2.6/python/pyspark/sql/dataframe.pyc in toPandas(self)
1379 """
1380 import pandas as pd
-> 1381 return pd.DataFrame.from_records(self.collect(), columns=self.columns)
1382
1383 ##########################################################################################
/home/brente/spark/spark-1.6.1-bin-hadoop2.6/python/pyspark/sql/dataframe.pyc in collect(self)
279 with SCCallSiteSync(self._sc) as css:
280 port = self._jdf.collectToPython()
--> 281 return list(_load_from_socket(port, BatchedSerializer(PickleSerializer())))
282
283 @ignore_unicode_prefix
/home/brente/spark/spark-1.6.1-bin-hadoop2.6/python/pyspark/rdd.pyc in _load_from_socket(port, serializer)
140 try:
141 rf = sock.makefile("rb", 65536)
--> 142 for item in serializer.load_stream(rf):
143 yield item
144 finally:
/home/brente/spark/spark-1.6.1-bin-hadoop2.6/python/pyspark/serializers.pyc in load_stream(self, stream)
137 while True:
138 try:
--> 139 yield self._read_with_length(stream)
140 except EOFError:
141 return
/home/brente/spark/spark-1.6.1-bin-hadoop2.6/python/pyspark/serializers.pyc in _read_with_length(self, stream)
162 if len(obj) < length:
163 raise EOFError
--> 164 return self.loads(obj)
165
166 def dumps(self, obj):
/home/brente/spark/spark-1.6.1-bin-hadoop2.6/python/pyspark/serializers.pyc in loads(self, obj, encoding)
420 else:
421 def loads(self, obj, encoding=None):
--> 422 return pickle.loads(obj)
423
424
/home/brente/spark/spark-1.6.1-bin-hadoop2.6/python/pyspark/sql/types.pyc in <lambda>(*a)
1157 # This is used to unpickle a Row from JVM
1158 def _create_row_inbound_converter(dataType):
-> 1159 return lambda *a: dataType.fromInternal(a)
1160
1161
/home/brente/spark/spark-1.6.1-bin-hadoop2.6/python/pyspark/sql/types.pyc in fromInternal(self, obj)
563 return obj
564 if self._needSerializeAnyField:
--> 565 values = [f.fromInternal(v) for f, v in zip(self.fields, obj)]
566 else:
567 values = obj
/home/brente/spark/spark-1.6.1-bin-hadoop2.6/python/pyspark/sql/types.pyc in fromInternal(self, obj)
436
437 def fromInternal(self, obj):
--> 438 return self.dataType.fromInternal(obj)
439
440
/home/brente/spark/spark-1.6.1-bin-hadoop2.6/python/pyspark/sql/types.pyc in fromInternal(self, v)
174 def fromInternal(self, v):
175 if v is not None:
--> 176 return datetime.date.fromordinal(v + self.EPOCH_ORDINAL)
177
178
ValueError: ('ordinal must be >= 1', <function <lambda> at 0x7f97c0be76e0>, (u'788', u'10', u'00620000 ', u'0129101548 ', 1, u'000028628 ', 16520, Decimal('2124.76'), Decimal('2124.76'), 16525, 16525, u'000611099
Any ideas on what the problem is?
Reading your data from Netezza into DataFrames fails; everything beyond that is speculation on my side:
Could there be invalid data stored in Netezza that throws off the deserialization into DataFrames?
Maybe try some other queries to make sure there is no connectivity problem, no typo in the database name, and things like that.
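As a concrete starting point for that kind of check, a minimal diagnostic sketch using the all_disputes_df from the question: collect only a small sample through the same path and see whether toPandas() already fails there, which would point at the data rather than at ibmdbpy or dashDB.
sample = all_disputes_df.limit(10).toPandas()  # small sample through the same collect path
print(sample.dtypes)
print(sample.head())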

Resources