'Line2D' object has no property 'ylabel' error with pd.plot() - python-3.x

I am trying to plot with df.plot from the pandas plotting library, using the following code:
df_mean.plot(kind='line', subplots=True, layout=(1, 8), figsize=(40, 8),
             sharey=True, ylabel="Percent Change", title="Average movement")
I thought it might have something to do with using np.transpose() since it would convert it into a numpy array, but after conversion back to a pd.DataFrame(), the error still persists.
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-269-85f6c80ca026> in <module>
1 df_mean = pd.DataFrame(df_mean)
2
----> 3 df_mean.plot(kind='line', subplots=True, layout=(1,8), figsize=(40,8),
4 title="Average movement",
5 sharey=True, ylabel = "Percent Change")
~\anaconda3\lib\site-packages\pandas\plotting\_core.py in __call__(self, *args,
**kwargs)
845 keyword_args = ", ".join(
846 f"{name}={repr(value)}" for (name, default), value in
zip(arg_def, args)
--> 847 )
848 msg = (
849 "`Series.plot()` should not be called with positional "
~\anaconda3\lib\site-packages\pandas\plotting\_matplotlib\__init__.py in plot(data,
kind, **kwargs)
59 kwargs["ax"] = getattr(ax, "left_ax", ax)
60 plot_obj = PLOT_CLASSES[kind](data, **kwargs)
---> 61 plot_obj.generate()
62 plot_obj.draw()
63 return plot_obj.result
~\anaconda3\lib\site-packages\pandas\plotting\_matplotlib\core.py in generate(self)
261 else:
262 return self.data.shape[1]
--> 263
264 def draw(self):
265 self.plt.draw_if_interactive()
~\anaconda3\lib\site-packages\pandas\plotting\_matplotlib\core.py in
_make_plot(self)
1075 self.data = self.data.fillna(value=0)
1076 self.x_compat = plot_params["x_compat"]
-> 1077 if "x_compat" in self.kwds:
1078 self.x_compat = bool(self.kwds.pop("x_compat"))
1079
~\anaconda3\lib\site-packages\pandas\plotting\_matplotlib\core.py in _plot(cls, ax,
x, y, style, column_num, stacking_id, **kwds)
1102
1103 stacking_id = self._get_stacking_id()
-> 1104 is_errorbar = com.any_not_none(*self.errors.values())
1105
1106 colors = self._get_colors()
~\anaconda3\lib\site-packages\pandas\plotting\_matplotlib\converter.py in
wrapper(*args, **kwargs)
64 with pandas_converters():
65 return func(*args, **kwargs)
---> 66
67 return wrapper
68
~\anaconda3\lib\site-packages\pandas\plotting\_matplotlib\core.py in _plot(cls, ax,
x, y, style, is_errorbar, **kwds)
654
655 if is_errorbar:
--> 656 if "xerr" in kwds:
657 kwds["xerr"] = np.array(kwds.get("xerr"))
658 if "yerr" in kwds:
~\anaconda3\lib\site-packages\matplotlib\axes\_axes.py in plot(self, scalex,
scaley, data, *args, **kwargs)
1741
1742 kwargs = cbook.normalize_kwargs(kwargs, mlines.Line2D)
-> 1743 lines = [*self._get_lines(*args, data=data, **kwargs)]
1744 for line in lines:
1745 self.add_line(line)
~\anaconda3\lib\site-packages\matplotlib\axes\_base.py in __call__(self, data,
*args, **kwargs)
271 this += args[0],
272 args = args[1:]
--> 273 yield from self._plot_args(this, kwargs)
274
275 def get_next_color(self):
~\anaconda3\lib\site-packages\matplotlib\axes\_base.py in _plot_args(self, tup,
kwargs)
416 if ncx > 1 and ncy > 1 and ncx != ncy:
417 raise ValueError(f"x has {ncx} columns but y has {ncy}
columns")
--> 418 return [func(x[:, j % ncx], y[:, j % ncy], kw, kwargs)
419 for j in range(max(ncx, ncy))]
420
~\anaconda3\lib\site-packages\matplotlib\axes\_base.py in <listcomp>(.0)
416 if ncx > 1 and ncy > 1 and ncx != ncy:
417 raise ValueError(f"x has {ncx} columns but y has {ncy}
columns")
--> 418 return [func(x[:, j % ncx], y[:, j % ncy], kw, kwargs)
419 for j in range(max(ncx, ncy))]
420
~\anaconda3\lib\site-packages\matplotlib\axes\_base.py in _makeline(self, x, y,
kw, kwargs)
310 default_dict = self._getdefaults(set(), kw)
311 self._setdefaults(default_dict, kw)
--> 312 seg = mlines.Line2D(x, y, **kw)
313 return seg
314
~\anaconda3\lib\site-packages\matplotlib\lines.py in __init__(self, xdata, ydata,
linewidth, linestyle, color, marker, markersize, markeredgewidth, markeredgecolor,
markerfacecolor, markerfacecoloralt, fillstyle, antialiased, dash_capstyle,
solid_capstyle, dash_joinstyle, solid_joinstyle, pickradius, drawstyle, markevery,
**kwargs)
388 # update kwargs before updating data to give the caller a
389 # chance to init axes (and hence unit support)
--> 390 self.update(kwargs)
391 self.pickradius = pickradius
392 self.ind_offset = 0
~\anaconda3\lib\site-packages\matplotlib\artist.py in update(self, props)
994 func = getattr(self, f"set_{k}", None)
995 if not callable(func):
--> 996 raise AttributeError(f"{type(self).__name__!r}
object "
997 f"has no property {k!r}")
998 ret.append(func(v))
AttributeError: 'Line2D' object has no property 'ylabel'
I was able to run this code fine on my Mac, but when I transferred it to my desktop I get this error, and I'm not sure why. I thought it could be a version problem, but I updated pandas and it didn't fix anything.
Anybody have an idea what could be causing something like this?

You can try this trick:
If the ylabel parameter is the problem, remove it from the plot() call and set the label on the returned axes instead. Note that with subplots=True the call returns an array of Axes rather than a single one; since sharey=True, labeling the first axis is enough.
axes = df_mean.plot(kind='line', subplots=True, layout=(1, 8), figsize=(40, 8),
                    sharey=True, title="Average movement")
axes[0, 0].set_ylabel('Percent Change')
plt.show()
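A likely explanation, offered as an assumption since the exact versions on the two machines are not shown: the ylabel and xlabel keywords of DataFrame.plot() were only added in pandas 1.1.0, so on an older pandas the keyword falls through to matplotlib's Line2D, which is exactly the error above. If the desktop environment can be brought up to pandas >= 1.1, the original call should work unchanged, e.g.:
import pandas as pd
print(pd.__version__)   # needs to be 1.1.0 or newer for ylabel= to be accepted
df_mean.plot(kind='line', subplots=True, layout=(1, 8), figsize=(40, 8),
             sharey=True, ylabel="Percent Change", title="Average movement")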

Related

Colab IndexError: Target 255 is out of bounds

I'm trying to perform image semantic segmentation (segmenting mining fields) using lightning-flash. My images are all RGB/uint8/512x512 and the masks are L/uint8/512x512.
When I run the code, I get an error during fitting.
This is my code:
import torch
import flash
from flash.image import SemanticSegmentation, SemanticSegmentationData
import os
from google.colab import drive
import ssl
drive.mount("/content/drive")
DATA_DIR = '/content/drive/MyDrive/data/'
x_train_dir = os.path.join(DATA_DIR, 'train_images')
y_train_dir = os.path.join(DATA_DIR, 'train_masks')
x_valid_dir = os.path.join(DATA_DIR, 'val_images')
y_valid_dir = os.path.join(DATA_DIR, 'val_masks')
x_test_dir = os.path.join(DATA_DIR, 'test_images')
y_test_dir = os.path.join(DATA_DIR, 'test_masks')
datamodule = SemanticSegmentationData.from_folders(
    train_folder=x_train_dir,
    train_target_folder=y_train_dir,
    val_folder=x_valid_dir,
    val_target_folder=y_valid_dir,
    test_folder=x_test_dir,
    test_target_folder=y_test_dir,
    transform_kwargs=dict(image_size=(256, 256)),
    num_classes=1,
    batch_size=16,
)
# avoid ssl error
ssl._create_default_https_context = ssl._create_unverified_context
model = SemanticSegmentation(
    head="unetplusplus",
    backbone="densenet169",
    pretrained="imagenet",
    num_classes=datamodule.num_classes
)
GPUS = torch.cuda.device_count()
if GPUS > 0:
    trainer = flash.Trainer(max_epochs=2, gpus=torch.cuda.device_count())
else:
    trainer = flash.Trainer(max_epochs=2)
trainer.finetune(model, datamodule=datamodule, strategy="freeze")
trainer.save_checkpoint("semantic_segmentation_model.pt")
When I run the code, I get this error:
IndexError Traceback (most recent call last)
<ipython-input-7-11e2ce087ca0> in <module>
6
7 #trainer.fit(model, datamodule=datamodule)
----> 8 trainer.finetune(model, datamodule=datamodule, strategy="freeze")
9 trainer.save_checkpoint("semantic_segmentation_model.pt")
19 frames
/usr/local/lib/python3.7/dist-packages/flash/core/trainer.py in finetune(self, model, train_dataloader, val_dataloaders, datamodule, strategy, train_bn)
162 """
163 self._resolve_callbacks(model, strategy, train_bn=train_bn)
--> 164 return super().fit(model, train_dataloader, val_dataloaders, datamodule)
165
166 def predict(
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
695 self.strategy.model = model
696 self._call_and_handle_interrupt(
--> 697 self._fit_impl, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path
698 )
699
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _call_and_handle_interrupt(self, trainer_fn, *args, **kwargs)
648 return self.strategy.launcher.launch(trainer_fn, *args, trainer=self, **kwargs)
649 else:
--> 650 return trainer_fn(*args, **kwargs)
651 # TODO(awaelchli): Unify both exceptions below, where `KeyboardError` doesn't re-raise
652 except KeyboardInterrupt as exception:
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _fit_impl(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
735 ckpt_path, model_provided=True, model_connected=self.lightning_module is not None
736 )
--> 737 results = self._run(model, ckpt_path=self.ckpt_path)
738
739 assert self.state.stopped
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _run(self, model, ckpt_path)
1166 self._checkpoint_connector.resume_end()
1167
-> 1168 results = self._run_stage()
1169
1170 log.detail(f"{self.__class__.__name__}: trainer tearing down")
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _run_stage(self)
1252 if self.predicting:
1253 return self._run_predict()
-> 1254 return self._run_train()
1255
1256 def _pre_training_routine(self):
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _run_train(self)
1274
1275 with isolate_rng():
-> 1276 self._run_sanity_check()
1277
1278 # enable train mode
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _run_sanity_check(self)
1343 # run eval step
1344 with torch.no_grad():
-> 1345 val_loop.run()
1346
1347 self._call_callback_hooks("on_sanity_check_end")
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/loop.py in run(self, *args, **kwargs)
198 try:
199 self.on_advance_start(*args, **kwargs)
--> 200 self.advance(*args, **kwargs)
201 self.on_advance_end()
202 self._restarting = False
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/dataloader/evaluation_loop.py in advance(self, *args, **kwargs)
153 if self.num_dataloaders > 1:
154 kwargs["dataloader_idx"] = dataloader_idx
--> 155 dl_outputs = self.epoch_loop.run(self._data_fetcher, dl_max_batches, kwargs)
156
157 # store batch level output per dataloader
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/loop.py in run(self, *args, **kwargs)
198 try:
199 self.on_advance_start(*args, **kwargs)
--> 200 self.advance(*args, **kwargs)
201 self.on_advance_end()
202 self._restarting = False
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/epoch/evaluation_epoch_loop.py in advance(self, data_fetcher, dl_max_batches, kwargs)
141
142 # lightning module methods
--> 143 output = self._evaluation_step(**kwargs)
144 output = self._evaluation_step_end(output)
145
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/epoch/evaluation_epoch_loop.py in _evaluation_step(self, **kwargs)
238 """
239 hook_name = "test_step" if self.trainer.testing else "validation_step"
--> 240 output = self.trainer._call_strategy_hook(hook_name, *kwargs.values())
241
242 return output
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _call_strategy_hook(self, hook_name, *args, **kwargs)
1704
1705 with self.profiler.profile(f"[Strategy]{self.strategy.__class__.__name__}.{hook_name}"):
-> 1706 output = fn(*args, **kwargs)
1707
1708 # restore current_fx when nested context
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/strategies/strategy.py in validation_step(self, *args, **kwargs)
368 with self.precision_plugin.val_step_context():
369 assert isinstance(self.model, ValidationStep)
--> 370 return self.model.validation_step(*args, **kwargs)
371
372 def test_step(self, *args: Any, **kwargs: Any) -> Optional[STEP_OUTPUT]:
/usr/local/lib/python3.7/dist-packages/flash/image/segmentation/model.py in validation_step(self, batch, batch_idx)
151 def validation_step(self, batch: Any, batch_idx: int) -> Any:
152 batch = (batch[DataKeys.INPUT], batch[DataKeys.TARGET])
--> 153 return super().validation_step(batch, batch_idx)
154
155 def test_step(self, batch: Any, batch_idx: int) -> Any:
/usr/local/lib/python3.7/dist-packages/flash/core/model.py in validation_step(self, batch, batch_idx)
423
424 def validation_step(self, batch: Any, batch_idx: int) -> None:
--> 425 output = self.step(batch, batch_idx, self.val_metrics)
426 log_kwargs = {"batch_size": output.get(OutputKeys.BATCH_SIZE, None)} if _PL_GREATER_EQUAL_1_5_0 else {}
427 self.log_dict(
/usr/local/lib/python3.7/dist-packages/flash/core/model.py in step(self, batch, batch_idx, metrics)
360 output = {OutputKeys.OUTPUT: y_hat}
361 y_hat = self.to_loss_format(output[OutputKeys.OUTPUT])
--> 362 losses = {name: l_fn(y_hat, y) for name, l_fn in self.loss_fn.items()}
363
364 y_hat = self.to_metrics_format(output[OutputKeys.OUTPUT])
/usr/local/lib/python3.7/dist-packages/flash/core/model.py in <dictcomp>(.0)
360 output = {OutputKeys.OUTPUT: y_hat}
361 y_hat = self.to_loss_format(output[OutputKeys.OUTPUT])
--> 362 losses = {name: l_fn(y_hat, y) for name, l_fn in self.loss_fn.items()}
363
364 y_hat = self.to_metrics_format(output[OutputKeys.OUTPUT])
/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction, label_smoothing)
3012 if size_average is not None or reduce is not None:
3013 reduction = _Reduction.legacy_get_string(size_average, reduce)
-> 3014 return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index, label_smoothing)
3015
3016
IndexError: Target 255 is out of bounds.
How can I solve this problem? I researched other issues on Stack Overflow and they were all related to the number of classes, but in my case I only want to segment mining fields.
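A hedged sketch of one possible fix (an assumption, since the mask encoding is not shown in the question): cross_entropy requires every target value to lie in [0, num_classes), so a binary L/uint8 mask that stores the mining-field pixels as 255 would need to be remapped to {0, 1} and trained with num_classes=2 (background plus field). Something like this could be run once over the mask folders before building the datamodule:
import os
import numpy as np
from PIL import Image

def remap_mask(path):
    # assumption: 0 = background, 255 = mining field in the original masks
    mask = np.array(Image.open(path))
    mask = (mask > 0).astype(np.uint8)   # 255 -> 1, 0 stays 0
    Image.fromarray(mask).save(path)

for folder in (y_train_dir, y_valid_dir, y_test_dir):   # folders from the question
    for fname in os.listdir(folder):
        remap_mask(os.path.join(folder, fname))
# ...then build SemanticSegmentationData with num_classes=2 instead of 1.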

AttributeError: /home/hp/anaconda3/lib/libxgboost.so: undefined symbol: XGDMatrixSetDenseInfo

I have installed XGBoost with pip3.
When I try to run this line:
clf = GridSearchCV(estimator=xgb.XGBClassifier(use_label_encoder=False), param_grid=params, scoring='accuracy', cv=20).fit(data_train, label_train)
I get this error:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-90-718a4a11a2ba> in <module>
4 startTime = time.time()
5
----> 6 clf = GridSearchCV(estimator=xgb.XGBClassifier(use_label_encoder =False), param_grid=params, scoring = 'accuracy', cv=20).fit(data_train, label_train)
7 print(clf.best_estimator_)
8 print('--------------------------------------------------')
~/anaconda3/lib/python3.8/site-packages/sklearn/utils/validation.py in inner_f(*args, **kwargs)
61 extra_args = len(args) - len(all_args)
62 if extra_args <= 0:
---> 63 return f(*args, **kwargs)
64
65 # extra_args > 0
~/anaconda3/lib/python3.8/site-packages/sklearn/model_selection/_search.py in fit(self, X, y, groups, **fit_params)
878 refit_start_time = time.time()
879 if y is not None:
--> 880 self.best_estimator_.fit(X, y, **fit_params)
881 else:
882 self.best_estimator_.fit(X, **fit_params)
~/anaconda3/lib/python3.8/site-packages/xgboost/core.py in inner_f(*args, **kwargs)
420 for k, arg in zip(sig.parameters, args):
421 kwargs[k] = arg
--> 422 return f(**kwargs)
423
424 return inner_f
~/anaconda3/lib/python3.8/site-packages/xgboost/sklearn.py in fit(self, X, y, sample_weight, base_margin, eval_set, eval_metric, early_stopping_rounds, verbose, xgb_model, sample_weight_eval_set, feature_weights, callbacks)
901 self.n_features_in_ = self._features_count
902
--> 903 train_dmatrix, evals = self._wrap_evaluation_matrices(
904 X, y, group=None, sample_weight=sample_weight, base_margin=base_margin,
905 feature_weights=feature_weights,
~/anaconda3/lib/python3.8/site-packages/xgboost/sklearn.py in _wrap_evaluation_matrices(self, X, y, group, sample_weight, base_margin, feature_weights, eval_set, sample_weight_eval_set, eval_group, label_transform)
263
264 y = label_transform(y)
--> 265 train_dmatrix = DMatrix(data=X, label=y, weight=sample_weight,
266 base_margin=base_margin,
267 missing=self.missing, nthread=self.n_jobs)
~/anaconda3/lib/python3.8/site-packages/xgboost/core.py in __init__(self, data, label, weight, base_margin, missing, silent, feature_names, feature_types, nthread, enable_categorical)
507 self.handle = handle
508
--> 509 self.set_info(label=label, weight=weight, base_margin=base_margin)
510
511 self.feature_names = feature_names
~/anaconda3/lib/python3.8/site-packages/xgboost/core.py in inner_f(*args, **kwargs)
420 for k, arg in zip(sig.parameters, args):
421 kwargs[k] = arg
--> 422 return f(**kwargs)
423
424 return inner_f
~/anaconda3/lib/python3.8/site-packages/xgboost/core.py in set_info(self, label, weight, base_margin, group, label_lower_bound, label_upper_bound, feature_names, feature_types, feature_weights)
528 '''Set meta info for DMatrix.'''
529 if label is not None:
--> 530 self.set_label(label)
531 if weight is not None:
532 self.set_weight(weight)
~/anaconda3/lib/python3.8/site-packages/xgboost/core.py in set_label(self, label)
657 """
658 from .data import dispatch_meta_backend
--> 659 dispatch_meta_backend(self, label, 'label', 'float')
660
661 def set_weight(self, weight):
~/anaconda3/lib/python3.8/site-packages/xgboost/data.py in dispatch_meta_backend(matrix, data, name, dtype)
674 data = data.values.astype('float')
675 assert len(data.shape) == 1 or data.shape[1] == 0 or data.shape[1] == 1
--> 676 _meta_from_numpy(data, name, dtype, handle)
677 return
678 if _is_dlpack(data):
~/anaconda3/lib/python3.8/site-packages/xgboost/data.py in _meta_from_numpy(data, field, dtype, handle)
598 ptr = interface['data'][0]
599 ptr = ctypes.c_void_p(ptr)
--> 600 _check_call(_LIB.XGDMatrixSetDenseInfo(
601 handle,
602 c_str(field),
~/anaconda3/lib/python3.8/ctypes/__init__.py in __getattr__(self, name)
392 if name.startswith('__') and name.endswith('__'):
393 raise AttributeError(name)
--> 394 func = self.__getitem__(name)
395 setattr(self, name, func)
396 return func
~/anaconda3/lib/python3.8/ctypes/__init__.py in __getitem__(self, name_or_ordinal)
397
398 def __getitem__(self, name_or_ordinal):
--> 399 func = self._FuncPtr((name_or_ordinal, self))
400 if not isinstance(name_or_ordinal, int):
401 func.__name__ = name_or_ordinal
AttributeError: /home/hp/anaconda3/lib/libxgboost.so: undefined symbol: XGDMatrixSetDenseInfo
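An undefined symbol like this usually means the Python package and the libxgboost.so shared library come from different XGBoost versions, for example a pip-installed package picking up an older library left behind in ~/anaconda3/lib. A small diagnostic sketch, assuming that mismatch is the cause (find_lib_path is part of the xgboost package):
import xgboost
print(xgboost.__version__)          # Python-side version
print(xgboost.__file__)             # where the package was installed from
from xgboost.libpath import find_lib_path
print(find_lib_path())              # which libxgboost.so actually gets loaded
If the reported paths disagree, uninstalling every copy (via pip and conda) and reinstalling a single one typically resolves the symbol error.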

Contourf's number of color levels in Matplotlib

I'm trying to make a 3D contour plot of my variable distribution using the contourf command in Matplotlib, drawing the contour over three sides of a cube:
plt.close('all')
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.pbaspect = [1.0, 1.0, 1.0]
levels = np.linspace(0, 1, 10)
cset = [[], [], []]
n = 0
for i in range(0, m-1):
    for j in range(0, m-1):
        znum[i, j] = DELTAcrit[n]
        n += 1
cset[0] = ax.contourf(xnum, ynum, znum, zdir='z', offset=2*np.pi,
                      levels=levels, cmap='jet', antialiased=True)
for i in range(0, m-1):
    for j in range(0, m-1):
        znum[i, j] = DELTAcrit[n]
        n += 1
cset[1] = ax.contourf(znum, xnum, ynum, zdir='x', offset=2*np.pi,
                      levels=levels, cmap='jet', antialiased=True)
for i in range(0, m-1):
    for j in range(0, m-1):
        znum[i, j] = DELTAcrit[n]
        n += 1
cset[2] = ax.contourf(xnum, znum, ynum, zdir='y', offset=2*np.pi,
                      levels=levels, cmap='jet', antialiased=True)
# setting 3D-axis-limits:
ax.set_xlim3d(0, 2*np.pi)
ax.set_ylim3d(0, 2*np.pi)
ax.set_zlim3d(0, 2*np.pi)
ax.set_xticks([0., .5*np.pi, np.pi, 1.5*np.pi, 2*np.pi])
ax.set_xticklabels(["$0$", r"$\frac{1}{2}\pi$",
                    r"$\pi$", r"$\frac{3}{2}\pi$", r"$2\pi$"])
ax.set_yticks([0., .5*np.pi, np.pi, 1.5*np.pi, 2*np.pi])
ax.set_yticklabels(["$0$", r"$\frac{1}{2}\pi$",
                    r"$\pi$", r"$\frac{3}{2}\pi$", r"$2\pi$"])
ax.set_zticks([0., .5*np.pi, np.pi, 1.5*np.pi, 2*np.pi])
ax.set_zticklabels(["$0$", r"$\frac{1}{2}\pi$",
                    r"$\pi$", r"$\frac{3}{2}\pi$", r"$2\pi$"])
ax.view_init(25, 45)
fig.subplots_adjust(bottom=-0.2, top=0.7)
cb = fig.colorbar(cset[0], shrink=0.7)
plt.show()
When I set the levels variable to a numpy.linspace with 3 or 5 levels it works fine.
With ten levels, as in the example above, I get the following error:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
c:\users\rapha\pycharmprojects\vortexpy\venv\lib\site-packages\IPython\core\formatters.py in __call__(self, obj)
339 pass
340 else:
--> 341 return printer(obj)
342 # Finally look for special method names
343 method = get_real_method(obj, self.print_method)
c:\users\rapha\pycharmprojects\vortexpy\venv\lib\site-packages\IPython\core\pylabtools.py in <lambda>(fig)
246
247 if 'png' in formats:
--> 248 png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png', **kwargs))
249 if 'retina' in formats or 'png2x' in formats:
250 png_formatter.for_type(Figure, lambda fig: retina_figure(fig, **kwargs))
c:\users\rapha\pycharmprojects\vortexpy\venv\lib\site-packages\IPython\core\pylabtools.py in print_figure(fig, fmt, bbox_inches, **kwargs)
130 FigureCanvasBase(fig)
131
--> 132 fig.canvas.print_figure(bytes_io, **kw)
133 data = bytes_io.getvalue()
134 if fmt == 'svg':
c:\users\rapha\pycharmprojects\vortexpy\venv\lib\site-packages\matplotlib\backend_bases.py in print_figure(self, filename, dpi, facecolor, edgecolor, orientation, format, bbox_inches, pad_inches, bbox_extra_artists, backend, **kwargs)
2191 else suppress())
2192 with ctx:
-> 2193 self.figure.draw(renderer)
2194
2195 bbox_inches = self.figure.get_tightbbox(
c:\users\rapha\pycharmprojects\vortexpy\venv\lib\site-packages\matplotlib\artist.py in draw_wrapper(artist, renderer, *args, **kwargs)
39 renderer.start_filter()
40
---> 41 return draw(artist, renderer, *args, **kwargs)
42 finally:
43 if artist.get_agg_filter() is not None:
c:\users\rapha\pycharmprojects\vortexpy\venv\lib\site-packages\matplotlib\figure.py in draw(self, renderer)
1861
1862 self.patch.draw(renderer)
-> 1863 mimage._draw_list_compositing_images(
1864 renderer, self, artists, self.suppressComposite)
1865
c:\users\rapha\pycharmprojects\vortexpy\venv\lib\site-packages\matplotlib\image.py in _draw_list_compositing_images(renderer, parent, artists, suppress_composite)
129 if not_composite or not has_images:
130 for a in artists:
--> 131 a.draw(renderer)
132 else:
133 # Composite any adjacent images together
c:\users\rapha\pycharmprojects\vortexpy\venv\lib\site-packages\matplotlib\artist.py in draw_wrapper(artist, renderer, *args, **kwargs)
39 renderer.start_filter()
40
---> 41 return draw(artist, renderer, *args, **kwargs)
42 finally:
43 if artist.get_agg_filter() is not None:
c:\users\rapha\pycharmprojects\vortexpy\venv\lib\site-packages\mpl_toolkits\mplot3d\axes3d.py in draw(self, renderer)
443 for axis in self._get_axis_list()) + 1
444 for i, col in enumerate(
--> 445 sorted(self.collections,
446 key=lambda col: col.do_3d_projection(renderer),
447 reverse=True)):
c:\users\rapha\pycharmprojects\vortexpy\venv\lib\site-packages\mpl_toolkits\mplot3d\axes3d.py in <lambda>(col)
444 for i, col in enumerate(
445 sorted(self.collections,
--> 446 key=lambda col: col.do_3d_projection(renderer),
447 reverse=True)):
448 col.zorder = zorder_offset + i
c:\users\rapha\pycharmprojects\vortexpy\venv\lib\site-packages\mpl_toolkits\mplot3d\art3d.py in do_3d_projection(self, renderer)
766 key=lambda x: x[0], reverse=True)
767
--> 768 zzs, segments_2d, self._facecolors2d, self._edgecolors2d, idxs = \
769 zip(*z_segments_2d)
770
ValueError: not enough values to unpack (expected 5, got 0)
Thanks for the help!
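A hedged guess at the cause, with a small sketch: with ten evenly spaced levels over [0, 1], some contour bands may contain no data at all, and an empty filled-contour collection is what leaves do_3d_projection with nothing to unpack. Deriving the levels from the data actually being plotted avoids empty bands (DELTAcrit is the array from the question):
import numpy as np
vals = np.asarray(DELTAcrit)
levels = np.linspace(vals.min(), vals.max(), 10)   # every band now intersects the data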

folium heatmap with `Object of type 'int64' is not JSON serializable` error

I get an "Object of type 'int64' is not JSON serializable" error when I try to generate a heatmap with folium.
I am running my Jupyter notebook in Anaconda with Python 3.6, and the folium version is '0.9.1'.
df_2y_cons_LatLo.dtypes:
Latitude      float64
Longitude     float64
Descriptor      int64
import folium
from folium.plugins import HeatMap

def generateBaseMap(default_location=[40.704652, -73.923688], default_zoom_start=11):
    base_map = folium.Map(location=default_location, control_scale=True, zoom_start=default_zoom_start)
    return base_map

base_map = generateBaseMap()
hm = HeatMap(list(zip(df_2y_cons_LatLo.Latitude.values, df_2y_cons_LatLo.Longitude.values, df_2y_cons_LatLo.Descriptor.values)))
base_map.add_child(hm)
I expected a heatmap to show, but running the code above gives me the error mentioned above.
TypeError Traceback (most recent call last)
~\Anaconda3\lib\site-packages\IPython\core\formatters.py in __call__(self, obj)
343 method = get_real_method(obj, self.print_method)
344 if method is not None:
--> 345 return method()
346 return None
347 else:
~\Anaconda3\lib\site-packages\folium\folium.py in _repr_html_(self, **kwargs)
291 self._parent = None
292 else:
--> 293 out = self._parent._repr_html_(**kwargs)
294 return out
295
~\Anaconda3\lib\site-packages\branca\element.py in _repr_html_(self, **kwargs)
326
327 """
--> 328 html = self.render(**kwargs)
329 html = "data:text/html;charset=utf-8;base64," + base64.b64encode(html.encode('utf8')).decode('utf8') # noqa
330
~\Anaconda3\lib\site-packages\branca\element.py in render(self, **kwargs)
319 """Renders the HTML representation of the element."""
320 for name, child in self._children.items():
--> 321 child.render(**kwargs)
322 return self._template.render(this=self, kwargs=kwargs)
323
~\Anaconda3\lib\site-packages\folium\folium.py in render(self, **kwargs)
368 '</style>'), name='map_style')
369
--> 370 super(Map, self).render(**kwargs)
371
372 def fit_bounds(self, bounds, padding_top_left=None,
~\Anaconda3\lib\site-packages\branca\element.py in render(self, **kwargs)
631
632 for name, element in self._children.items():
--> 633 element.render(**kwargs)
~\Anaconda3\lib\site-packages\folium\plugins\heat_map.py in render(self, **kwargs)
79
80 def render(self, **kwargs):
---> 81 super(HeatMap, self).render(**kwargs)
82
83 figure = self.get_root()
~\Anaconda3\lib\site-packages\branca\element.py in render(self, **kwargs)
627 script = self._template.module.__dict__.get('script', None)
628 if script is not None:
--> 629 figure.script.add_child(Element(script(self, kwargs)),
630 name=self.get_name())
631
~\Anaconda3\lib\site-packages\jinja2\runtime.py in __call__(self, *args, **kwargs)
573 (self.name, len(self.arguments)))
574
--> 575 return self._invoke(arguments, autoescape)
576
577 def _invoke(self, arguments, autoescape):
~\Anaconda3\lib\site-packages\jinja2\asyncsupport.py in _invoke(self, arguments, autoescape)
108 def _invoke(self, arguments, autoescape):
109 if not self._environment.is_async:
--> 110 return original_invoke(self, arguments, autoescape)
111 return async_invoke(self, arguments, autoescape)
112 return update_wrapper(_invoke, original_invoke)
~\Anaconda3\lib\site-packages\jinja2\runtime.py in _invoke(self, arguments, autoescape)
577 def _invoke(self, arguments, autoescape):
578 """This method is being swapped out by the async implementation."""
--> 579 rv = self._func(*arguments)
580 if autoescape:
581 rv = Markup(rv)
<template> in macro(l_1_this, l_1_kwargs)
~\Anaconda3\lib\site-packages\jinja2\filters.py in do_tojson(eval_ctx, value, indent)
1076 options = dict(options)
1077 options['indent'] = indent
-> 1078 return htmlsafe_json_dumps(value, dumper=dumper, **options)
1079
1080
~\Anaconda3\lib\site-packages\jinja2\utils.py in htmlsafe_json_dumps(obj, dumper, **kwargs)
563 if dumper is None:
564 dumper = json.dumps
--> 565 rv = dumper(obj, **kwargs) \
566 .replace(u'<', u'\\u003c') \
567 .replace(u'>', u'\\u003e') \
~\Anaconda3\lib\json\__init__.py in dumps(obj, skipkeys, ensure_ascii, check_circular, allow_nan, cls, indent, separators, default, sort_keys, **kw)
236 check_circular=check_circular, allow_nan=allow_nan, indent=indent,
237 separators=separators, default=default, sort_keys=sort_keys,
--> 238 **kw).encode(obj)
239
240
~\Anaconda3\lib\json\encoder.py in encode(self, o)
197 # exceptions aren't as detailed. The list call should be roughly
198 # equivalent to the PySequence_Fast that ''.join() would do.
--> 199 chunks = self.iterencode(o, _one_shot=True)
200 if not isinstance(chunks, (list, tuple)):
201 chunks = list(chunks)
~\Anaconda3\lib\json\encoder.py in iterencode(self, o, _one_shot)
255 self.key_separator, self.item_separator, self.sort_keys,
256 self.skipkeys, _one_shot)
--> 257 return _iterencode(o, 0)
258
259 def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
~\Anaconda3\lib\json\encoder.py in default(self, o)
178 """
179 raise TypeError("Object of type '%s' is not JSON serializable" %
--> 180 o.__class__.__name__)
181
182 def encode(self, o):
TypeError: Object of type 'int64' is not JSON serializable
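A sketch of the usual workaround (assuming the Descriptor column is intended as the heat weight): the standard json encoder cannot serialize numpy int64 scalars, so convert the values to plain Python floats before building the HeatMap:
from folium.plugins import HeatMap

data = df_2y_cons_LatLo[['Latitude', 'Longitude', 'Descriptor']].astype(float).values.tolist()
base_map.add_child(HeatMap(data))   # plain Python floats are JSON serializable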

Pandas cannot plot timeseries imported from Excel

This is my DataFrame, imported from an Excel .xls file:
                       0       1
664  2017-12-07 19:08:54  1.1377
665  2017-12-07 19:10:31  1.1374
666  2017-12-07 19:12:17  1.1377
667  2017-12-07 19:13:28  1.1377
668  2017-12-07 19:15:25  1.1379
I think it is correctly typed:
0 datetime64[ns]
1 float64
dtype: object
However, it does not plot with ax = _df.plot() because of the error below. Any idea why?
This is my first hour with pandas, sorry if it's a classic question, but I only found irrelevant answers on this ValueError: ordinal must be >= 1.
Thanks for your inputs.
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-24-ca306a836171> in <module>()
----> 1 ax = _df.plot()
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/pandas/plotting/_core.py in __call__(self, x, y, kind, ax, subplots, sharex, sharey, layout, figsize, use_index, title, grid, legend, style, logx, logy, loglog, xticks, yticks, xlim, ylim, rot, fontsize, colormap, table, yerr, xerr, secondary_y, sort_columns, **kwds)
2671 fontsize=fontsize, colormap=colormap, table=table,
2672 yerr=yerr, xerr=xerr, secondary_y=secondary_y,
-> 2673 sort_columns=sort_columns, **kwds)
2674 __call__.__doc__ = plot_frame.__doc__
2675
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/pandas/plotting/_core.py in plot_frame(data, x, y, kind, ax, subplots, sharex, sharey, layout, figsize, use_index, title, grid, legend, style, logx, logy, loglog, xticks, yticks, xlim, ylim, rot, fontsize, colormap, table, yerr, xerr, secondary_y, sort_columns, **kwds)
1898 yerr=yerr, xerr=xerr,
1899 secondary_y=secondary_y, sort_columns=sort_columns,
-> 1900 **kwds)
1901
1902
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/pandas/plotting/_core.py in _plot(data, x, y, subplots, ax, kind, **kwds)
1725 plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
1726
-> 1727 plot_obj.generate()
1728 plot_obj.draw()
1729 return plot_obj.result
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/pandas/plotting/_core.py in generate(self)
258
259 for ax in self.axes:
--> 260 self._post_plot_logic_common(ax, self.data)
261 self._post_plot_logic(ax, self.data)
262
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/pandas/plotting/_core.py in _post_plot_logic_common(self, ax, data)
393 self._apply_axis_properties(ax.xaxis, rot=self.rot,
394 fontsize=self.fontsize)
--> 395 self._apply_axis_properties(ax.yaxis, fontsize=self.fontsize)
396
397 if hasattr(ax, 'right_ax'):
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/pandas/plotting/_core.py in _apply_axis_properties(self, axis, rot, fontsize)
466
467 def _apply_axis_properties(self, axis, rot=None, fontsize=None):
--> 468 labels = axis.get_majorticklabels() + axis.get_minorticklabels()
469 for label in labels:
470 if rot is not None:
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/axis.py in get_majorticklabels(self)
1189 def get_majorticklabels(self):
1190 'Return a list of Text instances for the major ticklabels'
-> 1191 ticks = self.get_major_ticks()
1192 labels1 = [tick.label1 for tick in ticks if tick.label1On]
1193 labels2 = [tick.label2 for tick in ticks if tick.label2On]
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/axis.py in get_major_ticks(self, numticks)
1318 'get the tick instances; grow as necessary'
1319 if numticks is None:
-> 1320 numticks = len(self.get_major_locator()())
1321 if len(self.majorTicks) < numticks:
1322 # update the new tick label properties from the old
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/dates.py in __call__(self)
984 def __call__(self):
985 'Return the locations of the ticks'
--> 986 self.refresh()
987 return self._locator()
988
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/dates.py in refresh(self)
1004 def refresh(self):
1005 'Refresh internal information based on current limits.'
-> 1006 dmin, dmax = self.viewlim_to_dt()
1007 self._locator = self.get_locator(dmin, dmax)
1008
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/dates.py in viewlim_to_dt(self)
761 vmin, vmax = vmax, vmin
762
--> 763 return num2date(vmin, self.tz), num2date(vmax, self.tz)
764
765 def _get_unit(self):
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/dates.py in num2date(x, tz)
399 tz = _get_rc_timezone()
400 if not cbook.iterable(x):
--> 401 return _from_ordinalf(x, tz)
402 else:
403 x = np.asarray(x)
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/dates.py in _from_ordinalf(x, tz)
252
253 ix = int(x)
--> 254 dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
255
256 remainder = float(x) - ix
ValueError: ordinal must be >= 1
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/IPython/core/formatters.py in __call__(self, obj)
330 pass
331 else:
--> 332 return printer(obj)
333 # Finally look for special method names
334 method = get_real_method(obj, self.print_method)
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/IPython/core/pylabtools.py in <lambda>(fig)
235
236 if 'png' in formats:
--> 237 png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png', **kwargs))
238 if 'retina' in formats or 'png2x' in formats:
239 png_formatter.for_type(Figure, lambda fig: retina_figure(fig, **kwargs))
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/IPython/core/pylabtools.py in print_figure(fig, fmt, bbox_inches, **kwargs)
119
120 bytes_io = BytesIO()
--> 121 fig.canvas.print_figure(bytes_io, **kw)
122 data = bytes_io.getvalue()
123 if fmt == 'svg':
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/backend_bases.py in print_figure(self, filename, dpi, facecolor, edgecolor, orientation, format, **kwargs)
2198 orientation=orientation,
2199 dryrun=True,
-> 2200 **kwargs)
2201 renderer = self.figure._cachedRenderer
2202 bbox_inches = self.figure.get_tightbbox(renderer)
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/backends/backend_agg.py in print_png(self, filename_or_obj, *args, **kwargs)
543
544 def print_png(self, filename_or_obj, *args, **kwargs):
--> 545 FigureCanvasAgg.draw(self)
546 renderer = self.get_renderer()
547 original_dpi = renderer.dpi
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/backends/backend_agg.py in draw(self)
462
463 try:
--> 464 self.figure.draw(self.renderer)
465 finally:
466 RendererAgg.lock.release()
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/artist.py in draw_wrapper(artist, renderer, *args, **kwargs)
61 def draw_wrapper(artist, renderer, *args, **kwargs):
62 before(artist, renderer)
---> 63 draw(artist, renderer, *args, **kwargs)
64 after(artist, renderer)
65
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/figure.py in draw(self, renderer)
1142
1143 mimage._draw_list_compositing_images(
-> 1144 renderer, self, dsu, self.suppressComposite)
1145
1146 renderer.close_group('figure')
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/image.py in _draw_list_compositing_images(renderer, parent, dsu, suppress_composite)
137 if not_composite or not has_images:
138 for zorder, a in dsu:
--> 139 a.draw(renderer)
140 else:
141 # Composite any adjacent images together
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/artist.py in draw_wrapper(artist, renderer, *args, **kwargs)
61 def draw_wrapper(artist, renderer, *args, **kwargs):
62 before(artist, renderer)
---> 63 draw(artist, renderer, *args, **kwargs)
64 after(artist, renderer)
65
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/axes/_base.py in draw(self, renderer, inframe)
2424 renderer.stop_rasterizing()
2425
-> 2426 mimage._draw_list_compositing_images(renderer, self, dsu)
2427
2428 renderer.close_group('axes')
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/image.py in _draw_list_compositing_images(renderer, parent, dsu, suppress_composite)
137 if not_composite or not has_images:
138 for zorder, a in dsu:
--> 139 a.draw(renderer)
140 else:
141 # Composite any adjacent images together
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/artist.py in draw_wrapper(artist, renderer, *args, **kwargs)
61 def draw_wrapper(artist, renderer, *args, **kwargs):
62 before(artist, renderer)
---> 63 draw(artist, renderer, *args, **kwargs)
64 after(artist, renderer)
65
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/axis.py in draw(self, renderer, *args, **kwargs)
1134 renderer.open_group(__name__)
1135
-> 1136 ticks_to_draw = self._update_ticks(renderer)
1137 ticklabelBoxes, ticklabelBoxes2 = self._get_tick_bboxes(ticks_to_draw,
1138 renderer)
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/axis.py in _update_ticks(self, renderer)
967
968 interval = self.get_view_interval()
--> 969 tick_tups = [t for t in self.iter_ticks()]
970 if self._smart_bounds:
971 # handle inverted limits
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/axis.py in <listcomp>(.0)
967
968 interval = self.get_view_interval()
--> 969 tick_tups = [t for t in self.iter_ticks()]
970 if self._smart_bounds:
971 # handle inverted limits
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/axis.py in iter_ticks(self)
910 Iterate through all of the major and minor ticks.
911 """
--> 912 majorLocs = self.major.locator()
913 majorTicks = self.get_major_ticks(len(majorLocs))
914 self.major.formatter.set_locs(majorLocs)
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/dates.py in __call__(self)
984 def __call__(self):
985 'Return the locations of the ticks'
--> 986 self.refresh()
987 return self._locator()
988
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/dates.py in refresh(self)
1004 def refresh(self):
1005 'Refresh internal information based on current limits.'
-> 1006 dmin, dmax = self.viewlim_to_dt()
1007 self._locator = self.get_locator(dmin, dmax)
1008
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/dates.py in viewlim_to_dt(self)
761 vmin, vmax = vmax, vmin
762
--> 763 return num2date(vmin, self.tz), num2date(vmax, self.tz)
764
765 def _get_unit(self):
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/dates.py in num2date(x, tz)
399 tz = _get_rc_timezone()
400 if not cbook.iterable(x):
--> 401 return _from_ordinalf(x, tz)
402 else:
403 x = np.asarray(x)
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/matplotlib/dates.py in _from_ordinalf(x, tz)
252
253 ix = int(x)
--> 254 dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
255
256 remainder = float(x) - ix
ValueError: ordinal must be >= 1
<matplotlib.figure.Figure at 0x118f1e978>
You probably want to plot column 1 against column 0, i.e. the numbers against the dates? This would be done via
df.plot(x=0, y=1)
Your columns are unnamed, so you may also name them and set the index to something useful (or, as below, not so useful ;-))
df.columns = ["0", "1"]
df = df.set_index("0")
df.plot()
Both should give you the same line plot of the prices against the dates.
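Alternatively, a sketch of doing the naming at import time (the file name here is hypothetical): read_excel can assign column names and use the datetime column as the index directly, after which df.plot() works without further arguments:
import pandas as pd
df = pd.read_excel("prices.xls", header=None, names=["time", "price"], index_col=0)
df.plot()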
