Reconstruct Image from overlapping patches of image - python-3.x

I have used tf.extract_image_patches() to get a tensor of overlapping patches from the image, as described in this link. The answer in that link suggests using tf.space_to_depth() to reconstruct the image from the overlapping patches. But this does not give the desired results in my case; upon researching, I found that tf.space_to_depth() does not deal with overlapping blocks. My code looks like:
import tensorflow as tf
import numpy as np
c = 3
height = 3900
width = 6000
ksizes = [1, 150, 150, 1]
strides = [1, 75, 75, 1]
image = ...  # image of shape [1, height, width, 3]
patches = tf.extract_image_patches(image, ksizes=ksizes, strides=strides, rates=[1, 1, 1, 1], padding='VALID')
patches = tf.reshape(patches, [-1, 150, 150, 3])
reconstructed = tf.reshape(patches, [1, height, width, 3])
rec_new = tf.space_to_depth(reconstructed,75)
rec_new = tf.reshape(rec_new,[height,width,3])
This gives me the following error:
InvalidArgumentError                      Traceback (most recent call last)
D:\AnacondaIDE\lib\site-packages\tensorflow\python\framework\common_shapes.py in _call_cpp_shape_fn_impl(op, input_tensors_needed, input_tensors_as_shapes_needed, require_shape_fn)
    653         graph_def_version, node_def_str, input_shapes, input_tensors,
--> 654         input_tensors_as_shapes, status)
    655   except errors.InvalidArgumentError as err:

D:\AnacondaIDE\lib\contextlib.py in __exit__(self, type, value, traceback)
     87             try:
---> 88                 next(self.gen)
     89             except StopIteration:

D:\AnacondaIDE\lib\site-packages\tensorflow\python\framework\errors_impl.py in raise_exception_on_not_ok_status()
    465           compat.as_text(pywrap_tensorflow.TF_Message(status)),
--> 466           pywrap_tensorflow.TF_GetCode(status))
    467   finally:

InvalidArgumentError: Dimension size must be evenly divisible by 70200000 but is 271957500 for 'Reshape_22' (op: 'Reshape') with input shapes: [4029,150,150,3], [4] and with input tensors computed as partial shapes: input1 = [?,3900,6000,3].

During handling of the above exception, another exception occurred:

ValueError                                Traceback (most recent call last)
 in ()
----> 1 reconstructed = tf.reshape(features, [-1, height, width, channel])
      2 rec_new = tf.space_to_depth(reconstructed,75)
      3 rec_new = tf.reshape(rec_new,[h,h,c])

D:\AnacondaIDE\lib\site-packages\tensorflow\python\ops\gen_array_ops.py in reshape(tensor, shape, name)
   2617   """
   2618   result = _op_def_lib.apply_op("Reshape", tensor=tensor, shape=shape,
-> 2619                                 name=name)
   2620   return result
   2621

D:\AnacondaIDE\lib\site-packages\tensorflow\python\framework\op_def_library.py in apply_op(self, op_type_name, name, **keywords)
    765         op = g.create_op(op_type_name, inputs, output_types, name=scope,
    766                          input_types=input_types, attrs=attr_protos,
--> 767                          op_def=op_def)
    768         if output_structure:
    769           outputs = op.outputs

D:\AnacondaIDE\lib\site-packages\tensorflow\python\framework\ops.py in create_op(self, op_type, inputs, dtypes, input_types, name, attrs, op_def, compute_shapes, compute_device)
   2630                     original_op=self._default_original_op, op_def=op_def)
   2631     if compute_shapes:
-> 2632       set_shapes_for_outputs(ret)
   2633     self._add_op(ret)
   2634     self._record_op_seen_by_control_dependencies(ret)

D:\AnacondaIDE\lib\site-packages\tensorflow\python\framework\ops.py in set_shapes_for_outputs(op)
   1909     shape_func = _call_cpp_shape_fn_and_require_op
   1910
-> 1911   shapes = shape_func(op)
   1912   if shapes is None:
   1913     raise RuntimeError(

D:\AnacondaIDE\lib\site-packages\tensorflow\python\framework\ops.py in call_with_requiring(op)
   1859
   1860 def call_with_requiring(op):
-> 1861   return call_cpp_shape_fn(op, require_shape_fn=True)
   1862
   1863 _call_cpp_shape_fn_and_require_op = call_with_requiring

D:\AnacondaIDE\lib\site-packages\tensorflow\python\framework\common_shapes.py in call_cpp_shape_fn(op, require_shape_fn)
    593     res = _call_cpp_shape_fn_impl(op, input_tensors_needed,
    594                                   input_tensors_as_shapes_needed,
--> 595                                   require_shape_fn)
    596     if not isinstance(res, dict):
    597       # Handles the case where _call_cpp_shape_fn_impl calls unknown_shape(op).

D:\AnacondaIDE\lib\site-packages\tensorflow\python\framework\common_shapes.py in _call_cpp_shape_fn_impl(op, input_tensors_needed, input_tensors_as_shapes_needed, require_shape_fn)
    657       missing_shape_fn = True
    658     else:
--> 659       raise ValueError(err.message)
    660
    661   if missing_shape_fn:

ValueError: Dimension size must be evenly divisible by 70200000 but is 271957500 for 'Reshape_22' (op: 'Reshape') with input shapes: [4029,150,150,3], [4] and with input tensors computed as partial shapes: input1 = [?,3900,6000,3].
I know this error is due to incompatible dimensions, but it should be that way, right? Please help me solve this.

I guess the problem is that in the link you posted the author uses the same value for strides and ksizes, while you are using strides equal to half of ksizes. That is why the dimensions do not match: tf.space_to_depth() only works with non-overlapping blocks, so you need to write the logic that shrinks the patches before gluing them back together, for instance by selecting the central square of each patch, as in the sketch below.
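A minimal sketch of that idea, assuming ksizes of 150 and strides of 75 as in your code: keep only the central 75x75 of every 150x150 patch, so the kept regions tile without overlap (at the cost of losing a ~37-pixel border of the image):
import tensorflow as tf

n_rows = (height - 150) // 75 + 1   # 51 for height = 3900
n_cols = (width - 150) // 75 + 1    # 79 for width = 6000 (51 * 79 = 4029 patches)

patches = tf.reshape(patches, [n_rows, n_cols, 150, 150, c])
centers = patches[:, :, 37:112, 37:112, :]        # central 75x75 of each patch
centers = tf.transpose(centers, [0, 2, 1, 3, 4])  # -> [n_rows, 75, n_cols, 75, c]
reconstructed = tf.reshape(centers, [1, n_rows * 75, n_cols * 75, c])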

Related

Cartopy set_extent() errors

I'm trying to draw a contour plot for a certain area on a map. I can create the contour on the map with no problem, but when I set the extent as below, with or without the contour plot, I get the error shown underneath (Cartopy version 0.20.3):
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
p = ccrs.Mercator()
ax = plt.axes(projection=p)
ax.set_extent([-140, -60, 20, 70], crs=ccrs.Mercator())
ax.coastlines()
ax.gridlines()
plt.show()
TypeError                                 Traceback (most recent call last)
Input In [5], in <cell line: 7>()
5 p = ccrs.Mercator()
6 ax = plt.axes(projection=p)
----> 7 ax.set_extent([-140, -60, 20, 70], crs=ccrs.Mercator())
8 ax.coastlines()
9 ax.gridlines()
File ~\Anaconda3\envs\geospatial\lib\site-packages\cartopy\mpl\geoaxes.py:904, in GeoAxes.set_extent(self, extents, crs)
901 projected = boundary
903 if projected is None:
--> 904 projected = self.projection.project_geometry(domain_in_crs, crs)
905 try:
906 # This might fail with an unhelpful error message ('need more
907 # than 0 values to unpack') if the specified extents fall outside
908 # the projection extents, so try and give a better error message.
909 x1, y1, x2, y2 = projected.bounds
File ~\Anaconda3\envs\geospatial\lib\site-packages\cartopy\crs.py:805, in Projection.project_geometry(self, geometry, src_crs)
803 if not method_name:
804 raise ValueError(f'Unsupported geometry type {geom_type!r}')
--> 805 return getattr(self, method_name)(geometry, src_crs)
File ~\Anaconda3\envs\geospatial\lib\site-packages\cartopy\crs.py:811, in Projection._project_line_string(self, geometry, src_crs)
810 def _project_line_string(self, geometry, src_crs):
--> 811 return cartopy.trace.project_linear(geometry, src_crs, self)
File lib/cartopy/trace.pyx:628, in cartopy.trace.project_linear()
File lib/cartopy/trace.pyx:100, in cartopy.trace.geos_from_shapely()
TypeError: an integer is required
Change
ax.set_extent([-140, -60, 20, 70], crs=ccrs.Mercator())
to
ax.set_extent([-140, -60, 20, 70], crs=ccrs.PlateCarree())
The crs argument describes the coordinate system of the extent values you pass in, and [-140, -60, 20, 70] are plain longitude/latitude degrees, which do not match the Mercator crs.
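For completeness, your snippet with just that one change:
import cartopy.crs as ccrs
import matplotlib.pyplot as plt

p = ccrs.Mercator()  # projection the map is drawn in
ax = plt.axes(projection=p)
# the extent values are lon/lat degrees, so declare them as PlateCarree
ax.set_extent([-140, -60, 20, 70], crs=ccrs.PlateCarree())
ax.coastlines()
ax.gridlines()
plt.show()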

How do I pass the values to Catboost?

I'm trying to work with CatBoost and I've got a problem that I'm really stuck with right now. I have a dataframe with 28 columns, 2 of them categorical. The numerical columns contain some whole and some fractional numbers, and also some 0.00 values that represent not missing values but actual zeros (like 1 - 1 = 0).
I'm trying to run this:
train_cl = cb.Pool(data=ret_df.iloc[:580000, :-1], label=ret_df.iloc[:580000, -1], cat_features=cats)
evl_cl = cb.Pool(data=ret_df.iloc[580000:, :-1], label=ret_df.iloc[580000:, -1], cat_features=cats)
But I have this error
---------------------------------------------------------------------------
CatBoostError Traceback (most recent call last)
<ipython-input-112-a515b0ab357b> in <module>
1 train_cl = cb.Pool(data=ret_df.iloc[:580000, :-1], label=ret_df.iloc[:580000, -1], cat_features=cats)
----> 2 evl_cl = cb.Pool(data=ret_df.iloc[580000:, :-1], label=ret_df.iloc[580000:, -1], cat_features=cats)
~\AppData\Local\Programs\Python\Python36\lib\site-packages\catboost\core.py in __init__(self, data, label, cat_features, text_features, embedding_features, column_description, pairs, delimiter, has_header, ignore_csv_quoting, weight, group_id, group_weight, subgroup_id, pairs_weight, baseline, feature_names, thread_count, log_cout, log_cerr)
615 )
616
--> 617 self._init(data, label, cat_features, text_features, embedding_features, pairs, weight, group_id, group_weight, subgroup_id, pairs_weight, baseline, feature_names, thread_count)
618 super(Pool, self).__init__()
619
~\AppData\Local\Programs\Python\Python36\lib\site-packages\catboost\core.py in _init(self, data, label, cat_features, text_features, embedding_features, pairs, weight, group_id, group_weight, subgroup_id, pairs_weight, baseline, feature_names, thread_count)
1081 if label is not None:
1082 self._check_label_type(label)
-> 1083 self._check_label_empty(label)
1084 label = self._label_if_pandas_to_numpy(label)
1085 if len(np.shape(label)) == 1:
~\AppData\Local\Programs\Python\Python36\lib\site-packages\catboost\core.py in _check_label_empty(self, label)
723 """
724 if len(label) == 0:
--> 725 raise CatBoostError("Labels variable is empty.")
726
727 def _check_label_shape(self, label, samples_count):
CatBoostError: Labels variable is empty.
I've googled this problem but found nothing. My hypothesis is that the 0.00 values are causing it, but I don't know how to solve this because I literally can't replace these values with anything.
Please help me!
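One thing worth checking, given the traceback: Pool raises "Labels variable is empty." when len(label) == 0, i.e. when ret_df.iloc[580000:, -1] selects zero rows, which would be the case if ret_df has at most 580000 rows. A quick sanity check (a sketch; the 0.8 split fraction is just an illustrative choice):
import catboost as cb

print(len(ret_df))  # if this is <= 580000, the second slice is empty

# derive the split point from the actual length instead of hard-coding it
split = int(len(ret_df) * 0.8)
train_cl = cb.Pool(data=ret_df.iloc[:split, :-1], label=ret_df.iloc[:split, -1], cat_features=cats)
evl_cl = cb.Pool(data=ret_df.iloc[split:, :-1], label=ret_df.iloc[split:, -1], cat_features=cats)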

I keep getting "TypeError: only integer scalar arrays can be converted to a scalar index" while using custom-defined metric in KNeighborsClassifier

I am using a custom-defined metric in SKlearn's KNeighborsClassifier. Here's my code:
def chi_squared(x,y):
    return np.divide(np.square(np.subtract(x,y)), np.sum(x,y))
The function above is an implementation of the chi-squared distance function. I have used NumPy functions because, according to the scikit-learn docs, the metric function takes two one-dimensional numpy arrays.
I have passed the chi_squared function as an argument to KNeighborsClassifier().
knn = KNeighborsClassifier(algorithm='ball_tree', metric=chi_squared)
However, I keep getting the following error:
TypeError Traceback (most recent call last)
<ipython-input-29-d2a365ebb538> in <module>
4
5 knn = KNeighborsClassifier(algorithm='ball_tree', metric=chi_squared)
----> 6 knn.fit(X_train, Y_train)
7 predictions = knn.predict(X_test)
8 print(accuracy_score(Y_test, predictions))
~/.local/lib/python3.8/site-packages/sklearn/neighbors/_classification.py in fit(self, X, y)
177 The fitted k-nearest neighbors classifier.
178 """
--> 179 return self._fit(X, y)
180
181 def predict(self, X):
~/.local/lib/python3.8/site-packages/sklearn/neighbors/_base.py in _fit(self, X, y)
497
498 if self._fit_method == 'ball_tree':
--> 499 self._tree = BallTree(X, self.leaf_size,
500 metric=self.effective_metric_,
501 **self.effective_metric_params_)
sklearn/neighbors/_binary_tree.pxi in sklearn.neighbors._ball_tree.BinaryTree.__init__()
sklearn/neighbors/_binary_tree.pxi in sklearn.neighbors._ball_tree.BinaryTree._recursive_build()
sklearn/neighbors/_ball_tree.pyx in sklearn.neighbors._ball_tree.init_node()
sklearn/neighbors/_binary_tree.pxi in sklearn.neighbors._ball_tree.BinaryTree.rdist()
sklearn/neighbors/_dist_metrics.pyx in sklearn.neighbors._dist_metrics.DistanceMetric.rdist()
sklearn/neighbors/_dist_metrics.pyx in sklearn.neighbors._dist_metrics.PyFuncDistance.dist()
sklearn/neighbors/_dist_metrics.pyx in sklearn.neighbors._dist_metrics.PyFuncDistance._dist()
<ipython-input-29-d2a365ebb538> in chi_squared(x, y)
1 def chi_squared(x,y):
----> 2 return np.divide(np.square(np.subtract(x,y)), np.sum(x,y))
3
4
5 knn = KNeighborsClassifier(algorithm='ball_tree', metric=chi_squared)
<__array_function__ internals> in sum(*args, **kwargs)
~/.local/lib/python3.8/site-packages/numpy/core/fromnumeric.py in sum(a, axis, dtype, out, keepdims, initial, where)
2239 return res
2240
-> 2241 return _wrapreduction(a, np.add, 'sum', axis, dtype, out, keepdims=keepdims,
2242 initial=initial, where=where)
2243
~/.local/lib/python3.8/site-packages/numpy/core/fromnumeric.py in _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs)
85 return reduction(axis=axis, out=out, **passkwargs)
86
---> 87 return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
88
89
TypeError: only integer scalar arrays can be converted to a scalar index
I can reproduce your error message with:
In [173]: x=np.arange(3); y=np.array([2,3,4])
In [174]: np.sum(x,y)
Traceback (most recent call last):
File "<ipython-input-174-1a1a267ebd82>", line 1, in <module>
np.sum(x,y)
File "<__array_function__ internals>", line 5, in sum
File "/usr/local/lib/python3.8/dist-packages/numpy/core/fromnumeric.py", line 2247, in sum
return _wrapreduction(a, np.add, 'sum', axis, dtype, out, keepdims=keepdims,
File "/usr/local/lib/python3.8/dist-packages/numpy/core/fromnumeric.py", line 87, in _wrapreduction
return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
TypeError: only integer scalar arrays can be converted to a scalar index
Correct use(s) of np.sum:
In [175]: np.sum(x)
Out[175]: 3
In [177]: np.sum(np.arange(6).reshape(2,3), axis=0)
Out[177]: array([3, 5, 7])
In [178]: np.sum(np.arange(6).reshape(2,3), 0)
Out[178]: array([3, 5, 7])
(re)read the np.sum docs if necessary!
Using np.add instead of np.sum:
In [179]: np.add(x,y)
Out[179]: array([2, 4, 6])
In [180]: x+y
Out[180]: array([2, 4, 6])
The following should be equivalent:
np.divide(np.square(np.subtract(x,y)), np.add(x,y))
(x-y)**2/(x+y)
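Putting this together: a KNeighborsClassifier metric callable must also return a single scalar distance, so a corrected chi-squared metric might look like the sketch below (the eps term is my addition, to avoid 0/0 when a feature is zero in both vectors):
import numpy as np
from sklearn.neighbors import KNeighborsClassifier

def chi_squared(x, y):
    eps = 1e-10  # guard against division by zero
    return np.sum((x - y) ** 2 / (x + y + eps))

knn = KNeighborsClassifier(algorithm='ball_tree', metric=chi_squared)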

how can I solve "TypeError ufunc 'isfinite'" in jupyter notebook

I have a type error. It comes from stocker, and I don't know where in the code the error is.
The environment:
Python 3.7
numpy and the other libraries are the latest versions
TypeError Traceback (most recent call last)
<ipython-input-6-00eba8b188be> in <module>
----> 3 amazon.changepoint_prior_analysis(changepoint_priors=[0.001, 0.01, 0.06, 0.2])
~\Desktop\master\stocker.py in changepoint_prior_analysis(self, changepoint_priors, colors)
427 ax.fill_between(predictions['ds'].dt.to_pydatetime(), predictions['%.3f_yhat_upper' % prior],
428 predictions['%.3f_yhat_lower' % prior], facecolor = color_dict[prior],
--> 429 alpha = 0.3, edgecolor = 'k', linewidth = 0.6)
430
431 # Plot labels
~\AppData\Roaming\Python\Python37\site-packages\matplotlib\__init__.py in inner(ax, data, *args, **kwargs)
1599 def inner(ax, *args, data=None, **kwargs):
1600 if data is None:
-> 1601 return func(ax, *map(sanitize_sequence, args), **kwargs)
1602
1603 bound = new_sig.bind(ax, *args, **kwargs)
~\AppData\Roaming\Python\Python37\site-packages\matplotlib\axes\_axes.py in fill_between(self, x, y1, y2, where, interpolate, step, **kwargs)
5222
5223 # Convert the arrays so we can work with them
-> 5224 x = ma.masked_invalid(self.convert_xunits(x))
5225 y1 = ma.masked_invalid(self.convert_yunits(y1))
5226 y2 = ma.masked_invalid(self.convert_yunits(y2))
~\AppData\Roaming\Python\Python37\site-packages\numpy\ma\core.py in masked_invalid(a, copy)
2364 cls = type(a)
2365 else:
-> 2366 condition = ~(np.isfinite(a))
2367 cls = MaskedArray
2368 result = a.view(cls)
TypeError: ufunc 'isfinite' not supported for the input types, and the inputs could not be safely coerced to any supported types according to the casting rule ''safe''
I expect it to show the complete chart, but currently some events are missing.
I encountered a similar problem a while back. I'm sharing the links; I hope they are helpful for you: Link1
link2
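In case those links lack context: the traceback shows np.isfinite failing inside fill_between on the x-values, which typically happens when pandas datetime values reach matplotlib unconverted. One commonly suggested workaround, offered only as a sketch, is to register pandas' matplotlib converters before plotting:
import pandas as pd

# register converters so matplotlib can handle pandas datetime values
pd.plotting.register_matplotlib_converters()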

tfidf vectorizer process shows error

I am working on non-English corpus analysis but facing several problems. One of those problems is the tfidf_vectorizer. After importing the relevant libraries, I ran the following code to get results:
contents = [open(r"D:\test.txt", encoding='utf8').read()]  # raw string, so \t is not read as a tab escape
#define vectorizer parameters
tfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=200000,
min_df=0.2, stop_words=stopwords,
use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(3,3))
%time tfidf_matrix = tfidf_vectorizer.fit_transform(contents)
print(tfidf_matrix.shape)
After running the above code, I got the following error message.
ValueError Traceback (most recent call last)
<ipython-input-144-bbcec8b8c065> in <module>()
5 use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(3,3))
6
----> 7 get_ipython().magic('time tfidf_matrix = tfidf_vectorizer.fit_transform(contents) #fit the vectorizer to synopses')
8
9 print(tfidf_matrix.shape)
C:\Users\mazhar\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py in magic(self, arg_s)
2156 magic_name, _, magic_arg_s = arg_s.partition(' ')
2157 magic_name = magic_name.lstrip(prefilter.ESC_MAGIC)
-> 2158 return self.run_line_magic(magic_name, magic_arg_s)
2159
2160 #-------------------------------------------------------------------------
C:\Users\mazhar\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py in run_line_magic(self, magic_name, line)
2077 kwargs['local_ns'] = sys._getframe(stack_depth).f_locals
2078 with self.builtin_trap:
-> 2079 result = fn(*args,**kwargs)
2080 return result
2081
<decorator-gen-60> in time(self, line, cell, local_ns)
C:\Users\mazhar\Anaconda3\lib\site-packages\IPython\core\magic.py in <lambda>(f, *a, **k)
186 # but it's overkill for just that one bit of state.
187 def magic_deco(arg):
--> 188 call = lambda f, *a, **k: f(*a, **k)
189
190 if callable(arg):
C:\Users\mazhar\Anaconda3\lib\site-packages\IPython\core\magics\execution.py in time(self, line, cell, local_ns)
1178 else:
1179 st = clock2()
-> 1180 exec(code, glob, local_ns)
1181 end = clock2()
1182 out = None
<timed exec> in <module>()
C:\Users\mazhar\Anaconda3\lib\site-packages\sklearn\feature_extraction\text.py in fit_transform(self, raw_documents, y)
1303 Tf-idf-weighted document-term matrix.
1304 """
-> 1305 X = super(TfidfVectorizer, self).fit_transform(raw_documents)
1306 self._tfidf.fit(X)
1307 # X is already a transformed view of raw_documents so
C:\Users\mazhar\Anaconda3\lib\site-packages\sklearn\feature_extraction\text.py in fit_transform(self, raw_documents, y)
836 max_doc_count,
837 min_doc_count,
--> 838 max_features)
839
840 self.vocabulary_ = vocabulary
C:\Users\mazhar\Anaconda3\lib\site-packages\sklearn\feature_extraction\text.py in _limit_features(self, X, vocabulary, high, low, limit)
731 kept_indices = np.where(mask)[0]
732 if len(kept_indices) == 0:
--> 733 raise ValueError("After pruning, no terms remain. Try a lower"
734 " min_df or a higher max_df.")
735 return X[:, kept_indices], removed_terms
ValueError: After pruning, no terms remain. Try a lower min_df or a higher max_df.
If I change the min and max values, the error is
Assuming your tokeniser works as expected, I see two problems with your code. First, TfidfVectorizer expects a list of strings, whereas you are providing a single string. Second, min_df=0.2 is quite high: to be included, a term needs to occur in 20% of all documents, which is very unlikely for trigram features.
The following works for me
from sklearn.feature_extraction.text import TfidfVectorizer

with open("README.md") as infile:
    contents = infile.readlines()  # Note: readlines() instead of read()

tfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=200000,
                                   min_df=2,  # minimum of 2 occurrences, rather than 0.2 (20% of all documents)
                                   use_idf=True, ngram_range=(3,3))
tfidf_matrix = tfidf_vectorizer.fit_transform(contents)
print(tfidf_matrix.shape)
outputs (155, 28)
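(The shape makes sense: readlines() turns each of the file's 155 lines into its own document, which is what makes document-frequency thresholds such as min_df and max_df meaningful in the first place.)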
