Getting errors when generating connected components with OpenCV 3 - python-3.x

I want to use the function cv2.connectedComponentsWithStats to get the connected components of a binarized edge image:
from skimage import io
from skimage.color import rgb2gray
from scipy import ndimage
import numpy as np
import cv2

img1 = io.imread('Bingo/25.jpg', as_gray=True)

def sobel_filters(img):
    # Sobel kernels for horizontal and vertical gradients
    Kx = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], np.float32)
    Ky = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]], np.float32)
    Ix = ndimage.filters.convolve(img, Kx)
    Iy = ndimage.filters.convolve(img, Ky)
    G = np.hypot(Ix, Iy)
    G = G / G.max() * 255
    theta = np.arctan2(Iy, Ix)
    return G

e = sobel_filters(img1)
threshold = 70
# make all pixels < threshold black
binarized = 1.0 * (e > threshold)
connectivity = 4  # or whatever you prefer
output = cv2.connectedComponentsWithStats(binarized, connectivity, cv2.CV_32S)
But I'm getting this error:
error: (-215:Assertion failed) iDepth == CV_8U || iDepth == CV_8S in function 'cv::connectedComponents_sub1'
What should I change to get it right?

You need to convert the image to uint8: connectedComponentsWithStats only accepts 8-bit input (CV_8U or CV_8S), which is exactly what the assertion is complaining about.
Try this:
bin_uint8 = (binarized * 255).astype(np.uint8)
output = cv2.connectedComponentsWithStats(bin_uint8, connectivity,cv2.CV_32S)
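The return value can then be unpacked as follows (a small sketch to show the pieces; the variable names are just illustrative):
num_labels, labels, stats, centroids = output
# stats[i] = [x, y, width, height, area] of component i; label 0 is the background
print(num_labels)
print(stats[:, cv2.CC_STAT_AREA])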

Adding image generated from another library as inset in matplotlib

I've generated a network figure using the vedo library, and I'm trying to add it as an inset to a figure generated in matplotlib:
import networkx as nx
import matplotlib.pyplot as plt
from vedo import *
from matplotlib.offsetbox import OffsetImage, AnnotationBbox

G = nx.gnm_random_graph(n=10, m=15, seed=1)
nxpos = nx.spring_layout(G, dim=3, seed=1)
nxpts = [nxpos[pt] for pt in sorted(nxpos)]
nx_lines = [(nxpts[i], nxpts[j]) for i, j in G.edges()]
pts = Points(nxpts, r=12)
edg = Lines(nx_lines).lw(2)

# node values
values = [[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
          [30, 80, 10, 79, 70, 60, 75, 78, 65, 10],
          [1, .30, .10, .79, .70, .60, .75, .78, .65, .90]]
time = [0.0, 0.1, 0.2]  # in seconds

vplt = Plotter(N=1)
pts1 = pts.cmap('Blues', values[0])
vplt.show(
    pts1, edg,
    axes=False,
    bg='white',
    at=0,
    interactive=False,
    zoom=1.5
).screenshot("network.png")

ax = plt.subplot(111)
ax.plot(
    [1, 2, 3], [1, 2, 3],
    'go-',
    label='line 1',
    linewidth=2
)

arr_img = vplt.screenshot(returnNumpy=True, scale=1)
im = OffsetImage(arr_img, zoom=0.25)
ab = AnnotationBbox(im, (1, 0), xycoords='axes fraction',
                    box_alignment=(1.1, -0.1), frameon=False)
ax.add_artist(ab)

plt.show()
ax.figure.savefig(
    "output.svg",
    transparent=True,
    dpi=600,
    bbox_inches="tight"
)
The resolution of the image in the inset is too low. Suggestions on how to add the inset without losing resolution would be really helpful.
EDIT:
The answer posted below works for adding a 2D network, but I am still looking for a way to add a 3D network in the inset.
I am not familiar with vedo, but the general procedure would be to create an inset axis and draw the saved image into it with imshow. However, your code is using networkx, which has matplotlib bindings, so you can do this directly without vedo.
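A minimal sketch of that first approach (assuming the vedo screenshot has been saved as network.png, as in your code):
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3], 'go-', label='line 1', linewidth=2)

# inset axis in the lower-right corner of the main axes
iax = ax.inset_axes([0.55, 0.05, 0.4, 0.4])
iax.imshow(plt.imread("network.png"))
iax.axis('off')

plt.show()
Taking the screenshot at a larger scale (the scale argument already used in your code) and letting imshow downsample it helps keep the inset sharp.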
EDIT: code edited for 3d plotting
import networkx as nx
import matplotlib.pyplot as plt

G = nx.gnm_random_graph(n=10, m=15, seed=1)
nxpos = nx.spring_layout(G, dim=3, seed=1)
nxpts = [nxpos[pt] for pt in sorted(nxpos)]
nx_lines = [(nxpts[i], nxpts[j]) for i, j in G.edges()]

# node values
values = [[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
          [30, 80, 10, 79, 70, 60, 75, 78, 65, 10],
          [1, .30, .10, .79, .70, .60, .75, .78, .65, .90]]
time = [0.0, 0.1, 0.2]  # in seconds

fig, ax = plt.subplots()
ax.plot(
    [1, 2, 3], [1, 2, 3],
    'go-',
    label='line 1',
    linewidth=2
)

from mpl_toolkits.mplot3d import Axes3D
from matplotlib.transforms import Bbox

rect = [.6, 0, .5, .5]
bbox = Bbox.from_bounds(*rect)
inax = fig.add_axes(bbox, projection='3d')
# inax = add_inset_axes(rect, ax_target=ax, fig=fig, projection='3d')
# inax.axis('off')

# set angle
angle = 25
inax.view_init(10, angle)

# hide axes, make transparent
# inax.set_facecolor('none')
# inax.grid('off')

import numpy as np

# plot 3d
seen = set()
for i, j in G.edges():
    x = np.stack((nxpos[i], nxpos[j]))
    inax.plot(*x.T, color='k')
    if i not in seen:
        inax.scatter(*x[0], color='skyblue')
        seen.add(i)
    if j not in seen:
        inax.scatter(*x[1], color='skyblue')
        seen.add(j)

fig.show()

List [0-1] to binary representation fast

I am trying to convert each row of a 0/1 matrix to the number it represents in binary. The code I have is the following:
import numpy as np

def generate_binary_matrix(matrix):
    result = []
    for i in matrix:
        val = '0b' + ''.join([str(x) for x in i])
        result.append(int(val, 2))
    result = np.array(result)
    return result

initial_matrix = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
result = generate_binary_matrix(initial_matrix)
print(result)
This code works, but it is very slow. Does anyone know how to do it in a faster way?
You can convert a 0/1 list to an integer using just arithmetic, which should be faster:
from functools import reduce

b = reduce(lambda r, x: 2 * r + x, i)  # i is one row of the matrix
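For instance, applied row by row to the matrix from the question (a quick sketch):
from functools import reduce
import numpy as np

initial_matrix = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
result = np.array([reduce(lambda r, x: 2 * r + x, row) for row in initial_matrix])
print(result)  # --> [2 4 1]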
Suppose your matrix is a numpy array A with m rows and n columns.
Create a vector b with n elements by:
b = np.power(2, np.arange(n))[::-1]
Then your answer is A @ b.
Example:
import numpy as np

A = np.array([[0, 0, 1], [1, 0, 1]])
n = A.shape[1]
b = np.power(2, np.arange(n))[::-1]
print(A @ b)  # --> [1 5]
Update: I reversed b since the MSB (2^(n-1)) corresponds to A[:, 0], fixed the mistakenly flipped arguments to np.power, and added an example.
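As a side note (my own addition, not part of the original answer): for matrices with at most 8 columns, np.packbits gives the same result if each row is left-padded to 8 bits:
import numpy as np

A = np.array([[0, 0, 1], [1, 0, 1]], dtype=np.uint8)
n = A.shape[1]
# pad each row with leading zeros to 8 bits, then pack each row into one byte
packed = np.packbits(np.pad(A, ((0, 0), (8 - n, 0))), axis=1).ravel()
print(packed)  # --> [1 5]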

Replace a bunch of if-else conditions with scikit-learn

I'm trying to wrap my head around ML with scikit-learn.
Here is what I'm trying to do:
import pandas as pd
from sklearn.tree import DecisionTreeClassifier

df = pd.DataFrame({
    "f1": [1, 1],
    "f2": [0, 0],
    "c": [1, 0]
})
# df
#   f1  f2  c    # f1, f2 - features / c - class / classifier
#   1   1   1    # for f1 = 1 and f2 = 1 > expected c = 1
#   0   0   0    # for f1 = 0 and f2 = 0 > expected c = 0

dtc_clf = DecisionTreeClassifier()
features = df[["f1", "f2"]]
labels = df[["c"]]
dtc_clf.fit(features, labels)

test_features = pd.DataFrame({"ft1": [1, 1],
                              "ft2": [0, 0]})
# test_features
#   ft1  ft2    # I added for test exactly the training data
#   1    1
#   0    0

dtc_clf.predict(test_features)
# I'm getting this result:
#   array([0, 0])
# I expected this result:
#   array([1, 0])
If 1, 1 => 1, then 0, 0 => 0, so the result should be array([1, 0]), right?
Each column is a condition which, if it is met, is 1, and 0 otherwise.
Basically I'm trying to replace a lot of if-else conditions with ML.
It works with DecisionTreeRegressor:
import pandas as pd
from sklearn.tree import DecisionTreeRegressor

# "beer": 1
# "wine": 2
df = pd.DataFrame({
    "boy": [1, 0],
    "hetero": [1, 1],
    "drink": [1, 2]
})

X = df[["boy", "hetero"]]
y = df[["drink"]]

regr = DecisionTreeRegressor(random_state=0)
model = regr.fit(X, y)

# Make new observation
observation = [[1, 1]]

# Predict observation's value
model.predict(observation)
Result:
array([ 1.])
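As a side note (my own sketch, not part of the original answer): the DecisionTreeClassifier from the question also returns the expected array([1, 0]) once the training frame is actually built as 1, 1 -> 1 and 0, 0 -> 0 and the test frame reuses the training column names:
import pandas as pd
from sklearn.tree import DecisionTreeClassifier

df = pd.DataFrame({"f1": [1, 0], "f2": [1, 0], "c": [1, 0]})
clf = DecisionTreeClassifier()
clf.fit(df[["f1", "f2"]], df["c"])

test = pd.DataFrame({"f1": [1, 0], "f2": [1, 0]})
print(clf.predict(test))  # --> [1 0]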

Convert integer to pytorch tensor of binary bits

Given a number and an encoding length, how can I convert the number to its binary representation as a tensor?
E.g., given the number 6 and a width of 8, how can I obtain the tensor:
(0, 0, 0, 0, 0, 1, 1, 0)
import torch

def binary(x, bits):
    mask = 2 ** torch.arange(bits).to(x.device, x.dtype)
    return x.unsqueeze(-1).bitwise_and(mask).ne(0).byte()
If you want to reverse the order of the bits, use torch.arange(bits - 1, -1, -1) instead.
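For instance, with the reversed mask so the most significant bit comes first (matching the layout asked for in the question), a quick check:
import torch

def binary(x, bits):
    mask = 2 ** torch.arange(bits - 1, -1, -1).to(x.device, x.dtype)
    return x.unsqueeze(-1).bitwise_and(mask).ne(0).byte()

print(binary(torch.tensor(6), 8))
# tensor([0, 0, 0, 0, 0, 1, 1, 0], dtype=torch.uint8)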
Tiana's answer is a good one. BTW, to convert Tiana's base-2 result back to base-10 numbers, one can do it like this:
import torch
import numpy as np

def dec2bin(x, bits):
    # mask = 2 ** torch.arange(bits).to(x.device, x.dtype)
    mask = 2 ** torch.arange(bits - 1, -1, -1).to(x.device, x.dtype)
    return x.unsqueeze(-1).bitwise_and(mask).ne(0).float()

def bin2dec(b, bits):
    mask = 2 ** torch.arange(bits - 1, -1, -1).to(b.device, b.dtype)
    return torch.sum(mask * b, -1)

if __name__ == '__main__':
    NUM_BITS = 7
    d = torch.randint(0, 16, (3, 6))
    b = dec2bin(d, NUM_BITS)
    # print(d)
    # print(b)
    # print(b.shape)
    # print("num of total bits: {}".format(np.prod(b.shape)))
    d_rec = bin2dec(b, NUM_BITS)
    # print(d_rec)
    print(abs(d - d_rec).max())  # should be 0.
If the input is unsigned bytes and the output width is 8 bits:
>>> binary = np.unpackbits(np.array([0xaa, 0xf0], dtype=np.uint8))
>>> print(torch.tensor(binary))
tensor([1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0], dtype=torch.uint8)
Note that unpackbits() only operates on np.uint8 arrays.
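For wider integer types, one workaround (my own sketch, not part of the original answer) is to view a big-endian array as raw bytes before unpacking:
>>> wide = np.array([300, 6], dtype='>u2')  # big-endian uint16
>>> np.unpackbits(wide.view(np.uint8)).reshape(-1, 16)
array([[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0],
       [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0]], dtype=uint8)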
import torch

def decimal_to_binary_tensor(value, width=0):
    string = format(value, '0{}b'.format(width))
    binary = [0 if c == '0' else 1 for c in string]
    return torch.tensor(binary, dtype=torch.uint8)
Examples:
>>> print(decimal_to_binary_tensor(6, width=8))
tensor([0, 0, 0, 0, 0, 1, 1, 0], dtype=torch.uint8)
>>> print(decimal_to_binary_tensor(6))
tensor([1, 1, 0], dtype=torch.uint8)

LinearSVC() differs from SVC(kernel='linear')

When the data is offset (not centered at zero), LinearSVC() and SVC(kernel='linear') give awfully different results. (EDIT: the problem might be that LinearSVC does not handle non-normalized data.)
import matplotlib.pyplot as plot
plot.ioff()
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.svm import LinearSVC, SVC

def plot_hyperplane(m, X):
    w = m.coef_[0]
    a = -w[0] / w[1]
    xx = np.linspace(np.min(X[:, 0]), np.max(X[:, 0]))
    yy = a * xx - (m.intercept_[0]) / w[1]
    plot.plot(xx, yy, 'k-')

X, y = make_blobs(n_samples=100, centers=2, n_features=2,
                  center_box=(0, 1))
X[y == 0] = X[y == 0] + 100
X[y == 1] = X[y == 1] + 110

for i, m in enumerate((LinearSVC(), SVC(kernel='linear'))):
    m.fit(X, y)
    plot.subplot(1, 2, i + 1)
    plot_hyperplane(m, X)
    plot.plot(X[y == 0, 0], X[y == 0, 1], 'r.')
    plot.plot(X[y == 1, 0], X[y == 1, 1], 'b.')
    xv, yv = np.meshgrid(np.linspace(98, 114, 10), np.linspace(98, 114, 10))
    _X = np.c_[xv.reshape((xv.size, 1)), yv.reshape((yv.size, 1))]
    _y = m.predict(_X)
    plot.plot(_X[_y == 0, 0], _X[_y == 0, 1], 'r.', alpha=0.4)
    plot.plot(_X[_y == 1, 0], _X[_y == 1, 1], 'b.', alpha=0.4)

plot.show()
This is the result I get:
(left=LinearSVC(), right=SVC(kernel='linear'))
sklearn.__version__ is 0.17, but I also tested on Ubuntu 14.04, which ships with 0.15.
I thought about reporting the bug, but it seems too obvious to be one. What am I missing?
Reading the documentation, they use different underlying implementations: LinearSVC uses liblinear, whereas SVC uses libsvm.
Looking closely at the coefficients and intercepts, it seems LinearSVC applies regularization to the intercept, whereas SVC does not.
By adding intercept_scaling, I was able to obtain the same results from both:
LinearSVC(loss='hinge', intercept_scaling=1000)
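Another option (my own sketch, not part of the original answer) is to standardize the data before fitting, which removes the offset that triggers the discrepancy in the first place:
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC

# scale each feature to zero mean / unit variance, then fit the linear SVM
model = make_pipeline(StandardScaler(), LinearSVC())
model.fit(X, y)  # X, y as generated above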
