module 'cv2.cv2' has no attribute 'xfeatures2d' (OpenCV 3.4.2.17) - python-3.x

Hello, can someone help me resolve this error?
I am working on an image stitching project and have installed OpenCV and opencv-contrib-python version 3.4.2.17, but I am still getting this error:
---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-18-243a893afb39> in <module>
      3     (result, matched_points) = panaroma.image_stitch([images[0], images[1]], match_status=True)
      4 else:
----> 5     (result, matched_points) = panaroma.image_stitch([images[no_of_images-2], images[no_of_images-1]], match_status=True)
      6     for i in range(no_of_images - 2):
      7         (result, matched_points) = panaroma.image_stitch([images[no_of_images-i-3], result], match_status=True)

<ipython-input-14-2f5bd4f316a7> in image_stitch(self, images, lowe_ratio, max_Threshold, match_status)
      5     #detect the features and keypoints from SIFT
      6     (imageB, imageA) = images
----> 7     (KeypointsA, features_of_A) = self.Detect_Feature_And_KeyPoints(imageA)
      8     (KeypointsB, features_of_B) = self.Detect_Feature_And_KeyPoints(imageB)
      9

<ipython-input-14-2f5bd4f316a7> in Detect_Feature_And_KeyPoints(self, image)
     37
     38     # detect and extract features from the image
---> 39     descriptors = cv2.xfeatures2d.SIFT_create()
     40     (Keypoints, features) = descriptors.detectAndCompute(image, None)
     41

AttributeError: module 'cv2.cv2' has no attribute 'xfeatures2d'
It occurs while running this block of code:
panaroma = Panaroma()
if no_of_images == 2:
    (result, matched_points) = panaroma.image_stitch([images[0], images[1]], match_status=True)
else:
    (result, matched_points) = panaroma.image_stitch([images[no_of_images-2], images[no_of_images-1]], match_status=True)
    for i in range(no_of_images - 2):
        (result, matched_points) = panaroma.image_stitch([images[no_of_images-i-3], result], match_status=True)
The class is this:
class Panaroma:
    def image_stitch(self, images, lowe_ratio=0.75, max_Threshold=4.0, match_status=False):
        # detect the features and keypoints with SIFT
        (imageB, imageA) = images
        (KeypointsA, features_of_A) = self.Detect_Feature_And_KeyPoints(imageA)
        (KeypointsB, features_of_B) = self.Detect_Feature_And_KeyPoints(imageB)

        # get the valid matched points
        Values = self.matchKeypoints(KeypointsA, KeypointsB, features_of_A, features_of_B, lowe_ratio, max_Threshold)
        if Values is None:
            return None

        # warp imageA into imageB's frame using the computed homography
        (matches, Homography, status) = Values
        result_image = self.getwarp_perspective(imageA, imageB, Homography)
        result_image[0:imageB.shape[0], 0:imageB.shape[1]] = imageB

        # check to see if the keypoint matches should be visualized
        if match_status:
            vis = self.draw_Matches(imageA, imageB, KeypointsA, KeypointsB, matches, status)
            return (result_image, vis)

        return result_image

    def getwarp_perspective(self, imageA, imageB, Homography):
        val = imageA.shape[1] + imageB.shape[1]
        result_image = cv2.warpPerspective(imageA, Homography, (val, imageA.shape[0]))
        return result_image

    def Detect_Feature_And_KeyPoints(self, image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # detect and extract features from the image
        descriptors = cv2.xfeatures2d.SIFT_create()
        (Keypoints, features) = descriptors.detectAndCompute(image, None)

        Keypoints = np.float32([i.pt for i in Keypoints])
        return (Keypoints, features)

    def get_Allpossible_Match(self, featuresA, featuresB):
        # compute all matches using Euclidean distance; OpenCV provides
        # the DescriptorMatcher_create() function for that
        match_instance = cv2.DescriptorMatcher_create("BruteForce")
        All_Matches = match_instance.knnMatch(featuresA, featuresB, 2)
        return All_Matches

    def All_validmatches(self, AllMatches, lowe_ratio):
        # keep only the matches that pass Lowe's ratio test
        valid_matches = []
        for val in AllMatches:
            if len(val) == 2 and val[0].distance < val[1].distance * lowe_ratio:
                valid_matches.append((val[0].trainIdx, val[0].queryIdx))
        return valid_matches

    def Compute_Homography(self, pointsA, pointsB, max_Threshold):
        # compute the homography from the corresponding points in both images
        (H, status) = cv2.findHomography(pointsA, pointsB, cv2.RANSAC, max_Threshold)
        return (H, status)

    def matchKeypoints(self, KeypointsA, KeypointsB, featuresA, featuresB, lowe_ratio, max_Threshold):
        AllMatches = self.get_Allpossible_Match(featuresA, featuresB)
        valid_matches = self.All_validmatches(AllMatches, lowe_ratio)

        if len(valid_matches) > 4:
            # construct the two sets of points
            pointsA = np.float32([KeypointsA[i] for (_, i) in valid_matches])
            pointsB = np.float32([KeypointsB[i] for (i, _) in valid_matches])
            (Homography, status) = self.Compute_Homography(pointsA, pointsB, max_Threshold)
            return (valid_matches, Homography, status)
        else:
            return None

    def get_image_dimension(self, image):
        (h, w) = image.shape[:2]
        return (h, w)

    def get_points(self, imageA, imageB):
        (hA, wA) = self.get_image_dimension(imageA)
        (hB, wB) = self.get_image_dimension(imageB)
        vis = np.zeros((max(hA, hB), wA + wB, 3), dtype="uint8")
        vis[0:hA, 0:wA] = imageA
        vis[0:hB, wA:] = imageB
        return vis

    def draw_Matches(self, imageA, imageB, KeypointsA, KeypointsB, matches, status):
        (hA, wA) = self.get_image_dimension(imageA)
        vis = self.get_points(imageA, imageB)

        # loop over the matches
        for ((trainIdx, queryIdx), s) in zip(matches, status):
            if s == 1:
                ptA = (int(KeypointsA[queryIdx][0]), int(KeypointsA[queryIdx][1]))
                ptB = (int(KeypointsB[trainIdx][0]) + wA, int(KeypointsB[trainIdx][1]))
                cv2.line(vis, ptA, ptB, (0, 255, 0), 1)

        return vis
Note: I am using opencv-python 3.4.2.17 and opencv-contrib-python 3.4.2.17 in the following Kaggle notebook: https://www.kaggle.com/deepzsenu/image-stitching/
Thank you

Hi everyone, I have solved the above error. Don't downgrade your OpenCV package directly. First uninstall the plain OpenCV package:
!pip uninstall opencv-python -y
Then install only the OpenCV contrib build:
!pip install -U opencv-contrib-python==3.4.2.17
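After reinstalling, restart the kernel and confirm that the contrib build is the one actually being imported; a quick sanity check (the expected outputs below are assumptions based on the versions above):
import cv2
print(cv2.__version__)               # expect '3.4.2'
print(hasattr(cv2, 'xfeatures2d'))   # should print True with the contrib build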

Just use cv2.SIFT_create() instead; try this out.
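For context: SIFT's patent expired in March 2020, and from OpenCV 4.4.0 onward SIFT_create() lives in the main cv2 namespace rather than in xfeatures2d. A minimal fallback sketch that works on either layout:
import cv2

try:
    sift = cv2.SIFT_create()               # OpenCV >= 4.4.0
except AttributeError:
    sift = cv2.xfeatures2d.SIFT_create()   # older builds with opencv-contrib-python installed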

Related

too many values to unpack (expected 2) PyTorch

datasets = {
    x: LungDataset(
        splits[x],
        origins_folder,
        masks_folder,
        train_transform if x == 'train' else val_test_transform
    ) for x in ["train", "test", "val"]
}
dataloaders = {x: torch.utils.data.DataLoader(datasets[x], batch_size=batch_size) for x in ["train", "test", "val"]}

idx = 0
phase = "train"
origin, mask = datasets[phase][idx]
class LungDataset(torch.utils.data.Dataset):
    def __init__(self, origin_mask_list, origins_folder, masks_folder, transforms=None):
        self.origin_mask_list = origin_mask_list
        self.origins_folder = origins_folder
        self.masks_folder = masks_folder
        self.transforms = transforms

    def __getitem__(self, idx):
        origin_name, mask_name = self.origin_mask_list[idx]
        origin = Image.open(self.origins_folder / (origin_name + ".png")).convert("P")
        mask = Image.open(self.masks_folder / (mask_name + ".png"))
        if self.transforms is not None:
            origin, mask = self.transforms((origin, mask))
        origin = torchvision.transforms.functional.to_tensor(origin) - 0.5
        mask = np.array(mask)
        mask = (torch.tensor(mask) > 128).long()
        return origin, mask

    def __len__(self):
        return len(self.origin_mask_list)
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-111-6b5b08e37e6d> in <module>
      3
      4 plt.figure(figsize=(20, 10))
----> 5 origin, mask = datasets[phase][idx]
      6 pil_origin = torchvision.transforms.functional.to_pil_image(origin + 0.5).convert("RGB")
      7 pil_mask = torchvision.transforms.functional.to_pil_image(mask.float())

D:\python\care\Lung-segmentation\src\data.py in __getitem__(self, idx)
     16
     17 def __getitem__(self, idx):
---> 18     origin_name, mask_name = self.origin_mask_list[idx]
     19     origin = Image.open(self.origins_folder / (origin_name + ".png")).convert("P")
     20     mask = Image.open(self.masks_folder / (mask_name + ".png"))

ValueError: too many values to unpack (expected 2)
I'm trying to load the data, but it shows me this error. It's a lung segmentation project; the code is in the GitHub repo, and the datasets are on Drive. If you want more detail, comment and I'll provide it. For more details, take a look at the GitHub repo.

IndexError: invalid index of a 0-dim tensor. Use tensor.item() to convert a 0-dim tensor to a Python number

def nms(bboxes, scores, threshold=0.5):
    '''
    bboxes (tensor) [N, 4]
    scores (tensor) [N,]
    '''
    x1 = bboxes[:, 0]
    y1 = bboxes[:, 1]
    x2 = bboxes[:, 2]
    y2 = bboxes[:, 3]
    areas = (x2 - x1) * (y2 - y1)

    _, order = scores.sort(0, descending=True)
    keep = []
    while order.numel() > 0:
        i = order[0]
        keep.append(i)
        if order.numel() == 1:
            break
        xx1 = x1[order[1:]].clamp(min=x1[i])
        yy1 = y1[order[1:]].clamp(min=y1[i])
        xx2 = x2[order[1:]].clamp(max=x2[i])
        yy2 = y2[order[1:]].clamp(max=y2[i])
        w = (xx2 - xx1).clamp(min=0)
        h = (yy2 - yy1).clamp(min=0)
        inter = w * h
        ovr = inter / (areas[i] + areas[order[1:]] - inter)
        ids = (ovr <= threshold).nonzero().squeeze()
        if ids.numel() == 0:
            break
        order = order[ids + 1]
    return torch.LongTensor(keep)
I tried
i = order.item()
but it does not work.
I found the solution in the GitHub issues for the project. Try changing
i = order[0]  # works for PyTorch 0.4.1
to
i = order  # works for PyTorch >= 0.5
I was trying to run a standard convolutional neural network (LeNet) on MNIST using PyTorch and was getting this error:
IndexError Traceback (most recent call last)
     79     y = net.forward(train_x, dropout_value)
     80     loss = net.loss(y, train_y, l2_regularization)
     81     loss_train = loss.data[0]
     82     loss_train += loss_val.data
IndexError: invalid index of a 0-dim tensor. Use tensor.item() to convert a 0-dim tensor to a Python number
Changing
loss_train = loss.data[0]
to
loss_train = loss.data
fixed the problem.
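As the error message itself suggests, tensor.item() is the version-stable way to pull a Python number out of a 0-dim tensor; an equivalent one-liner:
loss_train = loss.item()  # returns a plain Python float on PyTorch >= 0.4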
You should change the loop body as follows:
while order.numel() > 0:
    if order.numel() == 1:
        break
    i = order[0]
    keep.append(i)
The line i = order[0] raises this error when there is only one element left in order.
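Alternatively, converting the index to a plain Python int with .item(), per the hint in the error message, sidesteps the version differences entirely; a sketch of the top of the loop under that approach:
while order.numel() > 0:
    # order can collapse to a 0-dim tensor after `order = order[ids + 1]`
    # when `ids` squeezes down to a scalar, so guard before indexing
    i = order.item() if order.dim() == 0 else order[0].item()
    keep.append(i)
    if order.numel() == 1:
        break
    # ... rest of the loop body unchanged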

Image dilation with Python

I'm trying to execute a piece of code I found online, but it gives me the following error. I'm new to OpenCV, so please help me.
Error:
<ipython-input-1-7fe9c579ec14> in image_masking(filepath)
     15     gray = cv2.imread(filepath, 0)
     16     edges = cv2.Canny(gray, CANNY_THRESH_1, CANNY_THRESH_2)
---> 17     edges = cv2.dilate(edges, None)
     18     edges = cv2.erode(edges, None)
     19
error: OpenCV(3.4.1) C:\Miniconda3\conda-bld\opencv-suite_1533128839831\work\modules\core\src\matrix.cpp:760: error: (-215) dims <= 2 && step[0] > 0 in function cv::Mat::locateROI
Code:
import cv2
import numpy as np

def image_masking(filepath):
    BLUR = 21
    CANNY_THRESH_1 = 100
    CANNY_THRESH_2 = 100
    MASK_DILATE_ITER = 10
    MASK_ERODE_ITER = 10
    MASK_COLOR = (0.0, 0.0, 0.0)  # in BGR format

    img = cv2.imread(filepath)  # note: missing from the original post; needed for the blend below
    gray = cv2.imread(filepath, 0)
    edges = cv2.Canny(gray, CANNY_THRESH_1, CANNY_THRESH_2)
    edges = cv2.dilate(edges, None)
    edges = cv2.erode(edges, None)

    contour_info = []
    _, contours, __ = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    for c in contours:
        contour_info.append((c, cv2.isContourConvex(c), cv2.contourArea(c),))
    contour_info = sorted(contour_info, key=lambda c: c[2], reverse=True)
    max_contour = contour_info[0]

    mask = np.zeros(edges.shape)  # note: missing from the original post; the mask must exist before filling
    for c in contour_info:
        cv2.fillConvexPoly(mask, c[0], (255))

    mask = cv2.dilate(mask, None, iterations=MASK_DILATE_ITER)
    mask = cv2.erode(mask, None, iterations=MASK_ERODE_ITER)
    mask = cv2.GaussianBlur(mask, (BLUR, BLUR), 0)
    mask_stack = np.dstack([mask] * 3)

    mask_stack = mask_stack.astype('float32') / 255.0
    img = img.astype('float32') / 255.0
    masked = (mask_stack * img) + ((1 - mask_stack) * MASK_COLOR)
    masked = (masked * 255).astype('uint8')

    fileName, fileExtension = filepath.split('.')
    fileName += '-masked.'
    filepath = fileName + fileExtension
    print(filepath)
    cv2.imwrite(filepath, masked)

if __name__ == '__main__':
    filepath = 'C:\\Users\\HP\\Downloads\\test3.jpg'
    image_masking(filepath)
I tried replacing None in the dilate function with a kernel, but it gives me the same error.
The second argument to cv2.dilate and cv2.erode should be the kernel with which you want to perform the dilation/erosion, as shown in the OpenCV documentation.
For example, you can try it like this:
kernel = np.ones((3, 3), np.uint8)
edges = cv2.dilate(edges, kernel)
edges = cv2.erode(edges, kernel)
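If a non-rectangular structuring element is ever needed, OpenCV's cv2.getStructuringElement builds one, for example:
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
edges = cv2.dilate(edges, kernel)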
Good luck with further OpenCV exploration!

AssertionError: Format for classes is `<label> file`

This is a Python script for detecting features in a set of images for an SVM.
import os
import sys
import argparse
import _pickle as cPickle
import json
import cv2
import numpy as np
from sklearn.cluster import KMeans

def build_arg_parser():
    parser = argparse.ArgumentParser(description='Creates features for given images')
    parser.add_argument("--samples", dest="cls", nargs="+", action="append",
                        required=True, help="Folders containing the training images. \
                        The first element needs to be the class label.")
    parser.add_argument("--codebook-file", dest='codebook_file', required=True,
                        help="Base file name to store the codebook")
    parser.add_argument("--feature-map-file", dest='feature_map_file', required=True,
                        help="Base file name to store the feature map")
    parser.add_argument("--scale-image", dest="scale", type=int, default=150,
                        help="Scales the longer dimension of the image down to this size.")
    return parser

def load_input_map(label, input_folder):
    combined_data = []
    if not os.path.isdir(input_folder):
        print("The folder " + input_folder + " doesn't exist")
        raise IOError
    for root, dirs, files in os.walk(input_folder):
        for filename in (x for x in files if x.endswith('.jpg')):
            combined_data.append({'label': label, 'image': os.path.join(root, filename)})
    return combined_data

class FeatureExtractor(object):
    def extract_image_features(self, img):
        kps = DenseDetector().detect(img)
        kps, fvs = SIFTExtractor().compute(img, kps)
        return fvs

    def get_centroids(self, input_map, num_samples_to_fit=10):
        kps_all = []
        count = 0
        cur_label = ''
        for item in input_map:
            if count >= num_samples_to_fit:
                if cur_label != item['label']:
                    count = 0
                else:
                    continue
            count += 1
            if count == num_samples_to_fit:
                print("Built centroids for", item['label'])
            cur_label = item['label']
            img = cv2.imread(item['image'])
            img = resize_to_size(img, 150)
            num_dims = 128
            fvs = self.extract_image_features(img)
            kps_all.extend(fvs)
        kmeans, centroids = Quantizer().quantize(kps_all)
        return kmeans, centroids

    def get_feature_vector(self, img, kmeans, centroids):
        return Quantizer().get_feature_vector(img, kmeans, centroids)

def extract_feature_map(input_map, kmeans, centroids):
    feature_map = []
    for item in input_map:
        temp_dict = {}
        temp_dict['label'] = item['label']
        print("Extracting features for", item['image'])
        img = cv2.imread(item['image'])
        img = resize_to_size(img, 150)
        temp_dict['feature_vector'] = FeatureExtractor().get_feature_vector(
            img, kmeans, centroids)
        if temp_dict['feature_vector'] is not None:
            feature_map.append(temp_dict)
    return feature_map

class Quantizer(object):
    def __init__(self, num_clusters=32):
        self.num_dims = 128
        self.extractor = SIFTExtractor()
        self.num_clusters = num_clusters
        self.num_retries = 10

    def quantize(self, datapoints):
        kmeans = KMeans(self.num_clusters,
                        n_init=max(self.num_retries, 1),
                        max_iter=10, tol=1.0)
        res = kmeans.fit(datapoints)
        centroids = res.cluster_centers_
        return kmeans, centroids

    def normalize(self, input_data):
        sum_input = np.sum(input_data)
        if sum_input > 0:
            return input_data / sum_input
        else:
            return input_data

    def get_feature_vector(self, img, kmeans, centroids):
        kps = DenseDetector().detect(img)
        kps, fvs = self.extractor.compute(img, kps)
        labels = kmeans.predict(fvs)
        fv = np.zeros(self.num_clusters)
        for i, item in enumerate(fvs):
            fv[labels[i]] += 1
        fv_image = np.reshape(fv, ((1, fv.shape[0])))
        return self.normalize(fv_image)

class DenseDetector(object):
    def __init__(self, step_size=20, feature_scale=40, img_bound=20):
        self.detector = cv2.xfeatures2d.SIFT_create("Dense")
        self.detector.setInt("initXyStep", step_size)
        self.detector.setInt("initFeatureScale", feature_scale)
        self.detector.setInt("initImgBound", img_bound)

    def detect(self, img):
        return self.detector.detect(img)

class SIFTExtractor(object):
    def compute(self, image, kps):
        if image is None:
            print("Not a valid image")
            raise TypeError
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        kps, des = cv2.SIFT().compute(gray_image, kps)
        return kps, des

# Resize the shorter dimension to 'new_size'
# while maintaining the aspect ratio
def resize_to_size(input_image, new_size=150):
    h, w = input_image.shape[0], input_image.shape[1]
    ds_factor = new_size / float(h)
    if w < h:
        ds_factor = new_size / float(w)
    new_size = (int(w * ds_factor), int(h * ds_factor))
    return cv2.resize(input_image, new_size)

if __name__ == '__main__':
    args = build_arg_parser().parse_args()
    input_map = []
    for cls in args.cls:
        assert len(cls) >= 2, "Format for classes is `<label> file`"
        label = cls[0]
        input_map += load_input_map(label, cls[1])

    downsample_length = args.scale

    # Building the codebook
    print("===== Building codebook =====")
    kmeans, centroids = FeatureExtractor().get_centroids(input_map)
    if args.codebook_file:
        with open(args.codebook_file, 'wb') as f:  # binary mode; `pickle` was undefined, so use the imported cPickle
            cPickle.dump((kmeans, centroids), f)

    # Input data and labels
    print("===== Building feature map =====")
    feature_map = extract_feature_map(input_map, kmeans, centroids)
    if args.feature_map_file:
        with open(args.feature_map_file, 'wb') as f:
            cPickle.dump(feature_map, f)
I receive the following error:
Traceback (most recent call last):
  File "create_features.py", line 164, in <module>
    assert len(cls) >= 2, ("Format for classes is `<label> file`")
AssertionError: Format for classes is `<label> file`
Any idea of what could be wrong? I'm just following the instructions in 'OpenCV with Python by Example' by Prateek Joshi, pages 494-526.
Assertions are used to check a condition: if the condition isn't satisfied, an AssertionError is raised. In your case, len(cls) >= 2 isn't satisfied, meaning len(cls) is smaller than 2. cls is one group of arguments passed to the program via --samples, and its first element must be the class label. So whenever you pass a folder of images, you must also specify a label for it.
For example, if you choose the label my_label, you would pass --samples my_label my_file.
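A full invocation matching the parser above might look like this (the file and folder names are only illustrative):
python create_features.py --samples my_label path/to/images --codebook-file codebook.pkl --feature-map-file feature_map.pkl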

OpenCV3 matrix transform on KeyPoint fails

I'm using Python 3.5.2 and OpenCV 3.1.0. I'm trying to warp some keypoints from a query image with a transformation matrix I generated with cv2.getAffineTransform() (see the code below). Whatever I pass to the transform function, it always throws this error:
cv2.error: D:\opencv\sources\modules\core\src\matmul.cpp:1947: error: (-215) scn == m.cols || scn + 1 == m.cols in function cv::transform
How do I have to pass the keypoints to make cv2.transform() work?
import cv2
import numpy as np
import random

queryImage_path = r"C:\tmp\query.jpg"   # raw strings so \t is not treated as an escape
trainImage_path = r"C:\tmp\train.jpg"

queryImage = cv2.imread(queryImage_path, cv2.IMREAD_COLOR)
trainImage = cv2.imread(trainImage_path, cv2.IMREAD_COLOR)

surf = cv2.xfeatures2d.SURF_create()
queryImage_keypoints = surf.detect(queryImage, None)
trainImage_keypoints = surf.detect(trainImage, None)
queryImage_keypoints, queryImage_descriptors = surf.compute(queryImage, queryImage_keypoints)
trainImage_keypoints, trainImage_descriptors = surf.compute(trainImage, trainImage_keypoints)

bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
matches = bf.match(queryImage_descriptors, trainImage_descriptors)

# get three random match indices which are not the same
match_index_a = random.randint(0, len(matches) - 1)
match_index_b = random.randint(0, len(matches) - 1)
match_index_c = random.randint(0, len(matches) - 1)

# get keypoints from match indices
# queryImage keypoints
queryImage_keypoint_a = queryImage_keypoints[matches[match_index_a].queryIdx]
queryImage_keypoint_b = queryImage_keypoints[matches[match_index_b].queryIdx]
queryImage_keypoint_c = queryImage_keypoints[matches[match_index_c].queryIdx]
# trainImage keypoints
trainImage_keypoint_a = trainImage_keypoints[matches[match_index_a].trainIdx]
trainImage_keypoint_b = trainImage_keypoints[matches[match_index_b].trainIdx]
trainImage_keypoint_c = trainImage_keypoints[matches[match_index_c].trainIdx]

# get the affine transformation matrix from these 6 keypoints
trainImage_points = np.float32([[trainImage_keypoint_a.pt[0], trainImage_keypoint_a.pt[1]],
                                [trainImage_keypoint_b.pt[0], trainImage_keypoint_b.pt[1]],
                                [trainImage_keypoint_c.pt[0], trainImage_keypoint_c.pt[1]]])
queryImage_points = np.float32([[queryImage_keypoint_a.pt[0], queryImage_keypoint_a.pt[1]],
                                [queryImage_keypoint_b.pt[0], queryImage_keypoint_b.pt[1]],
                                [queryImage_keypoint_c.pt[0], queryImage_keypoint_c.pt[1]]])

# get the transformation matrix for the current points
currentMatrix = cv2.getAffineTransform(queryImage_points, trainImage_points)

queryImage_keypoint = queryImage_keypoints[matches[0].queryIdx]
keypoint_asArray = np.array([[queryImage_keypoint.pt[0]], [queryImage_keypoint.pt[1]], [1]])
#queryImage_warped_keypoint = currentMatrix.dot(keypoint_asArray)
queryImage_warped_keypoint = cv2.transform(keypoint_asArray, currentMatrix)
Use
keypoint_asArray = np.array([[[queryImage_keypoint.pt[0], queryImage_keypoint.pt[1], 1]]])
instead of
keypoint_asArray = np.array([[queryImage_keypoint.pt[0]], [queryImage_keypoint.pt[1]], [1]])
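More generally, cv2.transform accepts a whole set of points at once: for a 2x3 affine matrix, an (N, 1, 2) float array of (x, y) coordinates satisfies the scn + 1 == m.cols condition from the error message. A sketch reusing the names from the question:
pts = np.float32([kp.pt for kp in queryImage_keypoints]).reshape(-1, 1, 2)
warped_pts = cv2.transform(pts, currentMatrix)  # (N, 1, 2) array of warped points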
