AttributeError: 'Sequential' object has no attribute 'predict_classes'

I have a problem with my code: I can't get the correct result.
Thanks in advance.
dic = test_dataset.class_indices
idc = {k:v for v, k in dic.items()}
img = load_img('C:\\Users\\landa\\Downloads\\Alzheimer_s Dataset\\test\\NonDemented\\26 (100).jpg', target_size = (224,224,3))
img = img_to_array(img)
img = img/255
imshow(img)
plt.axis('off')
img = np.expand_dims(img,axis=0)
answer = model.predict_classes(img)
probability = round(np.max(model.predict_proba(img)*100),2)
print(probability, '% chances are there that the image is',idc[answer[0]])
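For reference, predict_classes and predict_proba were removed from Sequential in recent TensorFlow/Keras releases (deprecated and then dropped around TF 2.6). A minimal sketch of the usual replacement, reusing the variables above:

probs = model.predict(img)                           # shape (1, num_classes)
answer = np.argmax(probs, axis=1)                    # replaces predict_classes
probability = round(float(np.max(probs)) * 100, 2)   # replaces predict_proba
print(probability, '% chances are there that the image is', idc[answer[0]])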

Related

Evaluation gives 0 score

I am working on an instance segmentation problem with Mask R-CNN in PyTorch. The training part works with the code below, but evaluation gives a score of 0 for every mAP metric. What is the problem in the code?
More info:
I use Albumentations for the transforms and some files from torchvision for training.
Some problems I have run into:
When I use the coco bbox format instead of pascal_voc, it gives the following error:
RuntimeError: Boolean value of Tensor with more than one value is ambiguous
When I build the labels outside the convert_seg_to_boolMask function, it gives the following error:
RuntimeError: Boolean value of Tensor with more than one value is ambiguous
def get_transforms(train=False):
    if train:
        transform = A.Compose([
            ToTensorV2()
        ], bbox_params=A.BboxParams("pascal_voc", label_fields=["labels", "iscrowd"]))
    else:
        transform = A.Compose([
            ToTensorV2()
        ], bbox_params=A.BboxParams("pascal_voc", label_fields=["labels", "iscrowd"]))
    return transform

class Dataset(datasets.VisionDataset):
    def __init__(self, coco_, data_dir, transform=None, target_transform=None, transforms=None):
        super().__init__(data_dir, transforms, transform, target_transform)
        self.coco_info = coco_
        self.data_dir = data_dir
        self.transforms = transforms
        if isinstance(self.coco_info, dict):
            self.ids = [x["id"] for x in self.coco_info["images"] if len(self._load_target(x["id"])) > 0]

    def _load_image(self, id: int):
        name = loadImgs(self.coco_info["images"], id)[0]['file_name']
        image = cv2.imread(os.path.join(self.data_dir, name))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)/255
        return image

    def _load_target(self, id):
        return loadAnns(self.coco_info["annotations"], id)

    def n_classes(self):
        category_names = list(set(sorted([a["name"] for a in self.coco_info["categories"]])))
        self.classes = ["__background__"] + [i for i in category_names]
        return self.classes

    def __getitem__(self, idx):
        id = self.ids[idx]
        image = self._load_image(id)
        target = copy.deepcopy(self._load_target(id))
        image_shape = (image.shape[0], image.shape[1])
        img_info = {
            "img_shape": image_shape,
            "image_id": id,
            "labels": [t["category_id"] for t in target],
            "segmentation": [t["segmentation"][0] for t in target],
            "id": [t["id"] for t in target]
        }
        mask, labels = self.convert_seg_to_boolMask(img_info)
        obj_ids = np.unique(mask)
        obj_ids = obj_ids[1:]
        masks = torch.tensor(mask == obj_ids[:, None, None])
        boxes = []
        bbox = np.array([t["bbox"] for t in target])
        for xmin, ymin, width, height in bbox:
            xmax = xmin + width
            ymax = ymin + height
            boxes.append([xmin, ymin, xmax, ymax])
        boxes = torch.tensor(boxes)
        labels = torch.tensor(labels)
        image_id = torch.tensor([id])
        iscrowd = torch.tensor([t["iscrowd"] for t in target])
        transformed = self.transforms(image=image, masks=masks, bboxes=boxes, labels=labels, iscrowd=iscrowd)
        image = transformed['image']
        masks = torch.tensor(transformed["masks"])
        boxes = torch.tensor(transformed['bboxes'])
        labels = torch.tensor(transformed["labels"])
        iscrowd = torch.tensor(transformed["iscrowd"])
        area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
        image_id = torch.tensor(image_id)
        target = {}
        target["boxes"] = boxes
        target["labels"] = labels
        target["masks"] = masks
        target["image_id"] = image_id
        target["area"] = area
        target["iscrowd"] = iscrowd
        return image, target

    def __len__(self):
        return len(self.ids)

    def convert_seg_to_boolMask(self, img_info):
        mask = np.zeros(img_info["img_shape"], dtype=np.uint8)
        mask = Image.fromarray(mask)
        draw = ImageDraw.Draw(mask)
        for seg, i in zip(img_info["segmentation"], img_info["id"]):
            points = [tuple([k, l]) for k, l in zip(seg[0::2], seg[1::2])]
            draw.polygon(xy=points,
                         outline=tuple([i]),
                         fill=tuple([i]))
        mask = np.array(mask)
        labels = img_info["labels"]
        return mask, labels
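Not a definitive fix, but the two RuntimeErrors mentioned above typically come from Albumentations doing truthiness checks on torch tensors. A minimal sketch (my assumption: Albumentations 1.x) of the transform step inside __getitem__ that keeps masks, boxes and labels as numpy arrays / plain lists until after the transform, and only then builds the target tensors:

# Sketch only: keep everything as numpy arrays / lists until after the Albumentations call.
mask, labels = self.convert_seg_to_boolMask(img_info)
obj_ids = np.unique(mask)[1:]                                        # drop background (0)
masks = [(mask == obj_id).astype(np.uint8) for obj_id in obj_ids]    # list of HxW arrays

boxes = [[x, y, x + w, y + h] for x, y, w, h in (t["bbox"] for t in target)]

transformed = self.transforms(image=image.astype(np.float32),
                              masks=masks,
                              bboxes=boxes,
                              labels=labels,
                              iscrowd=[t["iscrowd"] for t in target])

boxes = torch.as_tensor(transformed["bboxes"], dtype=torch.float32)
target = {
    "boxes": boxes,
    "labels": torch.as_tensor(transformed["labels"], dtype=torch.int64),
    "masks": torch.stack([torch.as_tensor(m, dtype=torch.uint8) for m in transformed["masks"]]),
    "image_id": torch.tensor([id]),
    "area": (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0]),
    "iscrowd": torch.as_tensor(transformed["iscrowd"], dtype=torch.int64),
}
return transformed["image"], target

If the loader runs cleanly and mAP is still 0, it may also be worth checking that the category_id values used for target["labels"] line up with the num_classes the model was built with.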

ValueError: Unsupported image shape: ()

I am trying to preprocess my training data with resizing, grayscale conversion, and thresholding, but I get the error in the title. Here is the code:
train_image = []
for i in tqdm(range(train.shape[0])):
    img = image.load_img('Data/Images/' + str(train['Id'][i]) + '.jpg', target_size=(400,400,1))
    img = cv2.imread('Data/Images/', cv2.IMREAD_GRAYSCALE)
    retval, img = cv2.threshold(img, 128, 1, cv2.THRESH_BINARY)
    img = image.img_to_array(img)
    img = img/255
    train_image.append(img)
X = np.array(train_image)
How do I write the code to preprocess the data correctly?
In the line
img = cv2.imread('Data/Images/', cv2.IMREAD_GRAYSCALE)
no image file is specified; the path is just a directory, so no image is actually loaded. Also, use only one method to load the images: either image.load_img() or cv2.imread().
I corrected the code as follows:
train_image = []
for i in tqdm(range(train.shape[0])):
    img = cv2.imread('Data/Images/' + str(train['Id'][i]) + '.jpg', cv2.IMREAD_GRAYSCALE)
    retval, img = cv2.threshold(img, 128, 1, cv2.THRESH_BINARY)
    img = cv2.resize(img, (400,400))
    img = image.img_to_array(img)
    img = img/255
    train_image.append(img)
X = np.array(train_image)
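One small follow-up on the corrected code: cv2.threshold(img, 128, 1, cv2.THRESH_BINARY) maps pixels to 0 or 1, so the later img/255 shrinks them to roughly 0.004 instead of 1.0. A sketch that keeps the binary values at 0.0/1.0 (same variables as above; using 255 as the threshold maxval is my change):

train_image = []
for i in tqdm(range(train.shape[0])):
    img = cv2.imread('Data/Images/' + str(train['Id'][i]) + '.jpg', cv2.IMREAD_GRAYSCALE)
    retval, img = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY)  # pixels become 0 or 255
    img = cv2.resize(img, (400, 400))
    img = image.img_to_array(img)   # adds the channel axis -> (400, 400, 1)
    img = img / 255                 # values are now 0.0 or 1.0
    train_image.append(img)
X = np.array(train_image)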

TypeError: resize() got multiple values for argument 'size'

I'm trying to prepare my image data for image segmentation, but I ran into an error. I read the PIL Image documentation and I think I passed all the parameters the function needs. I put a * at the start of the line where I get the error.
from PIL.Image import Image
im = Image()
image_ids = next(os.walk(DATA_PATH))[1]
X = np.zeros((len(image_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
Y = np.zeros((len(image_ids), IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)
for n, id_ in tqdm(enumerate(image_ids), total=len(image_ids)):
    path = DATA_PATH
    img = imread(path + '/' + id_ + '/images/' + 'page' + id_ + '.png')[:,:IMG_CHANNELS]
*   img = im.resize(img, size=(IMG_HEIGHT, IMG_WIDTH))
    X[n] = img
    mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)
    for mask_file in next(os.walk(path + '/masks/'))[2]:
        mask_ = imread(path + '/masks/' + mask_file)
        mask_ = np.expand_dims(im.resize(mask_, (IMG_HEIGHT, IMG_WIDTH), mode='constant',
                                         preserve_range=True), axis=-1)
        mask = np.maximum(mask, mask_)
    Y[n] = mask
x_train = X
y_train = Y
I can't seem to find any solution to it.
Thanks for the help.
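PIL's Image.resize is an instance method that takes only a size tuple (and an optional resample filter), so im.resize(img, size=(IMG_HEIGHT, IMG_WIDTH)) passes img where the size is expected, which is where the TypeError comes from. The mode='constant' and preserve_range=True arguments suggest the snippet was adapted from code that used skimage.transform.resize, which works directly on numpy arrays; a minimal sketch of the two resize calls with that function (my assumption, same variable names as above):

from skimage.transform import resize

# image: resize the numpy array returned by imread
img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
X[n] = img

# mask: resize, then add the channel axis expected by Y
mask_ = resize(mask_, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
mask_ = np.expand_dims(mask_, axis=-1)
mask = np.maximum(mask, mask_)

With plain PIL the equivalent would be np.array(Image.fromarray(img).resize((IMG_WIDTH, IMG_HEIGHT))); note that PIL expects (width, height) rather than (height, width).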

samples.cols == var_count && samples.type() == 5 in function 'cv::ml::SVMImpl::predict' error on svm.predict method

I'm creating an object classifier in OpenCV Python using an SVM. The training dataset consists of 200 positive and 200 negative images. For the positive images, I cropped the target object out of each image and resized the crops to (64, 128) for HOG calculation. For the negative images, I first created an image pyramid, then applied a 64x128 sliding window, and then computed HOG features for the positive crops as well as for all windows of the negative images, with labels 1 and 0. I trained the SVM model on the HOG features.
I get the error "cv2.error: OpenCV(3.4.2) C:\projects\opencv-python\opencv\modules\ml\src\svm.cpp:2010: error: (-215:Assertion failed) samples.cols == var_count && samples.type() == 5 in function 'cv::ml::SVMImpl::predict'" when I call the predict function with res = svm.predict(samples[0]).ravel().
import cv2
import os
import time
import numpy as np
import imutils

positive_path = 'C:\\Users\\Admin\\3D Objects\\datqaet with hog and svm\\ROI images'
negative_path = 'C:\\Users\\Admin\\3D Objects\\datqaet with hog and svm\\Negative images'

def pyramid(img):  # Create image pyramid
    minSize = (30, 30)
    imgarr = []
    while True:
        scale = 2
        imgarr.append(img)
        w = int(img.shape[1] / scale)
        img = imutils.resize(img, width=w)
        if img.shape[0] < minSize[1] or img.shape[1] < minSize[0]:
            break
    return imgarr

def sliding_window(image, stepSize, windowSize):  # Sliding window for negative images
    sliding = []
    for y in range(0, image.shape[0], stepSize):
        for x in range(0, image.shape[1], stepSize):
            sliding.append((x, y, image[y:y + windowSize[1], x:x + windowSize[0]]))
    return sliding

def get_hog():
    winSize = (64, 128)
    blockSize = (16, 16)
    blockStride = (16, 16)
    cellSize = (8, 8)
    nbins = 9
    derivAperture = 1
    winSigma = 4.
    histogramNormType = 0
    L2HysThreshold = 0.2
    gammaCorrection = 0
    nlevels = 64
    signedGradient = True
    hog = cv2.HOGDescriptor(winSize, blockSize, blockStride, cellSize, nbins, derivAperture, winSigma,
                            histogramNormType, L2HysThreshold, gammaCorrection, nlevels, signedGradient)
    return hog

samples = []
labels = []
sam = []
hog = get_hog()

for filename in os.listdir(positive_path):
    img = cv2.imread(os.path.join(positive_path, filename), 0)  # grayscale image
    img = cv2.resize(img, (64, 128))
    img = np.array(img)
    hist = hog.compute(img)
    hist = cv2.normalize(hist, None)
    sam.append(img)
    samples.append(hist)
    labels.append(1)

i = 0
for filename in os.listdir(negative_path):
    img = cv2.imread(os.path.join(negative_path, filename), 0)
    (winW, winH) = (64, 128)
    pyr = pyramid(img)
    for resized in pyr:
        sliding = sliding_window(resized, stepSize=32, windowSize=(winW, winH))
        for (x, y, window) in sliding:
            if window.shape[0] != winH or window.shape[1] != winW:
                continue
            hist = hog.compute(window)
            hist = cv2.normalize(hist, None)
            sam.append(window)
            samples.append(hist)
            labels.append(0)
    print(i)
    i = i + 1

samples = np.array(samples, dtype=np.float32)
labels = np.array(labels, dtype=int)
samples = np.squeeze(samples)
print(len(samples))
print(samples.shape)

rand = np.random.RandomState(10)
shuffle = rand.permutation(len(samples))
sam = samples[shuffle]
samples = sam[shuffle]
labels = labels[shuffle]

svm = cv2.ml.SVM_create()
svm.setKernel(cv2.ml.SVM_LINEAR)
svm.setType(cv2.ml.SVM_C_SVC)
svm.setC(2.67)
svm.setGamma(5.383)
svm_params = dict(kernel_type=cv2.ml.SVM_LINEAR,
                  svm_type=cv2.ml.SVM_C_SVC,
                  C=2.67, gamma=5.383)
svm.train(samples, cv2.ml.ROW_SAMPLE, labels)
print("trained")
res = svm.predict(samples[0]).ravel()
print(res)

cap = cv2.VideoCapture(0)
while True:
    ret, img = cap.read()
    img = cv2.resize(img, (400, 400))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    (winW, winH) = (64, 128)
    pyr = pyramid(img)
    for resized in pyr:
        sliding = sliding_window(resized, stepSize=32, windowSize=(winW, winH))
        for (x, y, window) in sliding:
            if window.shape[0] != winH or window.shape[1] != winW:
                continue
            hist = hog.compute(window)
            hist = cv2.normalize(hist, None)
            hist = np.reshape(hist, (1, hist.shape[0]))
            res = svm.predict(hist)[1].ravel()
            if res == 1:
                print("found")
    cv2.imshow('img', img)
    cv2.waitKey(10)
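The assertion samples.cols == var_count && samples.type() == 5 means predict expects a 2-D float32 matrix with one row per sample (type 5 is CV_32F); samples[0] is a 1-D vector, so the shape check fails, and predict also returns a (retval, results) tuple rather than an array. A minimal sketch of the call using the variables above:

# Reshape one HOG vector to a single float32 row before predicting.
sample0 = samples[0].reshape(1, -1).astype(np.float32)
res = svm.predict(sample0)[1].ravel()
print(res)

Unrelated to the assertion, note that samples = sam[shuffle] permutes the features a second time while labels is permuted only once, so the features and labels end up misaligned before training.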

How to call a function within a string for a title

I have a script that produces 4 images (below I only include 2 as an example of the output). I have another function that determines what percentage cat or dog the picture is, and I would like to call that function in the titles used in the following code.
import cv2
import matplotlib.pyplot as plt

def Mpic():
    plt.figure(figsize=(15, 30))
    path = r"data/dogscats1/pupper"
    path1 = r"data/dogscatspeople/test1"
    path2 = r"data/dogscatspeople/test1"
    path3 = r"data/dogscats1/pupper"
    imgpath1 = path + "/cat.jpg"
    imgpath2 = path1 + "/1.jpg"
    imgpath3 = path2 + "/2.jpg"
    imgpath4 = path3 + "/dog.jpg"
    img1 = cv2.imread(imgpath1, 1)
    img2 = cv2.imread(imgpath2, 1)
    img3 = cv2.imread(imgpath3, 1)
    img4 = cv2.imread(imgpath4, 1)
    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
    img3 = cv2.cvtColor(img3, cv2.COLOR_BGR2RGB)
    img4 = cv2.cvtColor(img4, cv2.COLOR_BGR2RGB)
    titles = ['Kitty', '% Cat = , Dog % = ', '% Cat = , Dog % =', 'Pupper']
    images = [img1, img2, img3, img4]
    for i in range(4):
        plt.subplot(4, 2, i + 1)
        plt.imshow(images[i])
        plt.xticks([])
        plt.title(titles[i])
        plt.yticks([])
    plt.show()

if __name__ == "__main__":
    Mpic()
This is the original function that produces the prediction:
def pred_datsci(file_path):
    prev_precompute = learn.precompute
    learn.precompute = False
    try:
        trn_tfms, val_tfms = tfms_from_model(arch, sz)
        test_img = open_image(file_path)
        im = val_tfms(test_img)
        pred = learn.predict_array(im[None])
        class_index = (np.exp(pred))
        class_index1 = np.argmax(np.exp(pred))
        print(class_index*100)
        return data.classes[class_index1]
    finally:
        learn.precompute = prev_precompute
It can be called along these lines:
pred_datsci(f"data/dogscats1/valid/dogs/12501.jpg")
I want to call it in the titles, in a form something like this:
titles = [ cat % = pred_datsci(f"data/dogscats1/valid/cats/cat.1.jpg"), "etc"]
which, written as valid Python with str.format, is:
titles = ["cat % = {}".format(pred_datsci("data/dogscats1/valid/cats/cat.1.jpg")), "etc"]
