I'm trying to preprocess my training data with resizing, grayscale conversion, and thresholding, but I get the error from the title. Here is the code:
train_image = []
for i in tqdm(range(train.shape[0])):
    img = image.load_img('Data/Images/' + str(train['Id'][i]) + '.jpg', target_size=(400,400,1))
    img = cv2.imread('Data/Images/', cv2.IMREAD_GRAYSCALE)
    retval, img = cv2.threshold(img, 128, 1, cv2.THRESH_BINARY)
    img = image.img_to_array(img)
    img = img/255
    train_image.append(img)
X = np.array(train_image)
How do I write the preprocessing code correctly?
In the line
img = cv2.imread('Data/Images/', cv2.IMREAD_GRAYSCALE)
no image file is specified; the path is just a directory. Also, use only one method to load the image, either image.load_img() or cv2.imread().
I corrected the code like this:
train_image = []
for i in tqdm(range(train.shape[0])):
    # load directly in grayscale, using the full file name
    img = cv2.imread('Data/Images/' + str(train['Id'][i]) + '.jpg', cv2.IMREAD_GRAYSCALE)
    # resize first, then threshold, so interpolation does not blur the binary values
    img = cv2.resize(img, (400, 400))
    retval, img = cv2.threshold(img, 128, 1, cv2.THRESH_BINARY)
    # the threshold above already produces 0/1 values, so no extra division by 255 is needed
    img = image.img_to_array(img)  # adds the trailing channel dimension
    train_image.append(img)
X = np.array(train_image)
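As a quick sanity check after the loop (a sketch; the expected shapes and values assume the 400x400 grayscale preprocessing above):

print(X.shape)           # expected: (number_of_images, 400, 400, 1)
print(X.min(), X.max())  # expected: 0.0 and 1.0 after binarization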
I have a problem with my code and can't get the correct result. Thanks in advance.
dic = test_dataset.class_indices
idc = {k:v for v, k in dic.items()}
img = load_img('C:\\Users\\landa\\Downloads\\Alzheimer_s Dataset\\test\\NonDemented\\26 (100).jpg', target_size = (224,224,3))
img = img_to_array(img)
img = img/255
imshow(img)
plt.axis('off')
img = np.expand_dims(img,axis=0)
answer = model.predict_classes(img)
probability = round(np.max(model.predict_proba(img)*100),2)
print(probability, '% chances are there that the image is',idc[answer[0]])
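In case it is relevant: in recent Keras/TensorFlow versions, predict_classes and predict_proba were removed from the model API. A minimal sketch of the equivalent using predict (assumptions: a recent Keras version and a softmax output over the classes in idc; this is not a confirmed fix for the wrong result):

probs = model.predict(img)                       # shape (1, num_classes)
answer = int(np.argmax(probs, axis=1)[0])        # predicted class index
probability = round(float(np.max(probs)) * 100, 2)
print(probability, '% chances are there that the image is', idc[answer])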
I am working on an OpenCV project where I need to detect the names column and any black border present around the ROI. I am quite new to image processing, so I am unable to figure out how to do this.
This is one of the sample images, from which I wish to remove the column on the right (the one containing all the details). Not all images contain this column, so I want to detect the column and remove it from the image.
Here is the expected output.
EDIT
Here is the code I have tried (I attempted to detect the largest rectangles in the region):
import cv2
from cv2 import dilate
from cv2 import findContours
import imutils
import numpy as np
image_name = 'test2.jpg'
og_plan = cv2.imread('test_images/{}'.format(image_name))
res = og_plan.copy()
img_height, img_width, img_channel = og_plan.shape
img_area = img_width * img_height
if og_plan.shape[0] > 800:
    res = imutils.resize(res, height=720)
    img_height, img_width, img_channel = res.shape
    img_area = img_width * img_height
print(res.shape)
print(img_area)
# convert to HSV for colour masking and to greyscale for plain thresholding
hsv_plan = cv2.cvtColor(res, cv2.COLOR_BGR2HSV)
grey_plan = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
# keep only pixels whose HSV values fall in the blue range
blue_min = np.array([14,100,76])
blue_max = np.array([130,255,255])
bluemask = cv2.inRange(hsv_plan, blue_min, blue_max)
blue_output = cv2.bitwise_and(hsv_plan, hsv_plan, mask=bluemask)
grey_mask = cv2.cvtColor(blue_output, cv2.COLOR_BGR2GRAY)
# binarise the blue mask and the (inverted) greyscale image with Otsu
ret, thresh = cv2.threshold(grey_mask, 100, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
ret2, thresh2 = cv2.threshold(grey_plan, 160, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
# dilate to connect strokes, then find contours in both binarised images
kernel = np.ones((3,3), np.uint8)
dil = dilate(thresh, kernel, iterations=2)
dil_grey = dilate(thresh2, kernel, iterations=2)
cont, hier = findContours(dil, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cont1, hier1 = findContours(dil_grey, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
def max_rect(cntrs):
    ar = {}
    for cnt in cntrs:
        x,y,w,h = cv2.boundingRect(cnt)
        area = w*h
        ar[area] = (x,y,w,h)
    # ar = sorted(ar, key=ar.keys, reverse=True)
    return ar
area_dict = max_rect(cont1)
roi_area = []
for area in area_dict:
    if area >= img_area*0.1 and area < img_area:
        print(area)
        roi_area.append(area)
plan_no = 1
for a in roi_area:
    plan = area_dict[a]
    # del area_dict[a]
    x,y,w,h = plan
    roi = res[y:y+h, x:x+w]
    print(plan)
    cv2.rectangle(res, (x-5,y-5), (x+w+5, y+h+5), (255,255,0), 2)
    cv2.imshow('ROI-{}'.format(image_name), roi)
    cv2.imwrite('./result/{}_plan-{}.png'.format(image_name, plan_no), roi)
    cv2.waitKey(0)
    plan_no += 1
'''plan1 = area_dict[max(area_dict)]
del area_dict[max(area_dict)]
plan2 = area_dict[max(area_dict)]
x,y,w,h = plan1
x1,y1,w1,h1 = plan2
roi1 = res[y:y+h, x:x+w]
roi2 = res[y1:y1+h1, x1:x1+w1]
print(plan1, plan2)
cv2.rectangle(res, (x-5,y-5), (x+w+5, y+h+5), (255,255,0), 2)
cv2.rectangle(res, (x1-5,y1-5), (x1+w1+5, y1+h1+20), (255,255,0), 2)'''
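One direction that might help, as a sketch only: it assumes the details column shows up as a large rectangular contour whose left edge lies in the right half of the page, and remove_right_column is a hypothetical helper, not part of the code above. The idea is to find such a contour and crop everything to its left; images without the column are returned unchanged.

def remove_right_column(page, contours, page_width):
    # leftmost x of any sufficiently large bounding box that starts in the right half
    cut_x = page_width
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        if x > page_width // 2 and w * h > 0.05 * page_width * page.shape[0]:
            cut_x = min(cut_x, x)
    # if no such box was found, cut_x is still page_width and the full page is kept
    return page[:, :cut_x]

cropped = remove_right_column(res, cont1, img_width)
cv2.imwrite('./result/{}_no_column.png'.format(image_name), cropped)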
This is my code; I want to read the text from this captcha image.
screenshot_name = "captcha.png"
driver.save_screenshot(screenshot_name)
#crop img
img = Image.open("captcha.png")
area = (30, 138, 355, 210)
cropped_img = img.crop(area)
cropped_img.save('captcha1.png')
#read Captcha
img = cv2.imread("captcha1.png")
gry = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
(h, w) = gry.shape[:2]
gry = cv2.resize(gry, (w*2, h*2))
cv2.imwrite("captcha2.png",gry)
cls = cv2.morphologyEx(gry, cv2.MORPH_CLOSE, None)   # closing with the default 3x3 kernel
thr = cv2.threshold(cls, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]   # Otsu binarisation
cv2.imwrite("captcha3.png",thr)
txt = tess.image_to_string(thr)
print(txt)
I tried this code, but txt prints nothing. Please help.
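One thing worth experimenting with (a sketch only; it assumes the captcha is a single line of text and that tess is pytesseract, as in the code above) is to pass an explicit page-segmentation mode to Tesseract:

custom_config = '--psm 7'   # assume a single line of text
txt = tess.image_to_string(thr, config=custom_config)
print(repr(txt))            # repr() shows whether only whitespace came back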
I'm trying to prepare my image data for image segmentation, but I ran into an error. I read the PIL Image documentation and I think I passed all the required parameters to the function. I put a * at the start of the line where I get the error.
from PIL.Image import Image

im = Image()
image_ids = next(os.walk(DATA_PATH))[1]
X = np.zeros((len(image_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
Y = np.zeros((len(image_ids), IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)
for n, id_ in tqdm(enumerate(image_ids), total=len(image_ids)):
    path = DATA_PATH
    img = imread(path + '/' + id_ + '/images/' + 'page' + id_ + '.png')[:,:IMG_CHANNELS]
*   img = im.resize(img, size = (IMG_HEIGHT, IMG_WIDTH))
    X[n] = img
    mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)
    for mask_file in next(os.walk(path + '/masks/'))[2]:
        mask_ = imread(path + '/masks/' + mask_file)
        mask_ = np.expand_dims(im.resize(mask_, (IMG_HEIGHT, IMG_WIDTH), mode='constant',
                               preserve_range=True), axis=-1)
        mask = np.maximum(mask, mask_)
    Y[n] = mask
x_train = X
y_train = Y
I can't seem to find any solution to it.
Thanks for the help.
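For what it's worth, the mode= and preserve_range= arguments used above belong to skimage.transform.resize, not to PIL's Image.resize. A minimal sketch of the resize step using skimage instead of PIL (an assumption on my part, since the question doesn't say which imread is imported; the [:, :, :IMG_CHANNELS] slice assumes a colour image):

from skimage.io import imread
from skimage.transform import resize

# skimage works directly on numpy arrays, so no PIL Image object is needed
img = imread(path + '/' + id_ + '/images/' + 'page' + id_ + '.png')[:, :, :IMG_CHANNELS]
img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
X[n] = img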
I am trying to set the DPI to (72, 72) using OpenCV in Python. Can anyone tell me how to assign the DPI in OpenCV using imwrite?
img = cv2.imread('kitchen.jpeg')
img = cv2.resize(img,(500,500),interpolation=cv2.INTER_AREA)
img = cv2.cvtColor(img, cv2.COLOR_RGBA2RGB)
mask = np.zeros(img.shape[:2],np.uint8)
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64)
rect = (10,10,500,500)
cv2.grabCut(img,mask,rect,bgdModel,fgdModel,5,cv2.GC_INIT_WITH_RECT)
mask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')
img = img*mask2[:,:,np.newaxis]
lower_white = np.array([0, 0, 0], dtype=np.uint8)
upper_white = np.array([0,0,0], dtype=np.uint8)
mask = cv2.inRange(img, lower_white, upper_white) # could also use threshold
res = cv2.bitwise_not(img, img, mask)
cv2.imwrite('kitchen_processed.jpeg',img)
DPI is for printing only. When writing the image you don't have to set it, and for some formats you can't; it only matters when the image is printed. If you insist on doing so, then after writing the image, call the ImageMagick convert tool on the file (from Python, via subprocess):
convert -density 75 -units pixelsperinch infile.jpg outfile.png
Normally the density is 300 or more; in your case, use 75.
EDIT
Add this in. I'm not sure if you have other potential problems, but based on your title, adding these lines should solve your issue:
import subprocess
img = cv2.imread('kitchen.jpeg')
img = cv2.resize(img,(500,500),interpolation=cv2.INTER_AREA)
img = cv2.cvtColor(img, cv2.COLOR_RGBA2RGB)
mask = np.zeros(img.shape[:2],np.uint8)
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64)
rect = (10,10,500,500)
cv2.grabCut(img,mask,rect,bgdModel,fgdModel,5,cv2.GC_INIT_WITH_RECT)
mask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')
img = img*mask2[:,:,np.newaxis]
lower_white = np.array([0, 0, 0], dtype=np.uint8)
upper_white = np.array([0,0,0], dtype=np.uint8)
mask = cv2.inRange(img, lower_white, upper_white) # could also use threshold
res = cv2.bitwise_not(img, img, mask)
cv2.imwrite('kitchen_processed.png',img)
subprocess.run(['convert', '-density', '75', '-units', 'pixelsperinch', 'kitchen_processed.png', 'outfile.png'])
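If you would rather avoid the external convert call, another option (not part of the suggestion above, just a sketch assuming Pillow is installed; the output name kitchen_72dpi.png is only an example) is to let Pillow embed the DPI metadata when re-saving the file:

from PIL import Image

# re-save the OpenCV result with 72x72 DPI metadata embedded
Image.open('kitchen_processed.png').save('kitchen_72dpi.png', dpi=(72, 72))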