How to fix the multiprocessing issue in the code given below? - python-3.x

My main function aligns images with a reference image. It works fine on a single core. I tried to multiprocess it to reduce the time, but it takes the same time as a single core. I think the same image is allotted to all the cores. How do I split the different images in a folder across different cores to speed up the process? Also tell me if there is any error in my code.
import os
import time
import cv2
import numpy as np
from multiprocessing import Pool

MAX_FEATURES = 50000
GOOD_MATCH_PERCENT = 1.00

def alignImages(refimage, input_path, output_path):
    im1 = cv2.imread(input_path)
    im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
    im2Gray = cv2.cvtColor(refimage, cv2.COLOR_BGR2GRAY)
    # Detect ORB features and compute descriptors.
    orb = cv2.ORB_create(MAX_FEATURES)
    keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)
    keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)
    # Matcher algo: brute-force Hamming, best matches first
    matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = matcher.match(descriptors1, descriptors2, None)
    matches = sorted(matches, key=lambda m: m.distance)
    # Remove not-so-good matches
    numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
    matches = matches[:numGoodMatches]
    # Draw top matches
    imMatches = cv2.drawMatches(im1, keypoints1, refimage, keypoints2, matches, None)
    # Extract location of good matches
    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)
    for i, match in enumerate(matches):
        points1[i, :] = keypoints1[match.queryIdx].pt
        points2[i, :] = keypoints2[match.trainIdx].pt
    # Find homography
    h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
    # Use homography to warp the input image onto the reference frame
    height, width, channels = refimage.shape
    imReg = cv2.warpPerspective(im1, h, (width, height))
    f, e = os.path.splitext(output_path)
    cv2.imwrite(f + '.TIF', imReg)

if __name__ == "__main__":
    start = time.time()
    path = r'Path of folder to align images'
    dirs = os.listdir(path)
    array1 = []
    array2 = []
    array3 = []
    for i in dirs:
        input_path = path + '\\' + i
        reference_image = cv2.imread('Reference image path')
        array1.append(reference_image)
        array2.append(input_path)
        out = path + '\\' + 'new\\' + i
        array3.append(out)
    z = list(zip(array1, array2, array3))
    p = Pool(12)
    p.starmap(alignImages, z, chunksize=28)
    end = time.time()
    print(end - start)
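One way to spread the work across cores (a sketch, not a tested fix) is to stop passing the reference image through starmap, load it once per worker with a Pool initializer, and use a small chunksize: with chunksize=28 and 12 workers, a folder holding fewer than 28 images is handed to a single worker, which would explain why the run time matches the single-core case. The folder and reference paths below are the placeholders from the question, and align_one simply wraps the alignImages function defined above.

from multiprocessing import Pool
import os
import time
import cv2

_ref = None  # per-worker copy of the reference image

def init_worker(ref_path):
    # Load the reference image once in each worker instead of
    # pickling and sending it along with every task.
    global _ref
    _ref = cv2.imread(ref_path)

def align_one(input_path, output_path):
    # alignImages is the function from the question
    alignImages(_ref, input_path, output_path)

if __name__ == "__main__":
    start = time.time()
    path = r'Path of folder to align images'
    ref_path = 'Reference image path'
    tasks = [(os.path.join(path, name), os.path.join(path, 'new', name))
             for name in os.listdir(path)]
    with Pool(processes=os.cpu_count(),
              initializer=init_worker, initargs=(ref_path,)) as p:
        # chunksize=1 hands out one image at a time, so every worker stays busy
        p.starmap(align_one, tasks, chunksize=1)
    print(time.time() - start)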

Related

Detection of small object - aphids on plants

I'm currently trying to create a detector for aphids (green and rose) on plants, using only "classic" image processing techniques (no neural network).
Here is an image I'm working on:
'aphids.jpg'
I'm working on the code below. If you apply it to the image you should get the plants alone. My problem is that I want to isolate the aphids that can be seen on the plants. There are a lot of them, but I just want to detect the biggest or the most obvious ones.
In the code there is an "edge_detec" function I'm currently working on. One of the problems I have is that I can detect some of the aphids as contours, but it also picks up plain lines...
I tried to drop those lines using the contour hierarchy, but it seems those lines have inner contours, so I can't easily delete them.
I also tried adjust_gamma and adjust_contrast, but they don't help much.
I'm looking for more ideas. What would you try?
Thank you in advance!
Here is the code:
import cv2
import numpy as np
import matplotlib.pyplot as plt

def adjust_gamma(image, gamma=1.0):
    # build a lookup table mapping the pixel values [0, 255] to
    # their adjusted gamma values
    invGamma = 1.0 / gamma
    table = np.array([((i / 255.0) ** invGamma) * 255
                      for i in np.arange(0, 256)]).astype("uint8")
    # apply gamma correction using the lookup table
    return cv2.LUT(image, table)

def adjust_contrast(image, alpha=1.0, beta=0):
    new = np.zeros(image.shape, image.dtype)
    for y in range(image.shape[0]):
        for x in range(image.shape[1]):
            for c in range(image.shape[2]):
                new[y, x, c] = np.clip(alpha * image[y, x, c] + beta, 0, 255)
    return new

def img_process(img):
    (h1, w1) = img.shape[:2]
    center = (w1 / 2, h1 / 2)
    blur = cv2.GaussianBlur(img.copy(), (5, 5), 0)
    hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
    #image = img.copy()
    # Boundaries to separate plants from the image
    l_bound = np.array([20, 0, 0])
    h_bound = np.array([90, 250, 170])  # green
    mask = cv2.inRange(hsv, l_bound, h_bound)
    res = cv2.bitwise_and(img, img, mask=mask)
    # Find plant contours
    cnt, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    sort_cnt = sorted(cnt, key=cv2.contourArea, reverse=True)
    cnt = [c for c in sort_cnt if cv2.contourArea(c) > 300]
    cv2.drawContours(res, cnt, -1, (0, 255, 0), -1)
    # Inverse mask to keep only the plant in the image
    mask2 = cv2.inRange(res, np.array([0, 0, 0]), np.array([250, 250, 250]))
    mask2 = cv2.bitwise_not(mask2)
    res2 = cv2.bitwise_and(img, img, mask=mask2)
    # Increase brightness/contrast
    res2 = res2 * 1.45
    res2 = res2.astype('uint8')
    # Crop
    res2 = res2[:-50, int(center[0] - 300):int(center[0] + 550)]
    return res2

def edge_detec(img):
    (h1, w1) = img.shape[:2]
    center = (w1 / 2, h1 / 2)
    blur = cv2.GaussianBlur(img.copy(), (5, 5), 0)
    gray = cv2.cvtColor(blur, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 30, 70, apertureSize=3)
    edges = edges[:-50, int(center[0] - 300):int(center[0] + 550)]
    #kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    #edges = cv2.morphologyEx(edges, cv2.MORPH_GRADIENT, kernel)
    cnt, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnt = sorted(cnt, key=cv2.contourArea, reverse=True)
    listArea = list(map(cv2.contourArea, cnt))
    sort_cnt = [x for x in cnt if cv2.contourArea(x) > 10]
    cv2.drawContours(edges, sort_cnt, -1, (0, 255, 0), -1)
    return edges, center, img

### Start of the program
img = cv2.imread('051.jpg')
while True:
    ## Put processing function here
    img_mod = img_process(img)
    cv2.imshow('img', img_mod)
    if cv2.waitKey(1) & 0xFF == 27:
        break
cv2.destroyAllWindows()
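Not a full answer, but one idea to try for dropping the stray lines: instead of relying on the contour hierarchy, filter the Canny contours by shape. Thin lines have a large perimeter relative to their area, so their circularity is close to zero, while compact, aphid-like blobs score higher. A minimal sketch (the area and circularity thresholds are guesses to tune on your images):

import cv2
import numpy as np

def keep_blob_like(contours, min_area=10, min_circularity=0.4):
    # Keep contours that look like compact blobs and drop thin, line-like ones.
    blobs = []
    for c in contours:
        area = cv2.contourArea(c)
        perimeter = cv2.arcLength(c, True)
        if area < min_area or perimeter == 0:
            continue
        # circularity = 4*pi*area / perimeter^2 is ~1 for a disc, ~0 for a line
        circularity = 4.0 * np.pi * area / (perimeter * perimeter)
        if circularity >= min_circularity:
            blobs.append(c)
    return blobs

# e.g. inside edge_detec(), after findContours:
# cnt = keep_blob_like(cnt)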

3D-reconstruction using structure-from-motion

I want to do 3D reconstruction using a structure-from-motion algorithm. I am using OpenCV to do this in Python, but somehow the obtained point cloud breaks into two halves. My input images are:
Image 1
Image 2
Image 3.
I am matching every two consecutive images, i.e. image 1 with image 2 and image 2 with image 3. I tried different feature detectors such as SIFT, KAZE and SURF. With the obtained points I compute the essential matrix. I got the camera intrinsics from OpenCV's camera calibration; they are stored in the variables 'mtx' and 'dist' in the code below.
import os
import sys
import cv2
import numpy as np

file = os.listdir('Path_to _images')
file.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))
path = os.path.join(os.getcwd(), 'Path_to_images/')
for i in range(0, len(file) - 1):
    if i == len(file) - 1:
        break
    path1 = cv2.imread(path + file[i], 0)
    path1 = cv2.equalizeHist(path1)
    path2 = cv2.imread(path + file[i + 1], 0)
    path2 = cv2.equalizeHist(path2)
    # Feature Detection #
    sift = cv2.xfeatures2d.SIFT_create()
    kp1, des1 = sift.detectAndCompute(path1, None)
    kp2, des2 = sift.detectAndCompute(path2, None)
    # Feature Matching #
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)
    good = []
    pts1 = []
    pts2 = []
    for j, (m, n) in enumerate(matches):
        if m.distance < 0.8 * n.distance:
            good.append(m)
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)
    pts1 = np.int32(pts1)
    pts2 = np.int32(pts2)
    pts1 = np.array([pts1], dtype=np.float32)
    pts2 = np.array([pts2], dtype=np.float32)
    # UNDISTORTING POINTS #
    pts1_norm = cv2.undistortPoints(pts1, mtx, dist)
    pts2_norm = cv2.undistortPoints(pts2, mtx, dist)
    # COMPUTE FUNDAMENTAL MATRIX #
    F, mask = cv2.findFundamentalMat(pts1_norm, pts2_norm, cv2.FM_LMEDS)
    # COMPUTE ESSENTIAL MATRIX #
    E, mask = cv2.findEssentialMat(pts1_norm, pts2_norm, focal=55.474, pp=(33.516, 16.630), method=cv2.FM_LMEDS, prob=0.999, threshold=3.0)
    # POSE RECOVERY #
    points, R, t, mask = cv2.recoverPose(E, pts1_norm, pts2_norm)
    anglesBetweenImages = rotationMatrixToEulerAngles(R)
    sys.stdout = open('path_to_folder/angles.txt', 'a')
    print(anglesBetweenImages)
    # COMPOSE PROJECTION MATRIX OF R, t #
    matrix_1 = np.hstack((R, t))
    matrix_2 = np.hstack((np.eye(3, 3), np.zeros((3, 1))))
    projMat_1 = np.dot(mtx, matrix_1)
    projMat_2 = np.dot(mtx, matrix_2)
    # TRIANGULATE POINTS #
    point_4d_hom = cv2.triangulatePoints(projMat_1[:3], projMat_2[:3], pts1[:2].T, pts2[:2].T)
    # HOMOGENIZE THE 4D RESULT TO 3D #
    point_4d = point_4d_hom
    point_3d = point_4d[:3, :].T  # Obtains 3D points
    np.savetxt('/path_to_folder/' + file[i] + '.txt', point_3d)
After cv2.triangulatePoints, I expected to obtain one point cloud, but the result I got has two surfaces, as shown in the image below.
Result 1.
I would really appreciate it if anyone could tell me what is going wrong with my algorithm. Thanks!
You need to do this iteratively, point by point,
like this:
cv::Mat pointsMat1(2, 1, CV_64F);
cv::Mat pointsMat2(2, 1, CV_64F);
int size0 = m_history.getHistorySize();
for (int i = 0; i < size0; i++) {
    cv::Point pt1 = m_history.getOriginalPoint(0, i);
    cv::Point pt2 = m_history.getOriginalPoint(1, i);
    pointsMat1.at<double>(0, 0) = pt1.x;
    pointsMat1.at<double>(1, 0) = pt1.y;
    pointsMat2.at<double>(0, 0) = pt2.x;
    pointsMat2.at<double>(1, 0) = pt2.y;
    cv::Mat pnts3D(4, 1, CV_64F);
    cv::triangulatePoints(m_projectionMat1, m_projectionMat2, pointsMat1, pointsMat2, pnts3D);
}
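In Python, the same per-point loop might look like the sketch below (assuming projMat_1, projMat_2, pts1 and pts2 come from the question's code). Note that it also divides by the homogeneous coordinate before keeping the 3D point, a step the original snippet skips:

import numpy as np
import cv2

points_3d = []
for p1, p2 in zip(pts1.reshape(-1, 2), pts2.reshape(-1, 2)):
    a = np.asarray(p1, dtype=np.float64).reshape(2, 1)
    b = np.asarray(p2, dtype=np.float64).reshape(2, 1)
    # triangulate a single correspondence; the result is 4x1 homogeneous
    X_h = cv2.triangulatePoints(projMat_1, projMat_2, a, b)
    X = (X_h[:3] / X_h[3]).ravel()  # divide by w to get the 3D point
    points_3d.append(X)
points_3d = np.array(points_3d)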

How to remove an image and its label from a dataset

When I import a dataset of images and the CSV file with the corresponding labels, I use a function to extract every image and its label. But now I have to remove some images that do not fit specific criteria. Is there a way to remove the corresponding labels as well?
This is the function that is used to load the images and the labels
import numpy as np
import pandas as pd
from PIL import Image

def imp_img():
    dirname = '/s/desk/img/'
    x = np.zeros((1000, 100, 100), dtype=np.float32)
    for i in range(x.shape[0]):
        img = Image.open(dirname + 'img_%02d.png' % (i))
        img = np.array(img)
        x[i] = img
    path = '/s/desk/labels_classificatio.csv'
    labels = pd.read_csv(path, usecols=["category"], sep=";")
    y = np.array(labels)
    return x, y
This is how they are imported
x, y = imp_img()
x = x/255.0
y = y.reshape(y.shape[0], 1)
x.shape, y.shape
and now I made a for loop to remove the images that are too dark
c = []
for i in x:
    if np.sum(i) >= 100:
        c.append(i)
c = np.asarray(c)
The problem now is that I have fewer images than I have labels. Is there a way to remove the corresponding label as well?
You're looking for enumerate. It lets you loop over an iterable while maintaining a count. Instead of for i in x we'll do for i, img in enumerate(x), which lets us keep a loop counter i. This way you can subset the labels corresponding to the images that meet your criteria.
code:
c = []
c_labels = []
for i, img in enumerate(x):
    if np.sum(img) >= 100:
        c.append(img)
        c_labels.append(y[i])
c = np.asarray(c)
c_labels = np.asarray(c_labels)
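Since x and y are already NumPy arrays, an equivalent vectorized version (a sketch, assuming x has shape (N, 100, 100) and y has shape (N, 1) as in the question) builds one boolean mask and applies it to both arrays, which keeps images and labels aligned without an explicit loop:

# keep only the images whose pixel sum reaches the brightness criterion
keep = x.sum(axis=(1, 2)) >= 100
c = x[keep]
c_labels = y[keep]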

An efficient way to extract, and compare and match fingerprint minutiae

I am currently working on a program that detects and matches fingerprints as part of a fingerprint sensor. After processing the image, I obtain keypoints using Harris corner detection. Then, using the ORB feature extractor, I obtain descriptors in the form of an array.
The problem is that the number of keypoints I get for two different images of the same fingerprint is different. Hence, the descriptor arrays obtained are also of different sizes.
I've used Hamming distances to measure the difference between the descriptor arrays of two images, and hence the difference between the fingerprints themselves. However, due to the different array sizes, I'm finding it difficult to set a threshold that works for all fingerprints.
import cv2
import numpy
import matplotlib.pyplot as plt
from skimage.morphology import skeletonize
# image_enhance and removedot are helper modules from the rest of the project

def get_descriptors(img):
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    img = clahe.apply(img)
    img = image_enhance.image_enhance(img)  # for image processing
    img = numpy.array(img, dtype=numpy.uint8)
    # Threshold
    ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
    # Normalize to 0 and 1 range
    img[img == 255] = 1
    # Thinning
    skeleton = skeletonize(img)
    skeleton = numpy.array(skeleton, dtype=numpy.uint8)
    skeleton = removedot(skeleton)
    # Harris corners
    harris_corners = cv2.cornerHarris(img, 3, 3, 0.04)
    harris_normalized = cv2.normalize(harris_corners, 0, 255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32FC1)
    threshold_harris = 125
    # Extract keypoints
    keypoints = []
    for x in range(0, harris_normalized.shape[0]):
        for y in range(0, harris_normalized.shape[1]):
            if harris_normalized[x][y] > threshold_harris:
                keypoints.append(cv2.KeyPoint(y, x, 1))
    # Define descriptor
    orb = cv2.ORB_create()
    # Compute descriptors
    _, des = orb.compute(img, keypoints)
    return keypoints, des

def main():
    img1 = cv2.imread("C:/Users/Nimesh Shahdadpuri/Desktop/DMRC Intern/database/106_1.tif", cv2.IMREAD_GRAYSCALE)
    kp1, des1 = get_descriptors(img1)
    #print (des1)
    #print (des1.shape)
    img2 = cv2.imread("C:/Users/Nimesh Shahdadpuri/Desktop/DMRC Intern/database/106_2.tif", cv2.IMREAD_GRAYSCALE)
    kp2, des2 = get_descriptors(img2)
    #print (des2)
    # Matching between descriptors
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(des1, des2)
    matches = sorted(matches, key=lambda match: match.distance)
    #print (len(matches))
    # Plot keypoints
    img4 = cv2.drawKeypoints(img1, kp1, outImage=None)
    img5 = cv2.drawKeypoints(img2, kp2, outImage=None)
    #f, axarr = plt.subplots(1,2)
    print("First Fingerprint")
    #axarr[0].imshow(img4)
    plt.imshow(img4)
    plt.show()
    print("Second Fingerprint")
    #axarr[1].imshow(img5)
    plt.imshow(img5)
    plt.show()
    # Plot matches
    img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches, flags=2, outImg=None)
    print("All the matching points and the corresponding distances")
    plt.imshow(img3)
    plt.show()
    # Calculate score as the mean match distance
    score = 0
    for match in matches:
        score += match.distance
    score_threshold = 40
    matchper = score / len(matches)
    print(matchper)
    if matchper < score_threshold:
        print("Fingerprint matches.")
    else:
        print("Fingerprint does not match.")
I'm looking for an efficient way to define a general threshold for all fingerprints. I would also appreciate suggestions for an alternative approach to defining and matching the keypoints.
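One possible way to make the score less dependent on the number of keypoints (a sketch, not a validated fingerprint metric) is to apply Lowe's ratio test with knnMatch and report the fraction of descriptors in the smaller set that survive it, so the score always lies in [0, 1] regardless of how many keypoints each image produced. The ratio and any accept/reject threshold would still need tuning on your database.

import cv2

def match_score(des1, des2, ratio=0.75):
    # Ratio-test matching: keep a match only if its best distance is clearly
    # smaller than the second-best, then normalize by the smaller descriptor set.
    bf = cv2.BFMatcher(cv2.NORM_HAMMING)
    matches = bf.knnMatch(des1, des2, k=2)
    good = [m for m, n in (p for p in matches if len(p) == 2)
            if m.distance < ratio * n.distance]
    return len(good) / min(len(des1), len(des2))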

how to accelerate 2D convolution with FFT?

import numpy as np

# Zero-pad an image
def zero_pad(image, pad_height, pad_width):
    H, W = image.shape
    out = np.zeros((H + 2 * pad_height, W + 2 * pad_width))
    out[pad_height:H + pad_height, pad_width:W + pad_width] = image
    return out

# A step-by-step implementation of a convolution filter
def conv(image, kernel):
    Hi, Wi = image.shape
    Hk, Wk = kernel.shape
    out = np.zeros((Hi, Wi))
    image_pad = zero_pad(image, (Hk - 1) // 2, (Wk - 1) // 2)
    for i in range(Hi):
        for j in range(Wi):
            out[i, j] = np.sum(kernel * image_pad[i:Hk + i, j:Wk + j])
    return out

# Accelerate convolution using the FFT
def conv_faster(image, kernel):
    Hi, Wi = image.shape
    Hk, Wk = kernel.shape
    out = np.zeros((Hi, Wi))
    # expand image and kernel by zero-padding
    if (Hi + Hk) % 2 == 0:
        x = Hi + Hk
    else:
        x = (Hi + Hk) - 1
    if (Wi + Wk) % 2 == 0:
        y = Wi + Wk
    else:
        y = (Wi + Wk) - 1
    image_pad = np.zeros((x, y))
    kernel_pad = np.zeros((x, y))
    image_pad[0:Hi, 0:Wi] = image
    kernel_pad[0:Hk, 0:Wk] = kernel
    # shift image and kernel to the center of the frequency domain
    for p in range(Hi):
        for q in range(Wi):
            image_pad[p, q] *= (-1) ** (p + q)
    for p in range(Hk):
        for q in range(Wk):
            kernel_pad[p, q] *= (-1) ** (p + q)
    # take the FFT of image and kernel
    image_pad_fft = np.fft.fft2(image_pad)
    kernel_pad_fft = np.fft.fft2(kernel_pad)
    # keep only the imaginary part of the kernel's transform
    kernel_pad_fft = 1j * np.imag(kernel_pad_fft)
    # multiply the two in the frequency domain
    out_pad = np.fft.ifft2(image_pad_fft * kernel_pad_fft)
    # take the real part of the result
    out = np.real(out_pad[0:Hi, 0:Wi])
    # undo the earlier centering
    for p in range(Hi):
        for q in range(Wi):
            out[p, q] *= (-1) ** (p + q)
    return out
There are some differences between their outputs.
I think the results should be the same; how could I refine it? Thanks!
The kernel is [[1,0,-1],[2,0,-2],[1,0,-1]].
I input this image to both functions:
The step-by-step function gives this result:
The accelerated function gives this result:
For a convolution, the kernel must be flipped. What you do in conv() is a correlation.
Since your kernel is symmetric apart from a minus sign, result2 = -result1 in your current results.
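Building on that answer, a simpler FFT route (a sketch) is to skip the (-1)^(p+q) centering and the imaginary-part trick entirely: zero-pad both arrays to the full linear-convolution size, multiply the spectra, and crop. Flipping the kernel first makes it reproduce the correlation that conv() computes; with the antisymmetric Sobel kernel from the question that flip is the same as negating the result.

import numpy as np

def conv_fft(image, kernel):
    # Linear convolution via FFT: pad to (Hi+Hk-1, Wi+Wk-1) so the circular
    # convolution of the FFT does not wrap around, then crop the 'same' region.
    Hi, Wi = image.shape
    Hk, Wk = kernel.shape  # assumes an odd-sized kernel, like the 3x3 Sobel here
    F_img = np.fft.fft2(image, s=(Hi + Hk - 1, Wi + Wk - 1))
    F_ker = np.fft.fft2(kernel, s=(Hi + Hk - 1, Wi + Wk - 1))
    full = np.real(np.fft.ifft2(F_img * F_ker))
    top, left = (Hk - 1) // 2, (Wk - 1) // 2
    return full[top:top + Hi, left:left + Wi]

# conv() above is a correlation, so flip the kernel to match it:
# out_fft = conv_fft(image, kernel[::-1, ::-1])   # ~= conv(image, kernel)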
