Before you flag this question, please read it first.
I found the tutorial How to count the number of objects detected with Template Matching?, and it's good but not perfect.
The problem I am facing is that it doesn't give an accurate count, even though it draws rectangles around all the found objects!
For example, I have this (before executing the script):
Then, I have this (after executing the script):
As you can clearly see, there are 3 rectangles in the 4th row, but the count it gives is 1.
I've tried changing the threshold and sensitivity, but it didn't work. Here's what I have so far:
# USAGE
# python3 match_template.py --template cod_logo.png --images images
import numpy as np
import argparse
import imutils
import glob
import cv2
from matplotlib import pyplot as plt
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-t", "--template", required=True, help="Path to template image")
ap.add_argument("-i", "--images", required=True,
    help="Path to images where template will be matched")
ap.add_argument("-v", "--visualize",
    help="Flag indicating whether or not to visualize each iteration")
args = vars(ap.parse_args())
def match_and_count(template, image):
    # read the scene image, and convert it to grayscale for matching
    img_rgb = cv2.imread(image)
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    template = cv2.imread(template, 0)
    w, h = template.shape[::-1]
    # correlation map: one score per candidate top-left corner
    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
    threshold = 0.8
    loc = np.where(res >= threshold)
    f = set()
    for pt in zip(*loc[::-1]):
        cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)
        # deduplicate near-identical hits by rounding coordinates to a coarse grid
        sensitivity = 100
        f.add((round(pt[0] / sensitivity), round(pt[1] / sensitivity)))
    cv2.imwrite('multiple_objects.jpg', img_rgb)
    print("Occurrence of Object: %s" % len(f))

match_and_count(args["template"], args["images"])
Does anyone have a better approach for doing this?
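A possible refinement, sketched below with the same res, w, and h as in the script above: merge overlapping hits with cv2.groupRectangles instead of rounding, since two genuine matches can round into the same sensitivity cell and collapse the count.

import numpy as np
import cv2

def count_template_matches(res, w, h, threshold=0.8):
    # every location scoring above the threshold becomes a candidate box
    ys, xs = np.where(res >= threshold)
    boxes = [[int(x), int(y), w, h] for x, y in zip(xs, ys)]
    # duplicate the list so isolated single hits survive groupThreshold=1,
    # while clusters of overlapping candidates are merged into one box each
    rects, _ = cv2.groupRectangles(boxes + boxes, groupThreshold=1, eps=0.5)
    return len(rects), rects

Each merged rectangle then counts as one detected object, so the printed count matches the number of boxes drawn.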
I am working on a task that uses image processing techniques to clean a noisy image with the help of other (noisy) images that overlap it in area. To achieve this, I need to calculate the warp of each of the images to the target image, i.e. calculate the alignment between them.
My goal is to apply the necessary warps in order to copy each of the images onto the target image.
Example images:
source_01.jpg, source_02.jpg, target.jpg
To achieve the above, I first implemented SIFT using the OpenCV module to obtain the [x, y, r, t] values and calculate keypoint distances, and I also implemented a simple RANSAC loop with the homography solver in order to calculate the locations of matched keypoints in both images more accurately.
My code:
import numpy as np
import cv2
import os
import matplotlib.pyplot as plt
import tqdm
from functools import reduce
from operator import concat
from skimage.measure import ransac
from skimage.transform import ProjectiveTransform, AffineTransform

# raw string so the backslashes in the Windows path are not treated as escape sequences
file_path = r'denoising_sets\cameleon__N_8__sig_noise_5__sig_motion_103'
def read_images(file_path):
    images = []
    for root, dirs, files in os.walk(os.path.join(os.getcwd(), file_path)):
        for file in files:
            images.append(cv2.imread(os.path.join(root, file), 0))
    return images
images = read_images(file_path)
img1 = images[0]
img2 = images[1]
target_image = images[-1]
def good_match_keypoints(img1, img2, show=True):
    # Initiate SIFT detector
    sift = cv2.SIFT_create()
    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # BFMatcher with default params
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)
    # Apply ratio test
    good = []
    for m, n in matches:
        if m.distance < 0.8 * n.distance:
            good.append([m])
    good_match = reduce(concat, good)
    # cv2.drawMatchesKnn expects a list of lists as matches.
    img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, flags=2, outImg=img2)
    if show:
        plt.imshow(img3), plt.show()
    return good_match, kp1, kp2
good_match, kp1, kp2 = good_match_keypoints(img1,img2, show=True)
Keypoint distances:
pts1 = np.float32([kp1[m.queryIdx].pt for m in good_match])
pts2 = np.float32([kp2[m.trainIdx].pt for m in good_match])
I used the RANSAC loop with the homography solver in order to calculate the locations of matched keypoints in both images more accurately.
def ransc_loop(pts1, pts2, show=True):
    model, inliers = ransac(
        (pts1, pts2),
        AffineTransform, min_samples=4,
        residual_threshold=8, max_trials=10000
    )
    n_inliers = np.sum(inliers)
    inlier_keypoints_left = [cv2.KeyPoint(point[0], point[1], 1) for point in pts1[inliers]]
    inlier_keypoints_right = [cv2.KeyPoint(point[0], point[1], 1) for point in pts2[inliers]]
    placeholder_matches = [cv2.DMatch(idx, idx, 1) for idx in range(n_inliers)]
    image3 = cv2.drawMatches(img1, inlier_keypoints_left, img2, inlier_keypoints_right, placeholder_matches, None)
    if show:
        plt.imshow(image3)
        plt.show()
    src_pts = np.float32([inlier_keypoints_left[m.queryIdx].pt for m in placeholder_matches]).reshape(-1, 2)
    dst_pts = np.float32([inlier_keypoints_right[m.trainIdx].pt for m in placeholder_matches]).reshape(-1, 2)
    return src_pts, dst_pts
src_pts, dst_pts = ransc_loop(pts1, pts2)
This is what src_pts and dst_pts look like:
(array([[ 106.41315, 332.88037],
[ 120.28672, 314.56943],
array([[ 116.75639, 576.8563 ],
[ 130.71513, 555.35364],
Finally, I want to use warping to copy each source image onto the target image, based on the estimated transformation, but I don't know how to achieve this. I'd appreciate assistance with this step.
These are the steps involved:
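For the warping step, a minimal sketch of one way to do it, re-estimating the transform with OpenCV's findHomography on the src_pts/dst_pts from above (img1 and target_image as defined earlier; the skimage model.params matrix could be used instead):

import cv2
import numpy as np

# estimate a homography from the matched inlier points
H, status = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

# warp the source image into the target's coordinate frame,
# using the target's size as the output canvas
h, w = target_image.shape[:2]
warped = cv2.warpPerspective(img1, H, (w, h))

# copy the warped pixels onto the target wherever the warp landed
result = target_image.copy()
result[warped > 0] = warped[warped > 0]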
Can someone help me increase the size of the images from the extracted feature maps? I recently ran a CNN on a set of images and would like to see the features it extracted. I managed to extract them, but I can't actually see them because they are too small.
My code:
from matplotlib import pyplot

# summarize the feature map shapes
for i in range(len(cnn.layers)):
    layer = cnn.layers[i]
    # check for a conv layer
    if 'conv' not in layer.name:
        continue
    print(i, layer.name, layer.output.shape)
from keras import models
from keras.preprocessing import image
model_new = models.Model(inputs=cnn.inputs, outputs=cnn.layers[1].output)
img_path = 'train/1/2NbeGPsQf2Q - 4 0.jpg'
img = image.load_img(img_path, target_size=(img_rows, img_cols))
import numpy as np
from keras.applications.imagenet_utils import decode_predictions, preprocess_input
img = image.img_to_array(img)
img = np.expand_dims(img, axis=0)
img = preprocess_input(img)
features = model_new.predict(img)
square = 10
ix = 1
for _ in range(square):
    for _ in range(square):
        # specify the subplot and turn off the axis
        ax = pyplot.subplot(square, square, ix)
        ax.set_xticks([])
        ax.set_yticks([])
        # plot the filter channel in colour
        pyplot.imshow(features[0, :, :, ix - 1], cmap='viridis')
        ix += 1
# show the figure
pyplot.show()
The result is attached: output of the feature maps from layer 1.
It's too small. How can I make it bigger so I can see what is actually there?
I'd appreciate any input. Thanks!
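A minimal sketch of one possible fix, using the features array from the code above: create the figure explicitly with a much larger figsize before drawing the subplot grid, since the default canvas is far too small for a 10x10 grid.

from matplotlib import pyplot

square = 10
# a 20x20-inch canvas gives each of the 100 subplots room to be visible
fig = pyplot.figure(figsize=(20, 20))
ix = 1
for _ in range(square):
    for _ in range(square):
        ax = fig.add_subplot(square, square, ix)
        ax.set_xticks([])
        ax.set_yticks([])
        ax.imshow(features[0, :, :, ix - 1], cmap='viridis')
        ix += 1
# saving at a higher dpi also helps when zooming in afterwards
fig.savefig('feature_maps.png', dpi=150)
pyplot.show()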
I use a Miniconda Jupyter notebook with Python and I'm trying to implement audio filtering. I got this error and I really don't know how to fix it.
Here I import the libraries I need, along with the path to the file:
import wave as we
import numpy as np
import matplotlib.pyplot as plt
dir = r'/home/pc/Downloads/Bubble audios'
Here is the function that should plot the graph:
def read_wav(wavfile, plots=True, normal=False):
    f = wavfile
    params = f.getparams()
    # print(params)
    nchannels, sampwidth, framerate, nframes = params[:4]
    strData = f.readframes(nframes)  # raw frames as a byte string
    waveData = np.frombuffer(strData, dtype=np.int16)  # convert the bytes to int16 samples
    # wave amplitude normalization
    if normal == True:
        waveData = waveData * 1.0 / (max(abs(waveData)))
    # default integer dtype here; an int16 index would overflow for long files
    time = np.arange(0, nframes) * (1.0 / framerate)
    if plots == True:
        plt.figure(dpi=100)
        plt.plot(time, waveData)
        plt.xlabel("Time")
        plt.ylabel("Amplitude")
        plt.title("Single channel wavedata")
        plt.show()
    return (waveData, time)
def fft_wav(waveData, plots=True):
    f_array = np.fft.fft(waveData)  # Fourier transform; the result is a complex array
    f_abs = f_array
    axis_f = np.linspace(0, 250, int(len(f_array) / 2))  # map to 250 (plain int; np.int was removed from NumPy)
    # axis_f = np.linspace(0, 250, int(len(f_array)))  # map to 250
    if plots == True:
        plt.figure(dpi=100)
        plt.plot(axis_f, np.abs(f_abs[0:len(axis_f)]))
        # plt.plot(axis_f, np.abs(f_abs))
        plt.xlabel("Frequency")
        plt.ylabel("Amplitude spectrum")
        plt.title("Tile map")
        plt.show()
    return f_abs
And here I call the function with the file that I want to be read and plotted.
f = we.open(dir+r'/Ars1_Aufnahme.wav', 'rb')
Wave, time = read_wav(f)
The error that I got:
ValueError: x and y must have same first dimension, but have shapes (2140699,) and (4281398,)
I tried to use np.reshape but it didn't work or I might have used it wrong. So, any advice?
It seems that your time is 1/2 the size of your wave. Maybe your nframes is too short. If you do nframes = 2*nframes, what is the error?
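Following up on that observation: the exact 2x factor suggests a stereo file, since readframes returns nchannels * nframes interleaved samples. A minimal sketch of that fix inside read_wav, assuming a 2-channel file (names as in the post):

# nchannels * nframes samples come back interleaved (L R L R ...),
# so reshape to one column per channel and plot a single channel
waveData = np.frombuffer(strData, dtype=np.int16)
waveData = waveData.reshape(-1, nchannels)
time = np.arange(0, nframes) * (1.0 / framerate)
plt.plot(time, waveData[:, 0])  # lengths now match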
I'm trying to crop segmented objects output by a Mask R-CNN. The only problem is that when I do the cropping, I get the segments with the mask colors and not with their original colors.
Here's the output image with the segments:
And here's one segment (we have 17 segments in this image):
As you can see, we have the segment with the mask color and not the original color.
Here's the code that I'm using:
from mrcnn.config import Config
from mrcnn import model as modellib
from mrcnn import visualize
import numpy as np
import colorsys
import argparse
import imutils
import random
import cv2
import os
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-w", "--weights", required=True,
    help="path to Mask R-CNN model weights pre-trained on COCO")
ap.add_argument("-l", "--labels", required=True,
    help="path to class labels file")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
    help="minimum probability to filter weak detections")
ap.add_argument("-i", "--image", required=True,
    help="path to input image to apply Mask R-CNN to")
args = vars(ap.parse_args())
# load the class label names from disk, one label per line
CLASS_NAMES = open(args["labels"]).read().strip().split("\n")
# generate random (but visually distinct) colors for each class label
# (thanks to Matterport Mask R-CNN for the method!)
hsv = [(i / len(CLASS_NAMES), 1, 1.0) for i in range(len(CLASS_NAMES))]
COLORS = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
random.seed(42)
random.shuffle(COLORS)
class SimpleConfig(Config):
    # give the configuration a recognizable name
    NAME = "fashion"
    # set the number of GPUs to use along with the number of images per GPU
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
    NUM_CLASSES = 1 + 3
    # skip detections below the --confidence threshold (default 0.5)
    DETECTION_MIN_CONFIDENCE = args["confidence"]
# initialize the inference configuration
config = SimpleConfig()
# initialize the Mask R-CNN model for inference and then load the weights
print("[INFO] loading Mask R-CNN model...")
model = modellib.MaskRCNN(mode="inference", config=config,
    model_dir=os.getcwd())
model.load_weights(args["weights"], by_name=True)
# load the input image, convert it from BGR to RGB channel
# ordering, and resize the image to a width of 1150
image = cv2.imread(args["image"])
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = imutils.resize(image, width=1150)
# perform a forward pass of the network to obtain the results
print("[INFO] making predictions with Mask R-CNN...")
r = model.detect([image], verbose=1)[0]
image = visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
    ['BG', 'top', 'boots', 'bag'], r['scores'],
    title="")
# get and then save the segmented objects
mask = r["masks"]
for i in range(mask.shape[2]):
    image = cv2.imread(args["image"])
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = imutils.resize(image, width=1150)
    for j in range(image.shape[2]):
        image[:, :, j] = image[:, :, j] * mask[:, :, i]
    filename = "Output/segment_%d.jpg" % i
    cv2.imwrite(filename, image)
Any help on how to resolve this issue would be much appreciated. Thank you.
I think you need to change this line in visualize.display_instances and change facecolor from 'none' to None.
I think it is creating random colors even if you don't specify them explicitly.
I found the error. As was suggested to me on GitHub, I had to remove the
`image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)`
line, because my image was already converted to RGB.
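For reference, a minimal sketch of the saving loop with consistent color handling, assuming the r and args from the code above: keep the image in BGR end to end, since cv2.imwrite expects BGR.

import cv2
import imutils

# read once in BGR and resize to match the masks' spatial size
image_bgr = cv2.imread(args["image"])
image_bgr = imutils.resize(image_bgr, width=1150)

masks = r["masks"]  # boolean array of shape (H, W, num_instances)
for i in range(masks.shape[2]):
    segment = image_bgr.copy()
    segment[~masks[:, :, i]] = 0  # zero out pixels outside this instance
    cv2.imwrite("Output/segment_%d.jpg" % i, segment)  # imwrite wants BGR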
From the command line, I want to run python hist.py -n 1000 -o /dir and have the output saved as a PNG in the given directory. Can anyone help with this?
import numpy as np
import matplotlib.pyplot as plt
import argparse
import os, sys, errno

def plotData(outputDir):
    outFilename = "hist.png"
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--number", help="display a square of a given number", type=int)
    parser.add_argument('-o', '--outputDir', required=True,
        help='The directory to which plot files should be saved')
    args = parser.parse_args()
    num = args.number
    outFilepath = args.outputDir
    # example data
    mu = 100    # mean of distribution
    sigma = 15  # standard deviation of distribution
    x = mu + sigma * np.random.randn(int(num))
    num_bins = 50
    # the histogram of the data (density=True replaces the removed normed=1 argument)
    n, bins, patches = plt.hist(x, num_bins, density=True, facecolor='green', alpha=0.5)
    # add a 'best fit' line (mlab.normpdf was removed from matplotlib, so compute the pdf directly)
    y = np.exp(-0.5 * ((bins - mu) / sigma) ** 2) / (sigma * np.sqrt(2 * np.pi))
    plt.plot(bins, y, 'r--')
    plt.xlabel('Smarts')
    plt.ylabel('Probability')
    plt.title(r'Histogram of IQ: $\mu=100$, $\sigma=15$')
    # tweak spacing to prevent clipping of ylabel
    plt.subplots_adjust(left=0.15)
    # savefig takes a single path, so join the directory and filename
    plt.savefig(os.path.join(outFilepath, outFilename))
    plt.show()
    plt.close()
I could pass the random number from the terminal command line, but not the directory.
I have used args.outputDir, but it does not work for me, and I am a learner.
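A minimal sketch of a cleaner split, assuming the same histogram logic: parse the arguments at the top level and pass the directory in (this structure is a suggestion, not the original layout).

import os
import argparse
import numpy as np
import matplotlib.pyplot as plt

def plot_hist(num, output_dir, filename="hist.png"):
    mu, sigma = 100, 15
    x = mu + sigma * np.random.randn(num)
    plt.hist(x, 50, density=True, facecolor='green', alpha=0.5)
    os.makedirs(output_dir, exist_ok=True)  # create the directory if it is missing
    plt.savefig(os.path.join(output_dir, filename))  # one joined path; PNG inferred from the extension

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--number", type=int, required=True)
    parser.add_argument("-o", "--outputDir", required=True)
    args = parser.parse_args()
    plot_hist(args.number, args.outputDir)

Invoked as python3 hist.py -n 1000 -o plots, this writes plots/hist.png.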