I'm trying to crop the segmented objects output by a Mask R-CNN; the only problem is that when I do the cropping, I get the segments with the mask colors and not with their original colors.
Here's the output image with the segments:
And here's one segment (we have 17 segments in this image):
As you can see, we have the segment with the mask color and not the original color.
Here's the code that I'm using:
from mrcnn.config import Config
from mrcnn import model as modellib
from mrcnn import visualize
import numpy as np
import colorsys
import argparse
import imutils
import random
import cv2
import os
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-w", "--weights", required=True,
help="path to Mask R-CNN model weights pre-trained on COCO")
ap.add_argument("-l", "--labels", required=True,
help="path to class labels file")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
help="minimum probability to filter weak detections")
ap.add_argument("-i", "--image", required=True,
help="path to input image to apply Mask R-CNN to")
args = vars(ap.parse_args())
# load the class label names from disk, one label per line
CLASS_NAMES = open(args["labels"]).read().strip().split("\n")
# generate random (but visually distinct) colors for each class label
# (thanks to Matterport Mask R-CNN for the method!)
hsv = [(i / len(CLASS_NAMES), 1, 1.0) for i in range(len(CLASS_NAMES))]
COLORS = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
random.seed(42)
random.shuffle(COLORS)
class SimpleConfig(Config):
    # give the configuration a recognizable name
    NAME = "fashion"
    # set the number of GPUs to use along with the number of images per GPU
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
    NUM_CLASSES = 1 + 3
    # skip detections below the --confidence threshold (default 0.5)
    DETECTION_MIN_CONFIDENCE = args["confidence"]
# initialize the inference configuration
config = SimpleConfig()
# initialize the Mask R-CNN model for inference and then load the
# weights
print("[INFO] loading Mask R-CNN model...")
model = modellib.MaskRCNN(mode="inference", config=config,
    model_dir=os.getcwd())
model.load_weights(args["weights"], by_name=True)
# load the input image, convert it from BGR to RGB channel
# ordering, and resize the image
# resize the image to a fixed width of 1150 pixels
image = cv2.imread(args["image"])
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = imutils.resize(image, width=1150)
# perform a forward pass of the network to obtain the results
print("[INFO] making predictions with Mask R-CNN...")
r = model.detect([image], verbose=1)[0]
image = visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
    ['BG', 'top', 'boots', 'bag'], r['scores'],
    title="")
# get and then save the segmented objects
i = 0
mask = r["masks"]
for i in range(mask.shape[2]):
    image = cv2.imread(args["image"])
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = imutils.resize(image, width=1150)
    for j in range(image.shape[2]):
        image[:, :, j] = image[:, :, j] * mask[:, :, i]
    filename = "Output/segment_%d.jpg" % i
    cv2.imwrite(filename, image)
    i += 1
Any help on how to resolve this issue would be much appreciated. Thank you.
I think you need to change this line in visualize.display_instances and change facecolor from 'none' to None.
I think it is creating random colors even if you don't specify them explicitly.
I found the error. As was suggested to me on GitHub, I had to remove the
`image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)`
line, because my image was already converted to RGB.
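For anyone hitting the same thing, here is a minimal sketch of the saving loop with that line removed (it assumes args, r, cv2 and imutils from the code above, and that an Output/ directory exists):

image = cv2.imread(args["image"])          # stays in BGR, which is what cv2.imwrite expects
image = imutils.resize(image, width=1150)
mask = r["masks"]
for i in range(mask.shape[2]):
    # zero out every pixel outside the i-th instance mask
    segment = image.copy()
    for j in range(segment.shape[2]):
        segment[:, :, j] = segment[:, :, j] * mask[:, :, i]
    cv2.imwrite("Output/segment_%d.jpg" % i, segment)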
import torch
import glob
import os
import pathlib
from ultralytics import YOLO
model_name='MyBest.pt'
model = torch.hub.load(<?>, 'custom', source='local', path = model_name, force_reload = True)
results = model('person.png') # predict on an image
results.save()
What should I write in place of <?>?
I'm trying to get an image with a box drawn around all detected objects, and I want the code to use both YOLOv8 and PyTorch.
According to the official Python usage docs for release 8.0.20:
from ultralytics.yolo.engine.model import YOLO
model = YOLO("yolov8s.pt")
results = model.predict(source='ultralytics/assets', save=True, save_txt=True)
You can use the following code to inspect the results in more detail:
for result in results:
    boxes = result.boxes  # Boxes object for bbox outputs
    masks = result.masks  # Masks object for segmentation mask outputs
    probs = result.probs  # class probabilities
    print(boxes)
    print(masks)
    print(probs)
boxes = results[0].boxes
box = boxes[0] # returns one box
box.xyxy
boxes.xyxy # box with xyxy format, (N, 4)
boxes.xywh # box with xywh format, (N, 4)
boxes.xyxyn # box with xyxy format but normalized, (N, 4)
boxes.xywhn # box with xywh format but normalized, (N, 4)
boxes.conf # confidence score, (N, 1)
boxes.cls # cls, (N, 1)
boxes.data # raw bboxes tensor, (N, 6); also available as boxes.boxes
You can go to the Ultralytics docs page for more information:
https://docs.ultralytics.com/predict/
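If the goal is simply an image with a box around every detected object, a minimal sketch along these lines should work (the 'person.png' path is just a placeholder, and the drawing is done with plain OpenCV rather than any Ultralytics plotting helper):

import cv2
from ultralytics import YOLO

model = YOLO("yolov8s.pt")
results = model.predict(source="person.png")  # placeholder image path

img = cv2.imread("person.png")
for x1, y1, x2, y2 in results[0].boxes.xyxy.cpu().numpy():
    # one rectangle per detected object
    cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
cv2.imwrite("person_boxes.png", img)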
Can someone help me with how to increase the size of the images extracted from the feature maps? I recently ran a CNN on a set of images and would like to see the extracted features. I managed to extract them, but I'm unable to actually see anything because the plots are too small.
My code:
from matplotlib import pyplot
# summarize feature map shapes
for i in range(len(cnn.layers)):
    layer = cnn.layers[i]
    # check for conv layers
    if 'conv' not in layer.name:
        continue
    print(i, layer.name, layer.output.shape)
from keras import models
from keras.preprocessing import image
model_new = models.Model(inputs=cnn.inputs, outputs=cnn.layers[1].output)
img_path = 'train/1/2NbeGPsQf2Q - 4 0.jpg'
img = image.load_img(img_path, target_size=(img_rows, img_cols))
import numpy as np
from keras.applications.imagenet_utils import decode_predictions, preprocess_input
img = image.img_to_array(img)
img = np.expand_dims(img, axis=0)
img = preprocess_input(img)
features = model_new.predict(img)
square = 10
ix = 1
for _ in range(square):
    for _ in range(square):
        # specify the subplot and turn off the axes
        ax = pyplot.subplot(square, square, ix)
        ax.set_xticks([])
        ax.set_yticks([])
        # plot the filter channel in colour
        pyplot.imshow(features[0, :, :, ix - 1], cmap='viridis')
        ix += 1
# show the figure
pyplot.show()
The result is attached: output of feature maps from layer 1.
It's too small. How can I make it bigger so I can see what is actually there?
I'd appreciate any input. Thanks!
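One thing that usually helps here is to make the matplotlib figure itself larger before creating the 10x10 grid of subplots, and optionally to save it at a higher DPI. A minimal sketch, reusing the plotting loop from the code above (the file name is just a placeholder):

from matplotlib import pyplot

# place this before the subplot loop so each of the 100 panels gets more room
pyplot.figure(figsize=(20, 20))
# ... the existing for-loop that calls pyplot.subplot / pyplot.imshow goes here ...
# optionally save at high resolution in addition to showing the figure
pyplot.savefig("feature_maps_layer1.png", dpi=300)
pyplot.show()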
I'm studying deep learning and trained an image classification model. The problem, however, is that to load the training images I used:
test_image = image.load_img('some.png', target_size = (64, 64))
test_image = image.img_to_array(test_image)
While for actual application I use:
test_image = cv2.imread('trick.png')
test_image = cv2.resize(test_image, (64, 64))
But I found that these give different ndarrays (different data):
Last entries from load_image:
[ 64. 71. 66.]
[ 64. 71. 66.]
[ 62. 69. 67.]]]
Last entries from cv2.imread:
[ 15 23 27]
[ 16 24 28]
[ 14 24 28]]]
So the system is not working. Is there a way to match the results of one to the other?
OpenCV reads images in BGR order, whereas Keras represents them in RGB. To get the OpenCV version to correspond to the order we expect (RGB), simply reverse the channels:
test_image = cv2.imread('trick.png')
test_image = cv2.resize(test_image, (64, 64))
test_image = test_image[...,::-1] # Added
The last line reverses the channels to be in RGB order. You can then feed this into your keras model.
Another point I'd like to add is that cv2.imread usually reads images in uint8 precision. Examining the output of your Keras-loaded image, you can see that the data is in floating-point precision, so you may also want to convert to a floating-point representation, such as float32:
import numpy as np
# ...
# ...
test_image = test_image[...,::-1].astype(np.float32)
As a final point, depending on how you trained your model, it's usually customary to normalize the image pixel values to the [0, 1] range. If you did this with your Keras model, make sure you divide your values by 255 for the image read in through OpenCV:
import numpy as np
# ...
# ...
test_image = (test_image[...,::-1].astype(np.float32)) / 255.0
Recently, I came across the same issue. I tried to convert the color channel and resize the image with OpenCV. However, PIL and OpenCV have very different ways of image resizing.
Here is the exact solution to this problem.
This is a function that takes an image file path, converts the image to the target size, and prepares it for the Keras model:
import cv2
import keras
import numpy as np
from keras.preprocessing import image
from PIL import Image
def prepare_image(file):
    im_resized = image.load_img(file, target_size=(224, 224))
    img_array = image.img_to_array(im_resized)
    image_array_expanded = np.expand_dims(img_array, axis=0)
    return keras.applications.mobilenet.preprocess_input(image_array_expanded)
# execute the function
PIL_image = prepare_image("lena.png")
If you have an OpenCV image, the function will look like this:
def prepare_image2(img):
    # convert the color from BGR to RGB, then convert to a PIL image
    cvt_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    im_pil = Image.fromarray(cvt_image)
    # resize the PIL image, then convert it back to an array
    im_resized = im_pil.resize((224, 224))
    img_array = image.img_to_array(im_resized)
    image_array_expanded = np.expand_dims(img_array, axis=0)
    return keras.applications.mobilenet.preprocess_input(image_array_expanded)
# execute the function
img = cv2.imread("lena.png")
cv2_image = prepare_image2(img)
# finally check if it is working
np.array_equal(PIL_image, cv2_image)
>> True
Besides CV2 using the BGR format and Keras (using PIL as a backend) using the RGB format, there are also significant differences in the resize methods of CV2 and PIL using the same parameters.
Multiple references can be found on the internet, but the general idea is that there are subtle differences in the pixel coordinate systems used by the two resize algorithms, and also potential issues with the different methods of casting to float as an intermediate step in the interpolation. The end result is a visually similar image, but one that is slightly shifted/perturbed between the two versions.
A perfect example of an adversarial attack that can cause huge differences in accuracy despite small input differences.
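A quick way to see the difference for yourself (a small sketch; 'lena.png' is just a placeholder path for any test image):

import cv2
import numpy as np
from PIL import Image

img_bgr = cv2.imread("lena.png")  # placeholder image path
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)

# resize the same RGB array with each library's default interpolation
cv2_resized = cv2.resize(img_rgb, (224, 224))
pil_resized = np.array(Image.fromarray(img_rgb).resize((224, 224)))

# the two results are visually similar but not pixel-identical
diff = np.abs(cv2_resized.astype(np.float32) - pil_resized.astype(np.float32))
print("mean absolute pixel difference:", diff.mean())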
Before you flag the question, please read it first.
I found the tutorial How to count the number of objects detected with Template Matching?, and it's good but not perfect.
The problem I am facing is that it does not give accurate counts, even though it draws rectangles around all the found objects.
For example, I have this (before executing the script):
Then, I have this (after executing the script):
As you can clearly see, there are 3 rectangles in the 4th row, but the count it gives is 1.
I've tried changing the threshold and sensitivity, but it didn't work. Here's what I have so far:
# USAGE:
# python3 match_template.py --template cod_logo.png --images images
import numpy as np
import argparse
import imutils
import glob
import cv2
from matplotlib import pyplot as plt
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-t", "--template", required=True, help="Path to template image")
ap.add_argument("-i", "--images", required=True,
help="Path to images where template will be matched")
ap.add_argument("-v", "--visualize",
help="Flag indicating whether or not to visualize each iteration")
args = vars(ap.parse_args())
def match_and_count(template, image):
    img_rgb = cv2.imread(image)
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    template = cv2.imread(template, 0)
    w, h = template.shape[::-1]
    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
    threshold = 0.8
    loc = np.where(res >= threshold)
    f = set()
    for pt in zip(*loc[::-1]):
        cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)
        sensitivity = 100
        f.add((round(pt[0] / sensitivity), round(pt[1] / sensitivity)))
    cv2.imwrite('multiple_objects.jpg', img_rgb)
    print("Occurrence of Object: %s" % len(f))
match_and_count(args["template"], args["images"])
Does anyone have a better approach for doing this?
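One approach that tends to count more reliably than rounding coordinates by a sensitivity value is to turn every match above the threshold into a candidate box and then merge the heavily overlapping ones with a simple non-maximum suppression pass. The helper below is only a sketch (its name and the overlap threshold are illustrative, not part of the tutorial):

import numpy as np

def non_max_suppression(boxes, overlap_thresh=0.3):
    # boxes: (N, 4) array of (x1, y1, x2, y2); returns the surviving boxes
    if len(boxes) == 0:
        return boxes
    boxes = boxes.astype(np.float32)
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = np.argsort(y2)
    keep = []
    while len(order) > 0:
        i = order[-1]
        keep.append(i)
        # overlap of every remaining box with the box just kept
        xx1 = np.maximum(x1[i], x1[order[:-1]])
        yy1 = np.maximum(y1[i], y1[order[:-1]])
        xx2 = np.minimum(x2[i], x2[order[:-1]])
        yy2 = np.minimum(y2[i], y2[order[:-1]])
        w = np.maximum(0, xx2 - xx1 + 1)
        h = np.maximum(0, yy2 - yy1 + 1)
        overlap = (w * h) / area[order[:-1]]
        order = order[:-1][overlap <= overlap_thresh]
    return boxes[keep].astype(int)

# inside match_and_count, after loc, w and h have been computed:
# candidates = np.array([(x, y, x + w, y + h) for (x, y) in zip(*loc[::-1])])
# picked = non_max_suppression(candidates)
# print("Occurrence of Object: %d" % len(picked))

Duplicate hits on the same logo overlap almost completely, so they collapse into one surviving box, and len(picked) becomes the object count.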
I noticed a performance drop of around 10% in accuracy between what Keras gives as output and what I get when I test it myself. So I reproduced this; see the small code snippet below. I generate the input in two ways: inputs is generated by the Keras ImageDataGenerator (no augmentations) and inputs2 is produced without the ImageDataGenerator.
import numpy as np
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
import os
import pdb
def preprocess(img):
    img = image.array_to_img(img)
    width, height = img.size
    # crop a 48x48px patch
    desired_width, desired_height = 48, 48
    if width < 48:
        desired_width = width
    start_x = np.maximum(0, int((width - desired_width) / 2))
    img = img.crop((start_x, np.maximum(0, height - desired_height), start_x + desired_width, height))
    img = img.resize((48, 48))
    img = image.img_to_array(img)
    return img / 255.
datagen = ImageDataGenerator(
    featurewise_center=False,
    featurewise_std_normalization=False,
    preprocessing_function=preprocess)
generator = datagen.flow_from_directory(
    'numbers_train',
    target_size=(48, 48),
    batch_size=1024,  # only 405 images in the directory, so the batch is always the same
    classes=['02'],
    shuffle=False,
    class_mode='sparse')
inputs, targets = next(generator)
folder = 'numbers_train/02'
files = os.listdir(folder)
files = list(map(lambda x: os.path.join(folder, x), files))
images = []
for f in files:
    img = image.load_img(f)
    images.append(preprocess(img))
inputs2 = np.asarray(images)
print(np.mean(inputs))
print(np.mean(inputs2))
This gives two different values, whereas I expect inputs and inputs2 to be the same:
0.403158
0.41354
This causes a difference in accuracy of around 10%. What is happening here?
Edit: It seems to be something with the resizing of the images. If I remove the img.resize in preprocess and add that line to the for loop before preprocessing, the means are the same. But what I want is for the resizing to be done after the cropping.
Edit 2: So the ImageDataGenerator first resizes to (48, 48) and then calls the preprocess function. I want it the other way around. Does someone know a trick to do this?
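One workaround, since flow_from_directory always resizes to target_size before it ever calls preprocessing_function, is to skip the generator's resize entirely and batch the full-resolution images yourself, for example with a keras.utils.Sequence that reuses the preprocess function above. This is only a sketch, not an ImageDataGenerator feature; the class name, label handling and batch size are illustrative:

import os
import numpy as np
from keras.preprocessing import image
from keras.utils import Sequence

class CroppedDirectorySequence(Sequence):
    # loads images at their original size, so preprocess() crops first and resizes afterwards
    def __init__(self, folder, label, batch_size=32):
        self.files = [os.path.join(folder, f) for f in sorted(os.listdir(folder))]
        self.label = label
        self.batch_size = batch_size

    def __len__(self):
        return int(np.ceil(len(self.files) / self.batch_size))

    def __getitem__(self, idx):
        batch_files = self.files[idx * self.batch_size:(idx + 1) * self.batch_size]
        # load_img without target_size keeps the original resolution
        batch_x = np.asarray([preprocess(image.img_to_array(image.load_img(f)))
                              for f in batch_files])
        batch_y = np.full(len(batch_files), self.label)
        return batch_x, batch_y

generator2 = CroppedDirectorySequence('numbers_train/02', label=0, batch_size=1024)
inputs3, _ = generator2[0]
print(np.mean(inputs3))  # should now match np.mean(inputs2) from the manual loop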