Thresholding infrared images - python-3.x

I want to analyze infrared images of solar panels and cut the panels out of the images. As a first step I need to threshold the image. But if I do it with an example image from Google like the one below, I only get a black image. What can I do to improve this?
import cv2
import numpy as np
img = cv2.imread('GooglePanelIR.png')
# NOTE(review): the image is converted to HSV and then channel 0 (hue) is
# thresholded.  For a near-grayscale infrared image the hue channel is mostly
# zero/undefined, so almost no pixel exceeds the 100 cutoff — which is why
# the result is an all-black image (see the grayscale fix below).
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# Binary threshold of the hue plane: pixels > 100 -> 255, everything else -> 0.
ret,thresh1 = cv2.threshold(hsv[:,:,0],100,255,cv2.THRESH_BINARY)
cv2.imshow('cont imge', thresh1)
cv2.waitKey(0)
# ret is the threshold value actually used (100 here, since THRESH_OTSU is not set).
print(ret)

Since you need to threshold and it's a grayscale image, import your image in grayscale.
This is what you are looking for.
import cv2
import numpy as np

# Load directly as a single-channel grayscale image (flag 0 == IMREAD_GRAYSCALE).
gray = cv2.imread('GooglePanelIR.png', 0)
# Fixed binary threshold: intensities above 100 become 255, the rest 0.
ret, mask = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY)
cv2.imshow('cont imge', mask)
cv2.waitKey(0)
# ret echoes the threshold value that was applied.
print(ret)

Related

Detection of walls in a plan

I work on a subject which is detection of walls in an image.
So the method I use is:
feature.canny of skimage — I got this image
FindContours of OpenCV-python
But it detects no segments at all
Import part:
import matplotlib.pyplot as plt
import skimage.segmentation as seg
import skimage.filters as filters
from skimage import feature
from skimage.segmentation import (morphological_chan_vese,
morphological_geodesic_active_contour,
inverse_gaussian_gradient,
checkerboard_level_set, mark_boundaries)
from skimage import data, img_as_float
import cv2 as cv2
The code:
# Edge detection + contour extraction, without the lossy matplotlib round trip.
#
# Why the original found no contours: the boolean Canny map was rendered into
# a huge matplotlib figure with cmap="Greys" (edges dark on a white background,
# antialiased) and re-read from disk.  cv2.findContours expects a binary image
# with WHITE objects on a BLACK background, so it detected nothing.  It also
# called PIL's Image.fromarray without importing Image (NameError).
img = cv2.imread(image_png, 0)
img_base = img
# feature.canny returns a boolean array; convert it to uint8 {0, 255} directly.
edges = feature.canny(img, sigma=3).astype("uint8") * 255
# Keep the intermediate edge image on disk for inspection (same name as before).
cv2.imwrite("test_new_canny.png", edges)
image_base = cv2.imread("./image_test/rdc.png", 0)
# `edges` is now white-on-black, exactly what findContours expects.
contours, _ = cv2.findContours(edges, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
# drawContours mutates and returns image_base; use a separate name so the
# contour list itself is not shadowed.
annotated = cv2.drawContours(image_base, contours, -1, (0, 255, 0), 3)
# NOTE(review): drawing (0,255,0) on a single-channel image produces gray, not
# green — load image_base in color if green overlays are wanted.
cv2.imwrite("test_contours.png", annotated)
Do you know why the detection doesn't work?
So I use a second method, Computes Felsenszwalb’s efficient graph based image segmentation, with Skimage
I obtain something with a small image, but with a larger one the algorithm never finishes processing.
Any ideas?

Numpy error trying to use difference hashing with the ImageHash library

I am trying to perform difference hashing with the python ImageHash library and keep getting a numpy error.
The error:
File "/Users/testuser/Desktop/test_folder/test_env/lib/python3.8/site-packages/imagehash.py", line 252, in dhash
image = image.convert("L").resize((hash_size + 1, hash_size), Image.ANTIALIAS)
AttributeError: 'numpy.ndarray' object has no attribute 'convert'
The code:
from PIL import Image
from cv2 import cv2
import imagehash
import numpy as np
def hash_and_compare(image1, image2):
    """Compare two image files with difference hashing.

    Both arguments are file paths.  Prints the Hamming distance between the
    two dhashes, then 'match' if the distance is within a threshold of 10,
    'no match' otherwise.
    """
    # read images
    img1 = cv2.imread(image1)
    img2 = cv2.imread(image2)
    # imagehash.dhash requires a PIL.Image, not a numpy array — passing the
    # OpenCV array is what raised
    #   AttributeError: 'numpy.ndarray' object has no attribute 'convert'.
    # Convert BGR (OpenCV order) -> RGB, then wrap in a PIL Image.  dhash does
    # its own grayscale conversion and (hash_size+1, hash_size) resize
    # internally, so the manual cvtColor/resize steps were unnecessary.
    pil1 = Image.fromarray(cv2.cvtColor(img1, cv2.COLOR_BGR2RGB))
    pil2 = Image.fromarray(cv2.cvtColor(img2, cv2.COLOR_BGR2RGB))
    # hash images
    image1_hash = imagehash.dhash(pil1)
    image2_hash = imagehash.dhash(pil2)
    # compute hamming distance
    distance = image1_hash - image2_hash
    # BUG FIX: the original tested `image1_hash <= 10` (comparing the hash
    # object to the threshold); the intended comparison is the distance.
    if distance <= 10:
        print(distance)
        print('match')
    else:
        print(distance)
        print('no match')
hash_and_compare('/Users/testuser/Desktop/test_folder/game_name056496.png', '/Users/testuser/Desktop/test_folder/game_name499761.png')
As mentioned in the ImageHash library's documentation, the image must be a PIL instance, so you can't pass a numpy array as input to the dhash function. If you want to do some preprocessing with OpenCV, you should convert the result into a PIL Image before passing it to dhash, like this:
import numpy as np
from PIL import Image
...
some preprocess
...
# You may need to convert the color.
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
im_pil = Image.fromarray(img)
image1_hash = imagehash.dhash(im_pil)

how to show binary image data in python?

I can show the image using Image.open, but how do I display it from the binary data?
trying to use plot gets: ValueError: x and y can be no greater than 2-D, but have shapes (64,) and (64, 64, 3). this makes sense as that is what the result is supposed to be, but how do i display it?
import pathlib
import glob
from os.path import join
import matplotlib.pyplot as plt
from PIL import Image
import tensorflow as tf
def parse(image):
    """Read a .png file path and return a float32 image tensor resized to 64x64x3."""
    raw = tf.io.read_file(image)
    decoded = tf.image.decode_png(raw, channels=3)
    # convert_image_dtype also rescales integer pixel values into [0, 1].
    as_float = tf.image.convert_image_dtype(decoded, tf.float32)
    return tf.image.resize(as_float, [64, 64])
root = "in/flower_photos/tulips"
path = join(root, "*.jpg")
files = sorted(glob.glob(path))
file = files[0]
# Show the original file with PIL for comparison.
image = Image.open(file)
image.show()
# parse() returns a float32 tensor of shape (64, 64, 3) in [0, 1].
binary = parse(file)
print(type(binary))
# FIX: plt.plot draws 1-D/2-D curves and rejects a (64, 64, 3) array — that is
# exactly the ValueError quoted above.  An image array is displayed with
# imshow; matplotlib accepts float RGB data in the [0, 1] range directly.
plt.imshow(binary)
plt.show()
found a nice pillow tutorial.
from matplotlib import image
from matplotlib import pyplot
from PIL import Image

# Open with PIL and report basic metadata about the file.
filename = 'Sydney-Opera-House.jpg'
pil_img = Image.open(filename)
for detail in (pil_img.format, pil_img.mode, pil_img.size):
    print(detail)

# Re-read the same file as a numpy pixel array via matplotlib.
pixels = image.imread(filename)
print(pixels.dtype)
print(pixels.shape)

# Render the pixel array as an image.
pyplot.imshow(pixels)
pyplot.show()

How to convert binary image to RGB with PIL?

I have a binary PIL Image and I need to convert it to RGB. I did this after deskewing the image
binary image
I need this way:
I already tried this which is not working
from PIL import Image as im
# NOTE(review): this multiplies 255 by `Image` — which here would be the PIL
# class/module itself, not the pixel data — so the expression cannot work.
# The actual array from the deskew step should be used in place of `Image`.
img = im.fromarray((255 * Image).astype("uint8")).convert("RGB")
I still don't understand why you convert to RGBA if you want RGB, but this code converts your image to RGB as you ask:
#!/usr/local/bin/python3
import numpy as np
from PIL import Image

# Load the image, forcing a 3-channel RGB representation.
source = Image.open('text.png').convert('RGB')

# Invert every channel: each uint8 pixel value v becomes 255 - v.
inverted = 255 - np.array(source)

# Write the inverted array back out as a new image file.
Image.fromarray(inverted).save('result.png')

convert multidimensional numpy array to image

I have a numpy array (trainData.npy) for image processing. It contains 2000 images, grayscale, with height 450 and width 600.
Train images shape:(2000, 1, 450, 600)
I'm looking for a way to plot or show one of these images. I've used this code but I got a TypeError: Invalid dimensions for image data error.
import numpy as np
import matplotlib.pyplot as plt
# NOTE(review): img_train[0] has shape (1, 450, 600) per the question text;
# imshow rejects a leading singleton channel dimension for grayscale data,
# hence the "Invalid dimensions for image data" TypeError.
img = img_train[0]
plt.imshow(img)
plt.show()
Just reshape your image:
import numpy as np
import matplotlib.pyplot as plt

# img_train[0] has shape (ncolor=1, nlines=450, nrows=600).
img = img_train[0]
# BUG FIX: the original called a bare `reshape(450, 600)`, which raises
# NameError — reshape is a method of the array.  np.squeeze(img) would drop
# the singleton channel axis just as well.
img = img.reshape(450, 600)  # img now has shape (450, 600)
# img = img / img.max()  # optional: rescale grayscale into [0..1] (note the call parentheses)
plt.imshow(img)
plt.show()
P.S.: I personally find it a bit more intuitive if pictures are dimensional-ordered (nlines, nrows, ncolorchanel) than your choice (ncolorchanel, nlines, nrows)

Resources