Smoothing jagged edges of an image - python-3.x

I would like to generate a skeleton out of an image. Since the edges generated from the original image with skimage aren't smooth, the resulting skeleton obtained from the binary image has disconnected edges with knots.
import cv2
import matplotlib.pyplot as plt
from skimage import filters, feature
from skimage.filters import threshold_mean
from skimage.morphology import skeletonize_3d

imgfile = "edit.jpg"
im = cv2.imread(imgfile)                      # read the image
image = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)  # convert to grayscale
thresh = threshold_mean(image)                # global mean threshold
binary = image > thresh                       # binarize
edges = filters.sobel(binary)                 # Sobel edge map
dilate = feature.canny(binary, sigma=0)       # Canny edges without smoothing
skeleton = skeletonize_3d(binary)             # skeletonize the binary image
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(8, 2))
ax = axes.ravel()
ax[0].imshow(binary, cmap=plt.cm.gray)
ax[0].set_title('binarize')
ax[1].imshow(edges, cmap=plt.cm.gray)
ax[1].set_title('edges')
ax[2].imshow(dilate, cmap=plt.cm.gray)
ax[2].set_title('dilates')
ax[3].imshow(skeleton, cmap=plt.cm.gray)
ax[3].set_title('skeleton')
for a in ax:
    a.axis('off')
plt.show()
I tried using dilation to smooth the jagged edges, but then the contours in the skeleton have two edges instead of the single desired edge.
I would like to ask for suggestions on how the edges can be smoothed to avoid knots and disconnected edges in the resulting skeleton.
Input image
Output images
Edit: after using Gaussian smoothing
binary = image > thresh
gaussian = filters.gaussian(binary)  # blur turns the mask into floats in [0, 1]
skeleton = skeletonize_3d(gaussian)  # nonzero pixels are treated as foreground

A median filter should do the trick on your binary image before the skeletonization:
import scipy.signal
binary_smoothed = scipy.signal.medfilt(binary.astype('uint8'), 3)  # cast: medfilt does not accept bool arrays
For the borders, I would probably use Canny and play with the parameters, as shown in the link below:
https://claudiovz.github.io/scipy-lecture-notes-ES/advanced/image_processing/auto_examples/plot_canny.html
from image_source_canny import canny
borders = canny(binary_smoothed, 3, 0.3, 0.2)
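A minimal end-to-end sketch of that suggestion (the blob below is synthetic stand-in data, not the poster's image): median-filter the binary mask, then skeletonize the smoothed result.
import numpy as np
from scipy import signal
from skimage.morphology import skeletonize_3d

# Synthetic jagged mask standing in for the question's `binary` image.
rng = np.random.default_rng(0)
binary = np.zeros((120, 120), dtype=np.uint8)
binary[40:80, 20:100] = 1
binary[rng.random(binary.shape) > 0.98] ^= 1  # sprinkle jagged noise

# The median filter removes isolated pixels and smooths the outline.
binary_smoothed = signal.medfilt(binary, 3)

# Skeletonize the cleaned-up mask.
skeleton = skeletonize_3d(binary_smoothed > 0)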

Related

How to increase the image size extracted from visualizing the feature map?

Can someone help me increase the size of the images extracted from a feature map? I recently ran a CNN on a set of images and would like to see the extracted features. I managed to extract them, but I can't actually see them because they are too small.
My code:
from matplotlib import pyplot

# summarize feature map shapes
for i in range(len(cnn.layers)):
    layer = cnn.layers[i]
    # check for conv layer
    if 'conv' not in layer.name:
        continue
    print(i, layer.name, layer.output.shape)

from keras import models
from keras.preprocessing import image

model_new = models.Model(inputs=cnn.inputs, outputs=cnn.layers[1].output)
img_path = 'train/1/2NbeGPsQf2Q - 4 0.jpg'
img = image.load_img(img_path, target_size=(img_rows, img_cols))

import numpy as np
from keras.applications.imagenet_utils import decode_predictions, preprocess_input

img = image.img_to_array(img)
img = np.expand_dims(img, axis=0)
img = preprocess_input(img)
features = model_new.predict(img)

square = 10
ix = 1
for _ in range(square):
    for _ in range(square):
        # specify subplot and turn off axis
        ax = pyplot.subplot(square, square, ix)
        ax.set_xticks([])
        ax.set_yticks([])
        # plot filter channel in colour
        pyplot.imshow(features[0, :, :, ix - 1], cmap='viridis')
        ix += 1
# show the figure
pyplot.show()
The result is attached: output of feature map layer 1
It's too small. How can I make it bigger so I can see what is actually there?
I'd appreciate any input. Thanks!
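One way to get a larger figure (my suggestion, not from the post) is to set the figure size before drawing the subplot grid; the random `features` array below is a stand-in for the real `model_new.predict(img)` output.
import numpy as np
from matplotlib import pyplot

# Stand-in for `features = model_new.predict(img)`.
features = np.random.rand(1, 112, 112, 100)

square = 10
pyplot.figure(figsize=(16, 16))  # size in inches: enlarges the whole grid
ix = 1
for _ in range(square):
    for _ in range(square):
        ax = pyplot.subplot(square, square, ix)
        ax.set_xticks([])
        ax.set_yticks([])
        pyplot.imshow(features[0, :, :, ix - 1], cmap='viridis')
        ix += 1
pyplot.show()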

Remove Freckles from Simple Binary Image

I have the following NumPy array of a running man, which you can download here:
https://drive.google.com/file/d/1SfIEqGsBV_vA7iP4UjLdklLJlLdDzozL/view?usp=sharing
To display it, use this code:
import numpy as np
import matplotlib.pyplot as plt
# load data
data = np.load('running_man.npy')
# plot data
plt.imshow(data)
As you can see there is a lot of noise (freckles) in the image. I would like to get rid of it and retrieve a clean image of the runner. Any idea of how to do it?
This is what I have done so far:
from skimage import measure
from scipy import ndimage
# Find contours at a constant value of 1
contours = measure.find_contours(data, 1, fully_connected='high')
# Select the largest contiguous contour
contour = sorted(contours, key=lambda x: len(x))[-1]
# Create an empty image to store the masked array
r_mask = np.zeros_like(data, dtype='bool')
# Create a contour image by using the contour coordinates rounded to their nearest integer value
r_mask[np.round(contour[:, 0]).astype('int'), np.round(contour[:, 1]).astype('int')] = 1
# Fill in the hole created by the contour boundary
r_mask = ndimage.binary_fill_holes(r_mask)
# Invert the mask since one wants pixels outside of the region
r_mask = ~r_mask
plt.imshow(r_mask)
... but as you can see, the outline is very rough!
What works well is to upload the image to an online JPG-to-SVG converter, which makes the lines super smooth, but I want to be able to do it in Python.
Idea:
I am looking for something that can keep the sharp corners, maybe something that detects the gradient along the edge and only keeps the point where the gradient is above a certain threshold...
For this specific image you can just use numpy:
import numpy as np
import matplotlib.pyplot as plt
data = np.load('running_man.npy')
data[data > 1] = 0
plt.xticks([])
plt.yticks([])
plt.imshow(data)
For a method that better preserves corners, we can use median filters while forcing the original mask's pixels to survive the filtering.
Masked Image
Mask after filtering
Recolored
import cv2
import numpy as np

# load image
img = cv2.imread("run.png")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# make mask
_, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

# median filter
med = cv2.medianBlur(thresh, 11)
med[thresh == 255] = 255

# inverse filter
mask = cv2.bitwise_not(med)
med = cv2.medianBlur(mask, 3)
med[mask == 255] = 255

# recolor
color = np.zeros_like(img)
color[med == 0] = (66, 239, 245)
color[med == 255] = (92, 15, 75)

# show
cv2.imshow("colored", color)
cv2.waitKey(0)
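Another standard trick for freckle-type noise (my suggestion, not from either answer above) is to drop connected components below an area threshold, e.g. with skimage; this sketch assumes, as the first answer implies, that the runner's pixels are the 1-valued ones.
import numpy as np
from skimage import morphology

data = np.load('running_man.npy')  # array from the question
mask = data == 1                   # boolean foreground mask
# Remove connected components smaller than 64 pixels (tune to taste).
clean = morphology.remove_small_objects(mask, min_size=64)
# Also fill small holes inside the runner.
clean = morphology.remove_small_holes(clean, area_threshold=64)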

Augmenting both X and Y images with Keras

I know how to use the ImageDataGenerator to augment my data by translating, flipping, rotating, shearing, etc. The question is: let's say I have both a training image and the corresponding segmentation image, and I would like to augment both of them. For example, if I rotated a training image by 45 degrees, then I would also want to rotate the segmentation image by 45 degrees. In essence, I want to apply the identical set of transforms to two data sets. Is that possible with ImageDataGenerator, or do I have to write all the augmentation functions from scratch? Thanks very much in advance.
You can use augmentations in tf.data.Dataset.map and return the image twice. I don't know of any way to do this with ImageDataGenerator.
import tensorflow as tf
import matplotlib.pyplot as plt
from skimage import data

cats = tf.concat([data.chelsea()[None, ...] for i in range(24)], axis=0)
test = tf.data.Dataset.from_tensor_slices(cats)

def augment(image):
    image = tf.cast(x=image, dtype=tf.float32)
    image = tf.divide(x=image, y=tf.constant(255.))
    image = tf.image.random_hue(image=image, max_delta=5e-1)
    image = tf.image.random_brightness(image=image, max_delta=2e-1)
    return image, image

test = test.batch(1).map(augment)

fig = plt.figure()
plt.subplots_adjust(wspace=.1, hspace=.2)
images = next(iter(test.take(1)))
for index, image in enumerate(images):
    ax = plt.subplot(1, 2, index + 1)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.imshow(tf.clip_by_value(tf.squeeze(image), clip_value_min=0, clip_value_max=1))
plt.show()
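To apply the identical geometric transform to a real image/mask pair rather than returning the image twice, one option (my sketch, assuming TensorFlow >= 2.4) is to reuse one seed with TensorFlow's stateless random image ops, so both tensors receive the same flip:
import tensorflow as tf

def augment_pair(image, mask):
    # Draw one seed per element and reuse it, so image and mask flip together.
    seed = tf.random.uniform([2], maxval=2**31 - 1, dtype=tf.int32)
    image = tf.image.stateless_random_flip_left_right(image, seed)
    mask = tf.image.stateless_random_flip_left_right(mask, seed)
    return image, mask

# Usage on a dataset of (image, mask) pairs:
# train = tf.data.Dataset.from_tensor_slices((images, masks))
# train = train.batch(8).map(augment_pair)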

Image Cropping to get just the particular shape out of image

I have the images as below; I need to extract just the white strip portion from all the images.
I have tried using PIL to extract the rectangular portion by manually specifying the pixel values. Is there an automated way to get this done, where just feeding in the image gives back the rectangular portion?
Below is my snippet:
from PIL import Image
import cv2
import numpy as np
from matplotlib import pyplot as plt

img = Image.open('C:/Users/ShAgarwal/Documents/image_dataset/pic9.jpg')
half_the_width = img.size[0] / 2
half_the_height = img.size[1] / 2
img4 = img.crop(
    (
        half_the_width - 1632,
        half_the_height - 440,
        half_the_width + 1632,
        half_the_height + 80
    )
)
sample image
import cv2
import numpy as np
from matplotlib import pyplot as plt

image = 'IMG_3134.JPG'
# read image
imgc = cv2.imread(image)
img = cv2.resize(imgc, None, fx=0.25, fy=0.25)  # resize since image is huge
# cropping the strip dimensions
# crop_img = img[1010:1650, 140:1099723]
blurred = cv2.blur(img, (3, 3))
canny = cv2.Canny(blurred, 50, 200)
Marking coordinates through automatic edge detection with the Canny algorithm:
## find the non-zero min-max coords of canny
pts = np.argwhere(canny > 0)
y1, x1 = pts.min(axis=0)
y2, x2 = pts.max(axis=0)
## crop the region
cropped = img[y1:y2, x1:x2]
cv2.imwrite("cropped.png", cropped)
# select the bounded area around the white boundary
tagged = cv2.rectangle(img.copy(), (x1, y1), (x2, y2), (0, 255, 0), 3, cv2.LINE_AA)
r = cv2.selectROI(tagged)
imCrop = img[int(r[1]):int(r[1] + r[3]), int(r[0]):int(r[0] + r[2])]
# bounded area
cv2.imwrite("taggd2.png", imCrop)
cv2.waitKey()
Results from above code
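If the interactive ROI selection step is unwanted, a common fully automatic variant (my assumption, not the answerer's code; OpenCV 4 return signature assumed) is to take the bounding box of the largest contour instead of the min/max of all Canny pixels, which is less sensitive to stray edges:
import cv2

img = cv2.imread("IMG_3134.JPG")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blurred = cv2.blur(gray, (3, 3))
canny = cv2.Canny(blurred, 50, 200)

# Largest external contour by area, then its bounding box.
contours, _ = cv2.findContours(canny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
x, y, w, h = cv2.boundingRect(max(contours, key=cv2.contourArea))
cropped = img[y:y + h, x:x + w]
cv2.imwrite("cropped.png", cropped)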

How to detect Roads from satellite images and get the outline using opencv python

Hey guys, I am trying to detect roads from satellite images.
After identifying the roads, I get the coordinates: road coordinates and building coordinates.
This is the code I tried for extracting roads from the satellite images.
Input image
from __future__ import print_function, division
from PIL import Image
import operator
import cv2
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd

# reading image directly from the current working directory
build_image = cv2.imread("avilla_san_marcos_gilbert,az.png")
# mean shift filtering (optional)
# shifted = cv2.pyrMeanShiftFiltering(build_image, 21, 51)
# grayscale conversion
build_gray = cv2.cvtColor(build_image, cv2.COLOR_BGR2GRAY)
# Otsu thresholding
thresh = cv2.threshold(build_gray, 0, 255,
                       cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
cv2.imshow("OTSU Thresholded Image", thresh)
cv2.imwrite("OTSU_1 image.jpg", thresh)
# coordinates of white pixels (buildings)
build_white = np.argwhere(thresh == 255)
# creating an array
build_data = np.array(build_white)
np.savetxt('build_coords.csv', build_data, delimiter=",")
# coordinates of black pixels (roads)
road_white = np.argwhere(thresh == 0)
# creating an array
road_data = np.array(road_white)
print(road_data)
# saving the vector of roads in a csv file
np.savetxt('road_coords.csv', road_data, delimiter=",")
The output image is plotted from the CSV of road pixel coordinates.
I have a problem with the output image: it has also detected trees, which I have to eliminate by obtaining the trees' pixel coordinates.
From that output image I have to extract the layout of the roads alone. Please try to help me out, guys. Thanks in advance.
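As a starting point for suppressing the trees (a sketch under my own assumptions, not a tested solution for this image), one can filter small dark connected components by area before exporting coordinates, since roads form long connected structures while individual trees show up as small blobs:
import cv2
import numpy as np

build_image = cv2.imread("avilla_san_marcos_gilbert,az.png")
build_gray = cv2.cvtColor(build_image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(build_gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

# Roads are the black pixels; invert so they become the foreground components.
road_mask = cv2.bitwise_not(thresh)
n, labels, stats, _ = cv2.connectedComponentsWithStats(road_mask, connectivity=8)

# Keep only components above an area threshold (tune for the image scale).
clean = np.zeros_like(road_mask)
for i in range(1, n):
    if stats[i, cv2.CC_STAT_AREA] > 500:
        clean[labels == i] = 255

road_data = np.argwhere(clean == 255)
np.savetxt('road_coords.csv', road_data, delimiter=",")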
