I'm trying to merge two images according to the values in a mask: wherever the mask is 1, the resulting image should take the values of the first image, and elsewhere the values of the second image. Does anyone know how this can be achieved in PyTorch? Using NumPy, it can be done as follows:
>>> import numpy as np
>>> img1 = np.random.rand(100,100,3)
>>> img2 = np.random.rand(100,100,3)
>>> mask = np.random.rand(100,100)>.5
>>> res = img2.copy()
>>> res[mask] = img1[mask]
You are looking for np.where. Note that the (100, 100) mask needs a trailing channel axis to broadcast against the (100, 100, 3) images:
res = np.where(mask[..., None], img1, img2)
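In PyTorch the equivalent is torch.where. A minimal sketch, assuming img1, img2, and mask are tensors shaped like the NumPy arrays above:

import torch

img1 = torch.rand(100, 100, 3)
img2 = torch.rand(100, 100, 3)
mask = torch.rand(100, 100) > 0.5  # boolean mask

# add a trailing channel axis so the (100, 100) mask broadcasts
# against the (100, 100, 3) images
res = torch.where(mask.unsqueeze(-1), img1, img2)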
I am working on a task that involves the use of image processing techniques to clean a noisy image with the help of other (noisy) images which overlap it in their area. To achieve this, I will need to calculate the warp of each of the images to the target image, i.e. calculate the alignment between them.
My goal is to apply the necessary warps in order to copy each of the images to the target image.
Example images:
source_01.jpg, source_02.jpg, target.jpg
To achieve the above, I first implemented SIFT using the OpenCV module to obtain the [x, y, r, t] values and calculate keypoint distances, and also implemented a simple RANSAC loop with the homography solver in order to calculate the locations of matched keypoints in both images more accurately.
My code:
import numpy as np
import cv2
import os
import matplotlib.pyplot as plt
import tqdm
from functools import reduce
from operator import concat
from skimage.measure import ransac
from skimage.transform import ProjectiveTransform, AffineTransform

file_path = r'denoising_sets\cameleon__N_8__sig_noise_5__sig_motion_103'

def read_images(file_path):
    images = []
    for root, dirs, files in os.walk(os.path.join(os.getcwd(), file_path)):
        for file in files:
            images.append(cv2.imread(os.path.join(root, file), 0))
    return images
images = read_images(file_path)
img1 = images[0]
img2 = images[1]
target_image = images[-1]
def good_match_keypoints(img1, img2, show=True):
    # Initiate SIFT detector
    sift = cv2.SIFT_create()
    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # BFMatcher with default params
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)
    # Apply ratio test
    good = []
    for m, n in matches:
        if m.distance < 0.8 * n.distance:
            good.append([m])
    good_match = reduce(concat, good)
    # cv2.drawMatchesKnn expects list of lists as matches.
    img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, flags=2, outImg=img2)
    if show:
        plt.imshow(img3)
        plt.show()
    return good_match, kp1, kp2
good_match, kp1, kp2 = good_match_keypoints(img1,img2, show=True)
Keypoint distances:
pts1 = np.float32([kp1[m.queryIdx].pt for m in good_match])
pts2 = np.float32([kp2[m.trainIdx].pt for m in good_match])
I used the RANSAC loop with the homography solver in order to calculate the locations of matched keypoints in both images more accurately.
def ransac_loop(pts1, pts2, show=True):
    model, inliers = ransac(
        (pts1, pts2),
        AffineTransform, min_samples=4,
        residual_threshold=8, max_trials=10000
    )
    n_inliers = np.sum(inliers)
    inlier_keypoints_left = [cv2.KeyPoint(point[0], point[1], 1) for point in pts1[inliers]]
    inlier_keypoints_right = [cv2.KeyPoint(point[0], point[1], 1) for point in pts2[inliers]]
    placeholder_matches = [cv2.DMatch(idx, idx, 1) for idx in range(n_inliers)]
    image3 = cv2.drawMatches(img1, inlier_keypoints_left, img2, inlier_keypoints_right, placeholder_matches, None)
    if show:
        plt.imshow(image3)
        plt.show()
    src_pts = np.float32([inlier_keypoints_left[m.queryIdx].pt for m in placeholder_matches]).reshape(-1, 2)
    dst_pts = np.float32([inlier_keypoints_right[m.trainIdx].pt for m in placeholder_matches]).reshape(-1, 2)
    return src_pts, dst_pts
src_pts, dst_pts = ransac_loop(pts1, pts2)
This is what src_pts and dst_pts look like (output truncated):
(array([[106.41315, 332.88037],
        [120.28672, 314.56943],
        ...]),
 array([[116.75639, 576.8563 ],
        [130.71513, 555.35364],
        ...]))
Finally, I want to use warping to copy each source image onto the target image, based on the estimated transformation. I don't know how to achieve this last step and would appreciate assistance.
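For the final warping step, here is a minimal sketch. It assumes the matching pipeline above has been run between a source image and the target image, so that src_pts lie in the source and dst_pts in the target; the overlay at the end is only illustrative:

# estimate a homography mapping source points onto target points
H, status = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

# warp the source image into the target frame
h, w = target_image.shape[:2]
warped = cv2.warpPerspective(img1, H, (w, h))

# overlay the warped source onto the target wherever the warp produced content
combined = target_image.copy()
mask = warped > 0
combined[mask] = warped[mask]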
Can someone help me increase the size of the images produced from an extracted feature map? I recently ran a CNN on a set of images and would like to see the features it extracted. I managed to extract them but am unable to actually see them because the plot is too small.
My code:
from matplotlib import pyplot

# summarize feature map shapes
for i in range(len(cnn.layers)):
    layer = cnn.layers[i]
    # check for conv layer
    if 'conv' not in layer.name:
        continue
    print(i, layer.name, layer.output.shape)
from keras import models
from keras.preprocessing import image
model_new = models.Model(inputs=cnn.inputs, outputs=cnn.layers[1].output)
img_path = 'train/1/2NbeGPsQf2Q - 4 0.jpg'
img = image.load_img(img_path, target_size=(img_rows, img_cols))
import numpy as np
from keras.applications.imagenet_utils import decode_predictions, preprocess_input
img = image.img_to_array(img)
img = np.expand_dims(img, axis=0)
img = preprocess_input(img)
features = model_new.predict(img)
square = 10
ix = 1
for _ in range(square):
    for _ in range(square):
        # specify subplot and turn off axis
        ax = pyplot.subplot(square, square, ix)
        ax.set_xticks([])
        ax.set_yticks([])
        # plot filter channel in colour
        pyplot.imshow(features[0, :, :, ix - 1], cmap='viridis')
        ix += 1
# show the figure
pyplot.show()
The result is attached: output of feature map, layer 1.
It's too small. How can I make it bigger so I can see what is actually there?
I'd appreciate any input. Thanks!
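One way to enlarge the plot, as a minimal sketch reusing the features and square variables from above: create a larger figure before building the subplot grid (figsize is in inches and can be tuned).

pyplot.figure(figsize=(16, 16))  # larger canvas; tune to taste
ix = 1
for _ in range(square):
    for _ in range(square):
        # same grid as above, just drawn on the bigger figure
        ax = pyplot.subplot(square, square, ix)
        ax.set_xticks([])
        ax.set_yticks([])
        pyplot.imshow(features[0, :, :, ix - 1], cmap='viridis')
        ix += 1
pyplot.show()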
I have two images (channel 1 and channel 2) and I'm trying to compute the polynomial transform that warps one image into the other image. First, I created an ORB object and computed the affine transform between the two images (post-affine). Then I decided to try to use skimage.transform.PolynomialTransform. However, when I try to compute the transform, the returned NumPy array has either NaN values or 0 values, even though the original image had a non-zero float value at that location (post-polynomial). What am I doing wrong? Code included below, images in the following link. https://drive.google.com/drive/folders/1mWxUvLFLK5-rYCrxs3-uGKFxKq2wXDjS?usp=sharing Thanks in advance!
Note: I know that the question Image warping with scikit-image and transform.PolynomialTransform is similar, but in my opinion the two aren't duplicates. Although that user's problem is with the same function, the pixels in their transformed images have values, whereas by and large mine don't.
import cv2
import numpy as np  # needed below for np.zeros / np.float32
from ImageConversion import ImageConversion # self-written, irrelevant
import matplotlib
matplotlib.use('macosX')
import matplotlib.pyplot as plt
from scipy.ndimage import uniform_filter
from skimage.draw import circle_perimeter
from skimage.transform import PolynomialTransform, warp
def affine_transform(self):
    channel1_u8 = self.channel1.astype('uint8') # necessary for detectAndCompute
    channel2_u8 = self.channel2.astype('uint8')
    orb = cv2.ORB_create(100)
    #kp1, des1 = orb.detectAndCompute(channel1_32, None)
    #kp2, des2 = orb.detectAndCompute(channel2_32, None)
    kp1, des1 = orb.detectAndCompute(channel1_u8, None)
    kp2, des2 = orb.detectAndCompute(channel2_u8, None)
    matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = matcher.match(des1, des2, None)
    matches = sorted(matches, key=lambda x: x.distance)
    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)
    for i, match in enumerate(matches):
        points1[i, :] = kp1[match.queryIdx].pt # index of descriptor in query descriptors, ie index of descriptor in channel 1 which is the image we wish to map to channel 2
        points2[i, :] = kp2[match.trainIdx].pt
    mat_coeff, inliers = cv2.estimateAffine2D(points1, points2) # inliers only here because estimateAffine2D returns both matrix coefficients and inliers
    print(mat_coeff)
    rows, cols = channel1_u8.shape
    #dst = cv2.warpAffine(channel1_u8, mat_coeff, (cols, rows))
    dst = cv2.warpAffine(self.channel1, mat_coeff, (cols, rows))
    return mat_coeff, dst
tform = PolynomialTransform()
tform.estimate(self.channel2, dst, order = 3)
warped_1 = warp(dst, tform, mode = 'constant')
I found the error. I was trying to feed PolynomialTransform.estimate the entire image, rather than identified key points in the image.
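For anyone hitting the same problem, here is a minimal sketch of the corrected call, assuming points1 and points2 are the matched keypoint coordinates collected in affine_transform above. Note that skimage's warp expects an inverse map, i.e. a transform from output coordinates back to input coordinates:

tform = PolynomialTransform()
# estimate expects (N, 2) point arrays, not whole images;
# map output (channel2) coordinates back to input (channel1) coordinates
tform.estimate(points2, points1, order=3)
warped_1 = warp(self.channel1, tform, mode='constant')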
I have a matrix, e.g., generated as follows
x = np.random.randint(10,size=(20,20))
How can I visualize the matrix with respect to the distribution of a given value, e.g., 6?
In other words, how can I show the matrix as an image where the pixels whose corresponding matrix entries equal 6 are shown as white, while all other pixels are shown as black?
The simplest way to display the distribution of a given value through a black and white image is using a boolean array like x == 6. If you wish to improve visualization by replacing black and white with custom colors, NumPy's where will come in handy:
import numpy as np
import matplotlib.pyplot as plt
x = np.random.randint(10, size=(20, 20))
value = 6
foreground = [255, 0, 0] # red
background = [0, 0, 255] # blue
bw = x == value
rgb = np.where(bw[:, :, None], foreground, background)
fig, ax = plt.subplots(1, 2)
ax[0].imshow(bw, cmap='gray')
ax[0].set_title('Black & white')
ax[1].imshow(rgb)
ax[1].set_title('RGB')
plt.show()
I think you want this:
from PIL import Image
import numpy as np
# Initialise data
x = np.random.randint(10,size=(20,20),dtype=np.uint8)
# Make all pixels with value 6 into white and all else black
x[x==6] = 255
x[x!=255] = 0
# Make PIL Image from Numpy array
pi = Image.fromarray(x)
# Display image
pi.show()
# Save PIL Image
pi.save('result.png')
I would like to deform/scale a three dimensional numpy array in one dimension. I will visualize my problem in 2D:
I have the original image, which is a 2D numpy array:
Then I want to deform/scale it by some factor in dimension 0, the horizontal dimension:
For PIL images there are a lot of solutions, for example in PyTorch, but what if I have a NumPy array of shape (w, h, d) = (288, 288, 468)? I would like to upsample the width by a factor of 1.04, for example, to (299, 288, 468). Each cell contains a normalized number between 0 and 1.
I am not sure whether I am simply not searching with the correct vocabulary online, so correcting my question would also help. Or tell me the mathematical background of this problem, and I can write the code on my own.
Thank you!
You can repeat the array along the specific axis a number of times equal to ceil(factor) where factor > 1 and then evenly space indices on the stretched dimension to select int(factor * old_length) elements. This does not perform any kind of interpolation but just repeats some of the elements:
import math
import cv2
import numpy as np

# read the example image as grayscale
img = cv2.imread('/tmp/example.png', cv2.IMREAD_GRAYSCALE)
print(img.shape) # (512, 512)

axis = 1
factor = 1.25

# repeat each element ceil(factor) times along the axis
stretched = np.repeat(img, math.ceil(factor), axis=axis)
print(stretched.shape) # (512, 1024)

# evenly spaced indices selecting int(factor * old_length) elements
indices = np.linspace(0, stretched.shape[axis] - 1, int(img.shape[axis] * factor))
indices = np.rint(indices).astype(int)

result = np.take(stretched, indices, axis=axis)
print(result.shape) # (512, 640)

cv2.imwrite('/tmp/stretched.png', result)
This is the result (left is original example.png and right is stretched.png):
Looks like it is as easy as using the torch.nn.functional.interpolate function from PyTorch and choosing 'trilinear' as the interpolation mode:
import torch
PET = torch.tensor(data)
print("Old shape = {}".format(PET.shape))
scale_factor_x = 1.4
# Scaling.
PET = torch.nn.functional.interpolate(PET.unsqueeze(0).unsqueeze(0),\
scale_factor=(scale_factor_x, 1, 1), mode='trilinear').squeeze().squeeze()
print("New shape = {}".format(PET.shape))
output:
>>> Old shape = torch.Size([288, 288, 468])
>>> New shape = torch.Size([403, 288, 468])
I verified the results by looking at the data, but I can't show them here due to data privacy. Sorry!
This is an example of linear up-sampling a 3D image with scipy.interpolate; hope it helps.
(I worked quite a lot with np.meshgrid here; if you're not familiar with it, I recently explained it here.)
import numpy as np
import matplotlib.pyplot as plt
import scipy
from scipy.interpolate import RegularGridInterpolator
# should be 1.3.0
print(scipy.__version__)
# =============================================================================
# producing a test image "image3D"
# =============================================================================
def some_function(x, y, z):
    # output is a 3D Gaussian with some periodic modification
    # it's only for testing, so this part is not important
    out = np.sin(2*np.pi*x)*np.cos(np.pi*y)*np.cos(4*np.pi*z)*np.exp(-(x**2+y**2+z**2))
    return out
# define a grid to evaluate the function on.
# the dimension of the 3D-Image will be (20,20,20)
N = 20
x = np.linspace(-1,1,N)
y = np.linspace(-1,1,N)
z = np.linspace(-1,1,N)
xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')
image3D = some_function(xx,yy,zz)
# =============================================================================
# plot the testimage "image3D"
# you will see 5 images that corresponds to the slicing of the
# z-axis similar to your example picture_
# https://sites.google.com/site/linhvtlam2/fl7_ctslices.jpg
# =============================================================================
def plot_slices(image_3d):
    f, loax = plt.subplots(1, 5, figsize=(15, 5))
    loax = loax.flatten()
    for ii, i in enumerate([8, 9, 10, 11, 12]):
        loax[ii].imshow(image_3d[:,:,i], vmin=image_3d.min(), vmax=image_3d.max())
    plt.show()
plot_slices(image3D)
# =============================================================================
# interpolate the image
# =============================================================================
interpolation_function = RegularGridInterpolator((x, y, z), image3D, method = 'linear')
# =============================================================================
# evaluate at new grid
# =============================================================================
# create the new grid that you want
x_new = np.linspace(-1,1,30)
y_new = np.linspace(-1,1,40)
z_new = np.linspace(-1,1,N)
xx_new, yy_new, zz_new = np.meshgrid(x_new, y_new, z_new, indexing='ij')
# change the order of the points to match the input shape of the
# interpolation function. That's a bit messy but I couldn't figure
# out a way around that.
evaluation_points = np.rollaxis(np.array([xx_new, yy_new, zz_new]), 0, 4)
interpolated = interpolation_function(evaluation_points)
plot_slices(interpolated)
The original (20,20,20)-dimensional 3D image:
And the upsampled (30,40,20)-dimensional 3D image: