Split images lose color when saving a RasterBrick array as *.jpg - jpeg

# Packages
library(raster)
library(rgeos)
library(rgdal)
library(jpeg)

I have an RGB *.jpg image:
## Open my RGB image
path<-"https://raw.githubusercontent.com/Leprechault/trash/main/IMG_191022_134242_0000_RGB.JPG" # Image path
download.file(path, "IMG_191022_134242_0000_RGB.JPG", mode = "wb")
rc<-stack(raster("IMG_191022_134242_0000_RGB.JPG")) #Open as a raster and stack
plot(rc)
When I try to split it into 16 images of equal size:
# This function spatially aggregates the original raster
# it turns each aggregated cell into a polygon
# then the extent of each polygon is used to crop
# the original raster.
# The function returns a list with all the pieces
# it saves and plots each piece
# The arguments are:
# raster = raster to be chopped (raster object)
# ppside = pieces per side (integer)
# save = write raster (TRUE or FALSE)
# plot = do you want to plot the output? (TRUE or FALSE)
SplitRas <- function(raster, ppside, save, plot){
  h <- ceiling(ncol(raster)/ppside)
  v <- ceiling(nrow(raster)/ppside)
  agg <- aggregate(raster, fact = c(h, v))
  agg[] <- 1:ncell(agg)
  agg_poly <- rasterToPolygons(agg)
  names(agg_poly) <- "polis"
  r_list <- list()
  for(i in 1:ncell(agg)){
    e1 <- extent(agg_poly[agg_poly$polis == i,])
    r_list[[i]] <- crop(raster, e1)
  }
  if(save == TRUE){
    for(i in 1:length(r_list)){
      options(max.print = 999999)
      #EBImage::writeImage(as.array(r_list[[i]]/255), paste0("sample_", i, ".jpg"), quality = 100)
      writeJPEG(as.array(r_list[[i]]/255), target = paste0("sample_", i, ".jpg"),
                quality = 1, color.space = "RGBA")
    }
  }
  return(r_list)
}
# Split the RGB raster into 16 subimages
splitRGB <- SplitRas(raster = rc, ppside = 4, save = TRUE)
The results are grayscale images, e.g. the first (sample_1.jpg) of the 16 images:
I have tried many changes to the writeJPEG() call (something seems wrong in the as.array(r_list[[i]]/255) code or in the color.space argument) without success. Any ideas?

Solution: split each RGB channel into an individual raster layer (rst.red, rst.green and rst.blue) and stack them before the crop operation with the SplitRas custom function, and it works! The original stack(raster(...)) call had read only a single band of the JPEG, which is why the saved pieces were grayscale; jpeg::readJPEG() returns all three channels, with values already scaled to [0,1], so the /255 division is no longer needed either.
# Packages
library(raster)
library(rgeos)
library(rgdal)
library(jpeg)
## download RGB image
path <- "https://raw.githubusercontent.com/Leprechault/trash/main/IMG_191022_134242_0000_RGB.JPG" # image path
download.file(path, "IMG_191022_134242_0000_RGB.JPG", mode = "wb")

# Open the jpeg file: values come back in [0,1], channels in R,G,B order
jpg <- readJPEG("IMG_191022_134242_0000_RGB.JPG")

# Convert each channel of the image data to a raster layer
rst.red   <- raster(jpg[,,1])
rst.green <- raster(jpg[,,2])
rst.blue  <- raster(jpg[,,3])
rc <- stack(rst.red, rst.green, rst.blue)
# This function spatially aggregates the original raster
# it turns each aggregated cell into a polygon
# then the extent of each polygon is used to crop
# the original raster.
# The function returns a list with all the pieces
# it saves and plots each piece
# The arguments are:
# raster = raster to be chopped (raster object)
# ppside = pieces per side (integer)
# save = write raster (TRUE or FALSE)
# plot = do you want to plot the output? (TRUE or FALSE)
SplitRas <- function(raster, ppside, save, plot){
  h <- ceiling(ncol(raster)/ppside)
  v <- ceiling(nrow(raster)/ppside)
  agg <- aggregate(raster, fact = c(h, v))
  agg[] <- 1:ncell(agg)
  agg_poly <- rasterToPolygons(agg)
  names(agg_poly) <- "polis"
  r_list <- list()
  for(i in 1:ncell(agg)){
    e1 <- extent(agg_poly[agg_poly$polis == i,])
    r_list[[i]] <- crop(raster, e1)
  }
  if(save == TRUE){
    for(i in 1:length(r_list)){
      writeJPEG(as.array(r_list[[i]]), target = paste0("sample_", i, ".jpg"), quality = 1)
    }
  }
  return(r_list)
}
# Split the RGB raster into 16 subimages
splitRGB <- SplitRas(raster = rc, ppside = 4, save = TRUE)

Related

Why are albumentations Augmentations (Yolo / YoloV5) altering Bounding Boxes if no augmentations are being placed?

I was using the Albumentations library to perform some data augmentations on an object detection dataset that I intended to train a YoloV5 model on.
I have to perform the augmentations separately and save the images to disk, but when I do, I notice that some of the returned bounding boxes aren't generated properly.
My augmentations are set up in a separate aug.py file, shown below (the augmentations were purposely removed during debugging attempts, see below):
import albumentations as A
import cv2

PROB = 0.5
bbp = A.BboxParams(format="yolo")

horizontal_flip_transform = A.Compose([
], bbox_params=bbp)

vertical_flip_transform = A.Compose([
], bbox_params=bbp)

pixel_dropout_transform = A.Compose([
], bbox_params=bbp)

random_rotate = A.Compose([
], bbox_params=bbp)

# NOTE: THIS METHOD IMPLIES THAT THE IMAGE WIDTHS MUST BE AT LEAST 50 PIXELS
# Remove this aug to remove this constraint
random_crop = A.Compose([
], bbox_params=bbp)

augs = [horizontal_flip_transform, vertical_flip_transform, pixel_dropout_transform, random_rotate, random_crop]

def get_augmentations():
    return augs
And the relevant parts of my implementation for performing the augmentations and saving them to disk are below:
import os
import numpy as np
from PIL import Image

def run_augments_on_image(img_name, bboxes, max_images_to_generate=500):
    ret = []
    img = np.array(Image.open(img_name), dtype=np.uint8)
    transforms = get_augmentations()
    for i in range(min(len(transforms), max_images_to_generate)):
        transformed = transforms[i](image=img, bboxes=bboxes)
        ret.append((transformed["image"], transformed["bboxes"]))
    return ret

def run_and_save_augments_on_image_sets(batch_img_names, bboxes_urls, max_images_to_generate, dataset_dir, trainval):
    num_images = 0
    for i in range(len(batch_img_names)):
        bboxes = []
        with open(os.path.join(dataset_dir, trainval, 'labels', bboxes_urls[i]), 'r') as f:
            for row in f:
                x = row.strip().split(' ')
                x.append(row[0])
                x.pop(0)
                x[0] = float(x[0])
                x[1] = float(x[1])
                x[2] = float(x[2])
                x[3] = float(x[3])
                bboxes.append(x)
        trans = run_augments_on_image(os.path.join(dataset_dir, trainval, 'images', batch_img_names[i]), bboxes)
        img_index = len(os.listdir(os.path.join(dataset_dir, 'train', 'images'))) + len(os.listdir(os.path.join(dataset_dir, 'valid', 'images'))) + 1
        for j in range(len(trans)):
            img_trans, bboxes_trans = trans[j]
            p = Image.fromarray(img_trans).save(os.path.join(dataset_dir, trainval, 'images', f'image-{img_index}.{batch_img_names[j].split(".")[-1]}'))
            with open(os.path.join(dataset_dir, trainval, 'labels', f'image-{img_index}.txt'), 'w') as f:
                for boxs in bboxes_trans:
                    print(f'{boxs[-1]} {boxs[0]} {boxs[1]} {boxs[2]} {boxs[3]}', file=f)
            num_images += 1
            img_index += 1
            if num_images >= max_images_to_generate:
                break
        if num_images >= max_images_to_generate:
            break
For testing purposes (some of the bounding boxes were off), I removed all the actual augmentations, expecting the input image's label (one augmented example is shown below) to be equal to the augmented label, since there were no augmentations. But, as you can see, the two labels are different.
img-original.txt
0 0.5662285714285714 0.2740066225165563 0.5297714285714286 0.4837913907284769
img-augmented.txt
0 0.51488 0.47173333333333334 0.6405099999999999 0.6527333333333334
(The labels above are in normalized xywh YOLO format)
Why is albumentations altering the labels? None of the augmentations in aug.py contain anything.
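For reference, here is a minimal sketch (a hypothetical helper, not part of the code above) of how a normalized YOLO xywh row maps to pixel coordinates, which makes rows like the two above directly comparable:

def yolo_to_corners(xc, yc, w, h, img_w, img_h):
    # hypothetical helper for illustration:
    # normalized center/size -> absolute pixel corner coordinates
    x1 = (xc - w / 2) * img_w
    y1 = (yc - h / 2) * img_h
    x2 = (xc + w / 2) * img_w
    y2 = (yc + h / 2) * img_h
    return x1, y1, x2, y2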

Bokeh Widget distorts plot with tile provider

When I change the color through the given widget first, the plot distorts. When I first move the map with the cursor and then change the color, the plot does not distort. This only happens when a tile background is added, so the problem might lie in a difference between the underlying projection/CRS of the plot and that of the tile, which somehow gets changed in the background (?).
If you are able to find the problem, it would be great if you could also link to resources explaining how you did it, so I can learn to debug more in depth.
My browser: Brave (Chromium based)
Simple working example:
Props to bigreddot, as this working example is mostly based on his answer.
from bokeh.layouts import grid
from bokeh.models.widgets.inputs import ColorPicker
from bokeh.sampledata import us_states
from bokeh.plotting import *
from bokeh.tile_providers import get_provider, CARTODBPOSITRON

us_states = us_states.data.copy()
del us_states["HI"]
del us_states["AK"]

# separate latitude and longitude points for the borders of the states
state_xs = [us_states[code]["lons"] for code in us_states]
state_ys = [us_states[code]["lats"] for code in us_states]

# init figure
p = figure(title="Plotting Points Example: The 5 Largest Cities in Texas",
           toolbar_location="left", plot_width=1100, plot_height=700)
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None

# Draw state lines
p.patches(state_xs, state_ys, fill_alpha=0.0,
          line_color="#884444", line_width=1.5)

# Latitude and Longitude of 5 Cities
# ------------------------------------
# Austin, TX -------30.26° N, 97.74° W
# Dallas, TX -------32.77° N, 96.79° W
# Fort Worth, TX ---32.75° N, 97.33° W
# Houston, TX ------29.76° N, 95.36° W
# San Antonio, TX --29.42° N, 98.49° W

# Now group these values together into lists of x (longitude) and y (latitude)
x = [-97.7431, -96.79, -97.33, -95.36, -98.49]
y = [30.26, 32.77, 32.75, 29.76, 29.42]

# add basemap and labels
tile_provider = get_provider(CARTODBPOSITRON)
p.add_tile(tile_provider)
points = p.circle(x, y, size=8, color='navy', alpha=1)
picker = ColorPicker(title="Point Color", color="navy")
picker.js_link("color", points.glyph, "fill_color")

# output to static HTML file
output_file("texas.html")
l = grid([p, picker], ncols=2, sizing_mode="fixed")

# show results
show(l)
The display via the grid is not the problem - it also occurs if you display the plot via:
show(column([p, picker]))
As indicated here, one needs to add
p.match_aspect = True
to the above code before the output. This resolves the issue of randomly changing ratios.
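For clarity, the line goes on the figure object in the example above, after the figure is created and before the output is produced:

# after p = figure(...) and before output_file()/show()
p.match_aspect = True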

How to get H1 and DC images from FFT of a series of images of organ moving over time

I have a series of images of a beating heart. How can I take the Fourier transform over time to extract DC and H1 images from it?
I tried the following code so far, but I don't know what H1 is exactly, so I can't extract the images of that component.
Here is the code I have tried so far:
import numpy as np
import cv2 as cv

for sl in range(img.shape[2]):
    for fr in range(1, img.shape[3]):
        #|=======================================================|
        #|                   xx Thresholding xx                  |
        #|-------------------------------------------------------|
        th_f  = cv.adaptiveThreshold(img[:,:,sl,fr  ].astype('uint8'), 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 11, 4)
        th_f0 = cv.adaptiveThreshold(img[:,:,sl,fr-1].astype('uint8'), 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 11, 4)
        #|=======================================================|
        #|    xx Fourier HPF Filter (Smoothing & Denoising) xx   |
        #|-------------------------------------------------------|
        # Fourier 2D transform
        f  = np.fft.fft2(th_f)
        f0 = np.fft.fft2(th_f0)
        # Move the DC component of the FFT output to the center of the spectrum
        fshift  = np.fft.fftshift(f)
        fshift0 = np.fft.fftshift(f0)
        # Save the original fshift
        fshift_orig  = fshift.copy()
        fshift0_orig = fshift0.copy()
        # Create mask
        rows, cols = img.shape[0], img.shape[1]
        crow, ccol = int(rows/2), int(cols/2)
        # Use mask to remove low frequency components
        dist1 = 30
        #dist2 = 0
        fshift[crow-dist1:crow+dist1, ccol-dist1:ccol+dist1] = 0
        #fshift[crow-dist2:crow+dist2, ccol-dist2:ccol+dist2] = fshift_orig[crow-dist2:crow+dist2, ccol-dist2:ccol+dist2]
        fshift0[crow-dist1:crow+dist1, ccol-dist1:ccol+dist1] = 0
        #fshift0[crow-dist2:crow+dist2, ccol-dist2:ccol+dist2] = fshift0_orig[crow-dist2:crow+dist2, ccol-dist2:ccol+dist2]
        # ----- calculate the derivative of the 2D FFT with respect to time -----
        dfdt = fshift - fshift0 + result  # 'result' comes from code not shown here
        #print(np.max(result))
        # inverse Fourier transform
        img_back = np.fft.ifft2(dfdt)
        # get rid of the imaginary part by taking the absolute value
        Fresult = np.abs(img_back).astype('uint8')
        cv.imshow("Fresult", Fresult)
cv.waitKey(0)
cv.destroyAllWindows()
The solution was to take a 3D Fourier transform for each slice, then choose only the 2nd component of the transform and transform it back to the spatial domain, and that's it.
The benefit of this is to detect whether something is moving along the third axis (time, in my case).
import numpy as np
import numpy.fft as FFT

for sl in range(img.shape[2]):
    # ----- Fourier H1 ---------------------------------------
    # ff1[:, :, 1] is the H1 component; index 0 would give the DC component
    ff1 = FFT.fftn(img[:, :, sl, :])
    fh = np.absolute(FFT.ifftn(ff1[:, :, 1]))
    # ---------------------------------------------------------
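To make the terminology concrete, here is a minimal sketch of the underlying idea, assuming img is a rows x cols x slices x frames array as in the code above: take the FFT along the time axis only; bin 0 of the temporal spectrum is the DC (time-average) image, and bin 1 is H1, the first harmonic, which highlights pixels whose intensity varies once over the acquired time window.

import numpy as np

F = np.fft.fft(img, axis=-1)        # FFT along the time (frame) axis only
dc_image = np.abs(F[:, :, sl, 0])   # bin 0: time-averaged (DC) image
h1_image = np.abs(F[:, :, sl, 1])   # bin 1: first-harmonic (H1) magnitude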

Increase width/height of image (not resize)

From https://www.pyimagesearch.com/2018/07/19/opencv-tutorial-a-guide-to-learn-opencv/
I'm able to extract the contours and write them out as files.
For example, I have a photo with some scribbled text: "in there".
I've been able to extract the letters as separate files, but I want these letter files to have the same width and height. For example, the widths of "i" and "r" will differ; in that case I want to append (any b/w pixels) to the right of the "i" photo so its width becomes the same as that of "r".
How can I do this in Python? I just want to increase the size of the photo (not resize it).
My code looks something like this:
# find contours (i.e., outlines) of the foreground objects in the
# thresholded image
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
output = image.copy()
ROI_number = 0
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    ROI = image[y:y+h, x:x+w]
    file = 'ROI_{}.png'.format(ROI_number)
    cv2.imwrite(file, ROI)
    ROI_number += 1
Here are a couple of other ways to do that using Python/OpenCV, with cv2.copyMakeBorder() to extend the border to the right by 50 pixels. The first way simply extends the border by replication. The second extends it with the mean (average) blue background color, using a mask to get only the blue pixels.
Input:
import cv2
import numpy as np
# read image
img = cv2.imread('i.png')
# get mask of background pixels (for result2b only)
lowcolor = (232,221,163)
highcolor = (252,241,183)
mask = cv2.inRange(img, lowcolor, highcolor)
# get average color of background using mask on img (for result2b only)
mean = cv2.mean(img, mask)[0:3]
color = (mean[0],mean[1],mean[2])
# extend image to the right by 50 pixels
result = img.copy()
result2a = cv2.copyMakeBorder(result, 0,0,0,50, cv2.BORDER_REPLICATE)
result2b = cv2.copyMakeBorder(result, 0,0,0,50, cv2.BORDER_CONSTANT, value=color)
# view result
cv2.imshow("img", img)
cv2.imshow("mask", mask)
cv2.imshow("result2a", result2a)
cv2.imshow("result2b", result2b)
cv2.waitKey(0)
cv2.destroyAllWindows()
# save result
cv2.imwrite("i_extended2a.jpg", result2a)
cv2.imwrite("i_extended2b.jpg", result2b)
Replicated Result:
Average Background Color Result:
In Python/OpenCV/Numpy you create a new image of the size and background color you want. Then you use numpy slicing to insert the old image into the new one. For example:
Input:
import cv2
import numpy as np
# read image
img = cv2.imread('i.png')
ht, wd, cc = img.shape
# create new image of desired size (extended by 50 pixels in width) and desired color
ww = wd+50
hh = ht
color = (242,231,173)
result = np.full((hh,ww,cc), color, dtype=np.uint8)
# copy img image into image at offsets yy=0,xx=0
yy=0
xx=0
result[yy:yy+ht, xx:xx+wd] = img
# view result
cv2.imshow("result", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
# save result
cv2.imwrite("i_extended.jpg", result)

Code for image manipulation produces no result/output

I am working on a project where I am required to use classes and objects to manipulate an image in Python using PIL.
I have verified that the path to the file is formatted correctly, so there must be something wrong in the code itself.
from PIL import Image

class image_play(object):
    def __init__(self, im_name):
        self.im_name = im_name

    def rgb_to_gray_image(self):
        im = Image.open(self.im_name)
        im = im.convert('LA')
        return im

    # editing pixels of image to white
    def loop_over_image(self):
        im = Image.open(self.im_name)
        width, height = im.size
        # nested loop over all pixels of image
        temp = []
        for i in range(width):
            for j in range(height):
                temp.append((255, 255, 255))  # append a tuple of RGB values for each pixel
        image_out = Image.new(im.mode, im.size)  # create new image using PIL
        image_out.putdata(temp)  # use the temp list to fill the image
        return image_out

pic = image_play('test.png')
picGray = pic.rgb_to_gray_image()
picWhite = pic.loop_over_image()
I simply added picGray.show() and picWhite.show() and now I have viewable output. Hmmmm...
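That is, with the two calls appended at the end of the script above:

picGray.show()   # display the grayscale version
picWhite.show()  # display the all-white version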
