I want to create an interactive line plot and topoplot that depend on a menu selection. I figured out how to make the line chosen in the menu red, but the same doesn't work for the topoplot marks (the black circles inside the topoplot). I can change a mark manually (cmap[][4] = RGB{N0f8}(1.0, 0.0, 0.0)), but how do I do that interactively?
f = Figure(backgroundcolor = RGBf(0.98, 0.98, 0.98), resolution = (1500, 700))
ax = Axis(f[1:3, 1], xlabel = "Time [s]", ylabel = "Voltage amplitude [µV]")
N = 1:length(pos) #1:4
hidespines!(ax, :t, :r)
GLMakie.xlims!(-0.3, 1.2)
hlines!(0, color = :gray, linewidth = 1)
vlines!(0, color = :gray, linewidth = 1)
times = range(-0.3, length = size(dat_e, 2), step = 1 / 128)
lines = Dict()
for i in N
    mean_trial = mean(dat_e[i, :, :], dims = 2)[:, 1]
    line = lines!(times, mean_trial, color = "black")
    lines[i] = line
end
hidedecorations!(ax, label = false, ticks = false, ticklabels = false)
topo_axis = Axis(f[2, 2], width = 178, height = 178, aspect = DataAspect())
Makie.xlims!(low = -0.2, high = 1.2)
Makie.ylims!(low = -0.2, high = 1.2)
topoMatrix = eegHeadMatrix(pos[N], (0.5, 0.5), 0.5)
cmap = Observable(collect(ColorScheme(range(colorant"black", colorant"black", length=30))))
#cmap[][4] = RGB{N0f8}(1.0,0.0,0.0)
topo = eeg_topoplot!(topo_axis, N, # averaging all trials of 30 participants at the Xth msec
    raw.ch_names[1:30];
    positions = pos, # produced automatically from ch_names
    interpolation = NullInterpolator(),
    enlarge = 1,
    #colorrange = (0, 1), # add the 0 for the white-first color
    colormap = cmap[],
    label_text = false)
hidedecorations!(current_axis())
hidespines!(current_axis())
num_prev = 0
menu = Menu(f[3, 2], options = raw.ch_names[1:30], default = nothing)#, default = "second")
on(menu.selection) do selected
    if !isnothing(selected)
        num = findall(x -> x == selected, raw.ch_names[1:30])[]
        if num_prev != 0
            lines[num_prev].color = "black"
            cmap[][num_prev] = RGB{N0f8}(0.0, 0.0, 0.0) # reset the previous mark to black
        end
        lines[num].color = "red"
        cmap[][num] = RGB{N0f8}(1.0, 0.0, 0.0)
        num_prev = num
    end
end
notify(menu.selection)
#print(cmap[])
f
We solved this by putting this line at the end of the on(menu.selection) block:
notify(lines)
It works because lines! automatically creates Observables, so notifying them triggers the plot to update.
I want to put text annotations along a line at the top. If I write the same number (no matter which) into every element of the y array, the text is aligned in a row:
let
    f = Figure()
    ch = ["Ch2", "Ch3", "Ch17", "Ch18", "Ch19"]
    x = Array(0:100:400)
    y = [100, 100, 100, 100, 100]
    ax = Axis(f[1, 1])
    text!(x, y, text = ch, align = (:center, :center),
        offset = (0, 0),
        color = :black)
    f
end
but if I change one item in the y array:
y = [10, 100, 100, 100, 100]
this happens:
How can I put "Ch2" in the same position at the top as the other annotations?
Use ylims!(ax, 0, 110) to configure your axis:
let
    f = Figure()
    ch = ["Ch2", "Ch3", "Ch17", "Ch18", "Ch19"]
    x = Array(0:100:400)
    y = [100, 100, 100, 100, 100]
    ax = Axis(f[1, 1])
    text!(x, y, text = ch, align = (:center, :center),
        offset = (0, 0), color = :black)
    ylims!(ax, 0, 110)
    f
end
How can I get the actual coordinates of the Landsat image corners (see image)?
From the metadata file (..._MTL.txt) I can get the coordinates of the red corners, but I need the coordinates of the green corners.
I work with GeoTIFF files using GDAL.
I need the correct latitude and longitude of the green points.
Can I do it using Python 3?
Thanks for your help.
Metadata file:
GROUP = L1_METADATA_FILE
GROUP = METADATA_FILE_INFO
ORIGIN = "Image courtesy of the U.S. Geological Survey"
REQUEST_ID = "9991103150002_00325"
PRODUCT_CREATION_TIME = 2011-03-16T20:14:24Z
STATION_ID = "EDC"
LANDSAT5_XBAND = "1"
GROUND_STATION = "IKR"
LPS_PROCESSOR_NUMBER = 0
DATEHOUR_CONTACT_PERIOD = "1016604"
SUBINTERVAL_NUMBER = "01"
END_GROUP = METADATA_FILE_INFO
GROUP = PRODUCT_METADATA
PRODUCT_TYPE = "L1T"
ELEVATION_SOURCE = "GLS2000"
PROCESSING_SOFTWARE = "LPGS_11.3.0"
EPHEMERIS_TYPE = "DEFINITIVE"
SPACECRAFT_ID = "Landsat5"
SENSOR_ID = "TM"
SENSOR_MODE = "BUMPER"
ACQUISITION_DATE = 2010-06-15
SCENE_CENTER_SCAN_TIME = 04:57:44.2830500Z
WRS_PATH = 145
STARTING_ROW = 26
ENDING_ROW = 26
BAND_COMBINATION = "1234567"
PRODUCT_UL_CORNER_LAT = 49.8314223
PRODUCT_UL_CORNER_LON = 84.0018859
PRODUCT_UR_CORNER_LAT = 49.8694055
PRODUCT_UR_CORNER_LON = 87.4313889
PRODUCT_LL_CORNER_LAT = 47.8261840
PRODUCT_LL_CORNER_LON = 84.1192898
PRODUCT_LR_CORNER_LAT = 47.8615913
PRODUCT_LR_CORNER_LON = 87.4144676
PRODUCT_UL_CORNER_MAPX = 284400.000
PRODUCT_UL_CORNER_MAPY = 5524200.000
PRODUCT_UR_CORNER_MAPX = 531000.000
PRODUCT_UR_CORNER_MAPY = 5524200.000
PRODUCT_LL_CORNER_MAPX = 284400.000
PRODUCT_LL_CORNER_MAPY = 5301000.000
PRODUCT_LR_CORNER_MAPX = 531000.000
PRODUCT_LR_CORNER_MAPY = 5301000.000
PRODUCT_SAMPLES_REF = 8221
PRODUCT_LINES_REF = 7441
PRODUCT_SAMPLES_THM = 4111
PRODUCT_LINES_THM = 3721
BAND1_FILE_NAME = "L5145026_02620100615_B10.TIF"
BAND2_FILE_NAME = "L5145026_02620100615_B20.TIF"
BAND3_FILE_NAME = "L5145026_02620100615_B30.TIF"
BAND4_FILE_NAME = "L5145026_02620100615_B40.TIF"
BAND5_FILE_NAME = "L5145026_02620100615_B50.TIF"
BAND6_FILE_NAME = "L5145026_02620100615_B60.TIF"
BAND7_FILE_NAME = "L5145026_02620100615_B70.TIF"
GCP_FILE_NAME = "L5145026_02620100615_GCP.txt"
METADATA_L1_FILE_NAME = "L5145026_02620100615_MTL.txt"
CPF_FILE_NAME = "L5CPF20100401_20100630_09"
END_GROUP = PRODUCT_METADATA
GROUP = MIN_MAX_RADIANCE
LMAX_BAND1 = 193.000
LMIN_BAND1 = -1.520
LMAX_BAND2 = 365.000
LMIN_BAND2 = -2.840
LMAX_BAND3 = 264.000
LMIN_BAND3 = -1.170
LMAX_BAND4 = 221.000
LMIN_BAND4 = -1.510
LMAX_BAND5 = 30.200
LMIN_BAND5 = -0.370
LMAX_BAND6 = 15.303
LMIN_BAND6 = 1.238
LMAX_BAND7 = 16.500
LMIN_BAND7 = -0.150
END_GROUP = MIN_MAX_RADIANCE
GROUP = MIN_MAX_PIXEL_VALUE
QCALMAX_BAND1 = 255.0
QCALMIN_BAND1 = 1.0
QCALMAX_BAND2 = 255.0
QCALMIN_BAND2 = 1.0
QCALMAX_BAND3 = 255.0
QCALMIN_BAND3 = 1.0
QCALMAX_BAND4 = 255.0
QCALMIN_BAND4 = 1.0
QCALMAX_BAND5 = 255.0
QCALMIN_BAND5 = 1.0
QCALMAX_BAND6 = 255.0
QCALMIN_BAND6 = 1.0
QCALMAX_BAND7 = 255.0
QCALMIN_BAND7 = 1.0
END_GROUP = MIN_MAX_PIXEL_VALUE
GROUP = PRODUCT_PARAMETERS
CORRECTION_METHOD_GAIN_BAND1 = "CPF"
CORRECTION_METHOD_GAIN_BAND2 = "CPF"
CORRECTION_METHOD_GAIN_BAND3 = "CPF"
CORRECTION_METHOD_GAIN_BAND4 = "CPF"
CORRECTION_METHOD_GAIN_BAND5 = "CPF"
CORRECTION_METHOD_GAIN_BAND6 = "IC"
CORRECTION_METHOD_GAIN_BAND7 = "CPF"
CORRECTION_METHOD_BIAS = "IC"
SUN_AZIMUTH = 141.2669762
SUN_ELEVATION = 59.9909680
OUTPUT_FORMAT = "GEOTIFF"
END_GROUP = PRODUCT_PARAMETERS
GROUP = CORRECTIONS_APPLIED
STRIPING_BAND1 = "NONE"
STRIPING_BAND2 = "NONE"
STRIPING_BAND3 = "NONE"
STRIPING_BAND4 = "NONE"
STRIPING_BAND5 = "NONE"
STRIPING_BAND6 = "NONE"
STRIPING_BAND7 = "NONE"
BANDING = "N"
COHERENT_NOISE = "N"
MEMORY_EFFECT = "Y"
SCAN_CORRELATED_SHIFT = "Y"
INOPERABLE_DETECTORS = "N"
DROPPED_LINES = "N"
END_GROUP = CORRECTIONS_APPLIED
GROUP = PROJECTION_PARAMETERS
REFERENCE_DATUM = "WGS84"
REFERENCE_ELLIPSOID = "WGS84"
GRID_CELL_SIZE_THM = 60.000
GRID_CELL_SIZE_REF = 30.000
ORIENTATION = "NUP"
RESAMPLING_OPTION = "CC"
MAP_PROJECTION = "UTM"
END_GROUP = PROJECTION_PARAMETERS
GROUP = UTM_PARAMETERS
ZONE_NUMBER = 45
END_GROUP = UTM_PARAMETERS
END_GROUP = L1_METADATA_FILE
END
You might first find the contour with the biggest area, then use some algorithm to find the points you want. It seems the satellite picture in the image is not a perfect rectangle, so you can't fit a rectangle to it with OpenCV's built-in methods. You could try something like this:
import cv2
import numpy as np
img = cv2.imread('z_edited.jpg')
imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(imgray, (11, 11), 0)
ret, thresh = cv2.threshold(blurred, 27, 255, 0)
cnts, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
max_area = 0
max_area_index = 0
for i, cnt in enumerate(cnts):
    area = cv2.contourArea(cnt)
    if area > max_area:
        max_area = area
        max_area_index = i
x_min = np.min(cnts[max_area_index][:, 0, 0])
x_max = np.max(cnts[max_area_index][:, 0, 0])
y_min = np.min(cnts[max_area_index][:, 0, 1])
y_max = np.max(cnts[max_area_index][:, 0, 1])
(x_left, y_left) = (x_min, cnts[max_area_index][np.max(np.where(cnts[max_area_index][:, 0, 0] == x_min)), 0, 1])
(x_right, y_right) = (x_max, cnts[max_area_index][np.max(np.where(cnts[max_area_index][:, 0, 0] == x_max)), 0, 1])
(x_down, y_down) = (cnts[max_area_index][np.max(np.where(cnts[max_area_index][:, 0, 1] == y_max)), 0, 0], y_max)
(x_top, y_top) = (cnts[max_area_index][np.max(np.where(cnts[max_area_index][:, 0, 1] == y_min)), 0, 0], y_min)
cv2.circle(img, (x_left, y_left), 10, (0, 0, 255), thickness=8)
cv2.circle(img, (x_right, y_right), 10, (0, 0, 255), thickness=8)
cv2.circle(img, (x_down, y_down), 10, (0, 0, 255), thickness=8)
cv2.circle(img, (x_top, y_top), 10, (0, 0, 255), thickness=8)
# cv2.drawContours(img, cnts, max_area_index, (0, 255, 0), 2)
cv2.namedWindow('s', cv2.WINDOW_NORMAL)
cv2.imshow('s', img)
cv2.waitKey(0)
And the result looks like:
Using this code you can find the pixel coordinates of the corners of the satellite picture inside the image (the red points).
Note that I have assumed your satellite picture's background is completely black (the image you uploaded has a thin gray strip around the whole image).
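Once you have the pixel coordinates of the green corners (for example from the contour code above, run on the full-resolution band rather than a downscaled JPEG), you can convert them to latitude/longitude via the GeoTIFF's geotransform. A minimal sketch with the osgeo bindings, assuming the band file name from the MTL above:
from osgeo import gdal, osr

ds = gdal.Open('L5145026_02620100615_B10.TIF')  # any band of the scene
gt = ds.GetGeoTransform()  # (originX, pixelWidth, rowRot, originY, colRot, pixelHeight)

def pixel_to_latlon(px, py):
    # pixel/line -> projected map coordinates (UTM zone 45N for this scene)
    map_x = gt[0] + px * gt[1] + py * gt[2]
    map_y = gt[3] + px * gt[4] + py * gt[5]
    # projected map coordinates -> WGS84 latitude/longitude
    src = osr.SpatialReference()
    src.ImportFromWkt(ds.GetProjection())
    dst = osr.SpatialReference()
    dst.ImportFromEPSG(4326)
    point = osr.CoordinateTransformation(src, dst).TransformPoint(map_x, map_y)
    return point[0], point[1]  # beware: GDAL 2 and 3 differ in lat/lon axis order

# e.g. for the top corner found by the contour code:
print(pixel_to_latlon(x_top, y_top))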
I am trying to render a style-mixing encoder image, but my output images are too small and I am not sure where I am going wrong.
My latent representations:
jon = np.load('latent_representations/example0.npy')
drogo = np.load('latent_representations/example1.npy')
# Loading already learned latent directions
smile_direction = np.load('ffhq_dataset/latent_directions/smile.npy')
gender_direction = np.load('ffhq_dataset/latent_directions/gender.npy')
age_direction = np.load('ffhq_dataset/latent_directions/age.npy')
My style-mixing drawing function:
def draw_style_mixing_figure(png, Gs, w, h, src_dlatents, dst_dlatents, style_ranges):
    print(png)
    #src_dlatents = Gs.components.mapping.run(src_latents, None) # [seed, layer, component]
    #dst_dlatents = Gs.components.mapping.run(dst_latents, None)
    src_images = Gs.components.synthesis.run(src_dlatents, randomize_noise=False, **synthesis_kwargs)
    dst_images = Gs.components.synthesis.run(dst_dlatents, randomize_noise=False, **synthesis_kwargs)
    canvas = PIL.Image.new('RGB', (w * (len(src_dlatents) + 1), h * (len(dst_dlatents) + 1)), 'white')
    for col, src_image in enumerate(list(src_images)):
        canvas.paste(PIL.Image.fromarray(src_image, 'RGB'), ((col + 1) * w, 0))
    for row, dst_image in enumerate(list(dst_images)):
        canvas.paste(PIL.Image.fromarray(dst_image, 'RGB'), (0, (row + 1) * h))
        row_dlatents = np.stack([dst_dlatents[row]] * len(src_dlatents))
        row_dlatents[:, style_ranges[row]] = src_dlatents[:, style_ranges[row]]
        row_images = Gs.components.synthesis.run(row_dlatents, randomize_noise=False, **synthesis_kwargs)
        for col, image in enumerate(list(row_images)):
            canvas.paste(PIL.Image.fromarray(image, 'RGB'), ((col + 1) * w, (row + 1) * h))
    canvas.save(png)
    return canvas.resize((512, 512))
How I call it:
tflib.init_tf()
synthesis_kwargs = dict(output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True), minibatch_size=1)
_Gs_cache = dict()
draw_style_mixing_figure(os.path.join(config.result_dir, 'style-mixing.png'), Gs, w=1024, h=1024, src_dlatents=jon.reshape((1, 12, 512)), dst_dlatents=drogo.reshape((1, 12, 512)), style_ranges=[range(1, 1)])
But the resulting pictures are too small. Any idea how to make them bigger?
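One thing worth checking: draw_style_mixing_figure saves the full-resolution grid to disk, but returns canvas.resize((512, 512)), so the whole 2048x2048 canvas is squeezed into a 512x512 preview, and that preview is what a notebook displays. A minimal sketch of a proportional preview instead (preview is a hypothetical helper):
def preview(canvas, preview_width=1024):
    # scale the whole grid down proportionally rather than forcing a 512x512 square
    w, h = canvas.size
    scale = preview_width / float(w)
    return canvas.resize((preview_width, int(h * scale)))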
White balancing is a rather well-covered topic, but most of the answers I have seen describe automatic white-balancing techniques for an entire image with no known reference for white, gray, and black. I cannot seem to find many that cover white balancing from a known point. I have a script (below) that takes an image of a color card (Spyder Checkr 48) and returns the white, 20% gray, and black color-card blocks:
Color        L      A      B      sR   sG   sB   aR   aG   aB
Card White   96.04  2.16   2.60   249  242  238  247  242  237
20% Gray     80.44  1.17   2.05   202  198  195  199  196  193
Card Black   16.91  1.43  -0.81    43   41   43   46   46   47
Question: Since I know the ground truth LAB, sRGB and AdobeRGB values for specific parts of the image, what would be the best way to white balance the image?
Here is a link to the images I am working with. This is the code for extracting the color-card blocks (I am currently running this on Windows, Python 3.7):
from __future__ import print_function
import cv2
import imutils
import numpy as np
from matplotlib import pyplot as plt
import os
import sys
image = cv2.imread("PATH_TO_IMAGE")
template = cv2.imread("PATH_TO_TEMPLATE")
rtemplate = cv2.imread("PATH_TO_RIGHT_TEMPLATE")
def sift(image):
    sift = cv2.xfeatures2d.SIFT_create()
    kp, des = sift.detectAndCompute(image, None)
    return kp, des
def sift_match(im1, im2, vis=False, save=False):
    MIN_MATCH_COUNT = 10
    FLANN_INDEX_KDTREE = 0
    kp1, des1 = sift(im1)
    kp2, des2 = sift(im2)
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=7)
    search_params = dict(checks=100)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)
    # Need to draw only good matches, so create a mask
    matchesMask = [[0, 0] for i in range(len(matches))]
    if vis is True:
        draw_params = dict(matchColor=(0, 255, 0),
                           singlePointColor=(255, 0, 0),
                           matchesMask=matchesMask,
                           flags=0)
        im3 = cv2.drawMatchesKnn(im1, kp1, im2, kp2, matches, None, **draw_params)
        if save:
            cv2.imwrite("tempSIFT_Match.png", im3)
        plt.imshow(im3), plt.show()
    good = []
    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            good.append(m)
    return kp1, des1, kp2, des2, good
def smartextractor(im1, im2, vis=False):
    # Detect features and compute descriptors.
    kp1, d1, kp2, d2, matches = sift_match(im1, im2, vis)
    kp1 = np.asarray(kp1)
    kp2 = np.asarray(kp2)
    # Extract location of good matches
    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)
    for i, match in enumerate(matches):
        points1[i, :] = kp1[match.queryIdx].pt
        points2[i, :] = kp2[match.trainIdx].pt
    # Find homography
    h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
    if h is None:
        print("could not find homography")
        return None, None
    # Use homography
    height, width, channels = im2.shape
    im1Reg = cv2.warpPerspective(im1, h, (width, height))
    return im1Reg, h
def show_images(images, cols=1, titles=None):
    """Display a list of images in a single figure with matplotlib."""
    assert (titles is None) or (len(images) == len(titles))
    n_images = len(images)
    if titles is None:
        titles = ['Image (%d)' % i for i in range(1, n_images + 1)]
    fig = plt.figure()
    for n, (image, title) in enumerate(zip(images, titles)):
        a = fig.add_subplot(cols, int(np.ceil(n_images / float(cols))), n + 1)
        if image.ndim == 2:
            plt.gray()
        plt.imshow(image)
        a.set_title(title)
    fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)
    plt.show()
def Sobel(img, bilateralFilter=True):
    # timestart = time.clock()
    try:
        img = cv2.imread(img, 0)
    except TypeError:
        pass  # img was already an array, not a file path
    try:
        rheight, rwidth, rdepth = img.shape
        img1 = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    except ValueError:
        raise TypeError
    # cv2.imwrite('temp.png', img)
    _, s, v = cv2.split(img1)
    b, g, r = cv2.split(img)
    if bilateralFilter is True:
        s = cv2.bilateralFilter(s, 11, 17, 17)
        v = cv2.bilateralFilter(v, 11, 17, 17)
        b = cv2.bilateralFilter(b, 11, 17, 17)
        g = cv2.bilateralFilter(g, 11, 17, 17)
        r = cv2.bilateralFilter(r, 11, 17, 17)
    # calculate Sobel in x, y and diagonal directions with the following kernels
    sobelx = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], dtype=np.float32)
    sobely = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]], dtype=np.float32)
    sobeldl = np.array([[0, 1, 2], [-1, 0, 1], [-2, -1, 0]], dtype=np.float32)
    sobeldr = np.array([[2, 1, 0], [1, 0, -1], [0, -1, -2]], dtype=np.float32)
    # calculate the Sobel on the value channel of HSV
    gx = cv2.filter2D(v, -1, sobelx)
    gy = cv2.filter2D(v, -1, sobely)
    gdl = cv2.filter2D(v, -1, sobeldl)
    gdr = cv2.filter2D(v, -1, sobeldr)
    # combine the Sobel responses on value
    xylrv = 0.25 * gx + 0.25 * gy + 0.25 * gdl + 0.25 * gdr
    # calculate the Sobel on the saturation channel of HSV
    sx = cv2.filter2D(s, -1, sobelx)
    sy = cv2.filter2D(s, -1, sobely)
    sdl = cv2.filter2D(s, -1, sobeldl)
    sdr = cv2.filter2D(s, -1, sobeldr)
    # combine the Sobel responses on saturation
    xylrs = 0.25 * sx + 0.25 * sy + 0.25 * sdl + 0.25 * sdr
    # combine the value and saturation Sobels
    xylrc = 0.5 * xylrv + 0.5 * xylrs
    xylrc[xylrc < 6] = 0
    # calculate the Sobel on green
    grx = cv2.filter2D(g, -1, sobelx)
    gry = cv2.filter2D(g, -1, sobely)
    grdl = cv2.filter2D(g, -1, sobeldl)
    grdr = cv2.filter2D(g, -1, sobeldr)
    # combine the Sobel responses on green
    xylrgr = 0.25 * grx + 0.25 * gry + 0.25 * grdl + 0.25 * grdr
    # calculate the Sobel on blue
    bx = cv2.filter2D(b, -1, sobelx)
    by = cv2.filter2D(b, -1, sobely)
    bdl = cv2.filter2D(b, -1, sobeldl)
    bdr = cv2.filter2D(b, -1, sobeldr)
    # combine the Sobel responses on blue
    xylrb = 0.25 * bx + 0.25 * by + 0.25 * bdl + 0.25 * bdr
    # calculate the Sobel on red
    rx = cv2.filter2D(r, -1, sobelx)
    ry = cv2.filter2D(r, -1, sobely)
    rdl = cv2.filter2D(r, -1, sobeldl)
    rdr = cv2.filter2D(r, -1, sobeldr)
    # combine the Sobel responses on red
    xylrr = 0.25 * rx + 0.25 * ry + 0.25 * rdl + 0.25 * rdr
    # combine the RGB Sobel outputs
    xylrrgb = 0.33 * xylrgr + 0.33 * xylrb + 0.33 * xylrr
    xylrrgb[xylrrgb < 6] = 0
    # combine the HSV and RGB Sobel outputs
    xylrc = 0.5 * xylrc + 0.5 * xylrrgb
    xylrc[xylrc < 6] = 0
    xylrc[xylrc > 25] = 255
    return xylrc
print("extracting image")
extractedImage, _ = smartextractor(image, template)
print("extracting right image")
rextractedImage, _ = smartextractor(extractedImage, rtemplate, vis=False)
grextractedImage = cv2.cvtColor(rextractedImage, cv2.COLOR_BGR2GRAY)
bfsobelImg = Sobel(rextractedImage)
sobelImg = Sobel(rextractedImage, bilateralFilter=False)
csobelImg = cv2.add(bfsobelImg, sobelImg)
csobelImg[csobelImg < 6] = 0
csobelImg[csobelImg > 18] = 255
csobelImg = csobelImg.astype(np.uint8)
img2 = csobelImg.copy()
ret, thresh = cv2.threshold(img2, 18, 255, 0)
contours = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = imutils.grab_contours(contours)
contours = sorted(contours, key=cv2.contourArea, reverse=True)
count = 0
trigger = False
for c in contours:
    # approximate the contour
    peri = cv2.arcLength(c, True)
    contours[count] = cv2.approxPolyDP(c, 0.05 * peri, True)
    if len(contours[count]) == 4:
        if trigger is False:
            screenCnt = contours[count]
            trigger = True
    count += 1
tl = screenCnt[0]
tr = screenCnt[1]
bl = screenCnt[3]
br = screenCnt[2]
tLy, tLx = tl[0]
tRy, tRx = tr[0]
bLy, bLx = bl[0]
bRy, bRx = br[0]
ratio = .15
realSpace = (3/16)
boxwidth = int(((tRx - tLx) + (bRx - bLx))*.5 - (tLx + bLx)*.5)
boxheight = int(((bRy - tRy) + (bLy - tLy))*.5 - (tRy + tLy)*.5)
spaceWidth = int((boxwidth + boxheight)*.5*realSpace)
boxcenter = [int(((bRy - tRy)*.5 + (bLy - tLy)*.5)*.5), int(((tRx - tLx)*.5 + (bRx - bLx)*.5)*.5)]
roitl = [boxcenter[0] - int(ratio*boxheight), boxcenter[1] - int(ratio*boxwidth)]
roitr = [boxcenter[0] - int(ratio*boxheight), boxcenter[1] + int(ratio*boxwidth)]
roibl = [boxcenter[0] + int(ratio*boxheight), boxcenter[1] - int(ratio*boxwidth)]
roibr = [boxcenter[0] + int(ratio*boxheight), boxcenter[1] + int(ratio*boxwidth)]
spacing = int((boxwidth + boxheight)*.5)+spaceWidth
roiWhite = np.array((roitl, roitr, roibr, roibl))
roiGray = np.array(([roitl[1], roitl[0]+spacing*1], [roitr[1], roitr[0]+spacing*1],
                    [roibr[1], roibr[0]+spacing*1], [roibl[1], roibl[0]+spacing*1]))
roiBlack = np.array(([roitl[1], roitl[0]+spacing*6], [roitr[1], roitr[0]+spacing*6],
                     [roibr[1], roibr[0]+spacing*6], [roibl[1], roibl[0]+spacing*6]))
whiteAvgb, whiteAvgg, whiteAvgr, _ = cv2.mean(rextractedImage[(roitl[0]+spacing*0):(roibr[0]+spacing*0),
                                                              roitl[1]:roibr[1]])
grayAvgb, grayAvgg, grayAvgr, _ = cv2.mean(rextractedImage[(roitl[0]+spacing*1):(roibr[0]+spacing*1),
                                                           roitl[1]:roibr[1]])
blackAvgb, blackAvgg, blackAvgr, _ = cv2.mean(rextractedImage[(roitl[0]+spacing*6):(roibr[0]+spacing*6),
                                                              roitl[1]:roibr[1]])
whiteROI = rextractedImage[(roitl[0]+spacing*0):(roibr[0]+spacing*0), roitl[1]:roibr[1]]
grayROI = rextractedImage[(roitl[0]+spacing*1):(roibr[0]+spacing*1), roitl[1]:roibr[1]]
blackROI = rextractedImage[(roitl[0]+spacing*6):(roibr[0]+spacing*6), roitl[1]:roibr[1]]
imageList = [whiteROI, grayROI, blackROI]
show_images(imageList, cols=1)
correctedImage = rextractedImage.copy()
whiteROI[:, :, 0] = whiteAvgb
whiteROI[:, :, 1] = whiteAvgg
whiteROI[:, :, 2] = whiteAvgr
grayROI[:, :, 0] = grayAvgb
grayROI[:, :, 1] = grayAvgg
grayROI[:, :, 2] = grayAvgr
blackROI[:, :, 0] = blackAvgb
blackROI[:, :, 1] = blackAvgg
blackROI[:, :, 2] = blackAvgr
imageList = [whiteROI, grayROI, blackROI]
show_images(imageList, cols=1)
# SPYDER COLOR CHECKR Values: http://www.bartneck.de/2017/10/24/patch-color-definitions-for-datacolor-spydercheckr-48/
blank = np.zeros_like(csobelImg)
maskedImg = blank.copy()
maskedImg = cv2.fillConvexPoly(maskedImg, roiWhite, 255)
maskedImg = cv2.fillConvexPoly(maskedImg, roiGray, 255)
maskedImg = cv2.fillConvexPoly(maskedImg, roiBlack, 255)
res = cv2.bitwise_and(rextractedImage, rextractedImage, mask=maskedImg)
# maskedImg = cv2.fillConvexPoly(maskedImg, roi2Black, 255)
cv2.drawContours(blank, contours, -1, 255, 3)
outputSquare = np.zeros_like(csobelImg)
cv2.drawContours(outputSquare, [screenCnt], -1, 255, 3)
imageList = [rextractedImage, grextractedImage, bfsobelImg, sobelImg, csobelImg, blank, outputSquare, maskedImg, res]
show_images(imageList, cols=3)
sys.exit()
Given the RGB value of a white patch, the image can be corrected for white balance by dividing by that value, i.e. by applying a linear transformation that makes the white patch have the same level in all three channels:
lum = (whiteR + whiteG + whiteB)/3
imgR = imgR * lum / whiteR
imgG = imgG * lum / whiteG
imgB = imgB * lum / whiteB
Multiplying by lum ensures that the average intensity doesn't change.
(The computation of lum would be more accurate with the proper luminance weights 0.2126, 0.7152 and 0.0722, but I wanted to keep it simple. This only makes a big difference if the input white is far off the mark, in which case you'll have other issues anyway.)
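In code, a minimal sketch of the above, assuming a uint8 BGR image as loaded by OpenCV and the white-patch channel averages (whiteAvgb, whiteAvgg, whiteAvgr) measured by the script in the question:
import numpy as np

def white_balance(img, whiteB, whiteG, whiteR):
    # scale each channel so the white patch becomes neutral while
    # keeping the patch's average intensity unchanged
    lum = (whiteB + whiteG + whiteR) / 3.0
    out = img.astype(np.float32)
    out[:, :, 0] *= lum / whiteB  # B
    out[:, :, 1] *= lum / whiteG  # G
    out[:, :, 2] *= lum / whiteR  # R
    return np.clip(out, 0, 255).astype(np.uint8)

corrected = white_balance(rextractedImage, whiteAvgb, whiteAvgg, whiteAvgr)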
Note that this transformation is best applied in linear RGB space. Both the image and the RGB values for white should first be converted to linear RGB if the image is stored as sRGB or similar (a raw image from the camera is linear RGB; a JPEG is sRGB). See here for the relevant equations.
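A sketch of the standard sRGB transfer functions, in case you want to do the scaling in linear space (arrays assumed scaled to [0, 1]):
def srgb_to_linear(c):
    # inverse sRGB companding
    return np.where(c <= 0.04045, c / 12.92, ((c + 0.055) / 1.055) ** 2.4)

def linear_to_srgb(c):
    # forward sRGB companding
    return np.where(c <= 0.0031308, 12.92 * c, 1.055 * c ** (1.0 / 2.4) - 0.055)

# e.g.: lin = srgb_to_linear(img.astype(np.float32) / 255.0)
#       ... white balance in linear RGB ...
#       out = (linear_to_srgb(lin) * 255.0).round().astype(np.uint8)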
For better precision, you can apply the above using the RGB values of the grey patch as well: compute the per-channel multiplication factor (lum / whiteR, etc.) from the white patch and from the grey patch, average the two for each channel, and apply those factors to the image.
The black level could be subtracted from the image before determining the white RGB values and correcting for white balance. This improves contrast and color perception, but it is not part of white balancing proper.
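Putting the grey-patch averaging and the black-level subtraction together, one possible sketch (the patch averages are the per-channel BGR triples measured by the question's script):
def white_balance_refined(img, white_bgr, gray_bgr, black_bgr):
    out = img.astype(np.float32)
    black = np.asarray(black_bgr, dtype=np.float32)
    out -= black  # subtract the black level first
    white = np.asarray(white_bgr, dtype=np.float32) - black
    gray = np.asarray(gray_bgr, dtype=np.float32) - black
    # average the per-channel factors implied by the white and grey patches
    factors = (white.mean() / white + gray.mean() / gray) / 2.0
    out *= factors
    return np.clip(out, 0, 255).astype(np.uint8)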
A full color correction is far more complex; I will not go into that here.