How to create surface from 3-D numpy array in Pyopengl? - python-3.x

I have created a 3-D numpy array consisting of X, Y and Z coordinates. Now I am trying to create a surface from these points in OpenGL, but all I have managed so far is a wireframe model, something like the one below. Can anyone suggest changes to my code to form an actual 3-D surface from the data?
Data file used: https://drive.google.com/open?id=1PWbNIt3xbchtQ9HIIS96k7ZjblzPO_wO
Code:
import OpenGL.GL as gl
import OpenGL.arrays.vbo as glvbo
from PyQt5.Qt import *
import numpy as np
import Backend_algo as Sb
import sys
import ctypes

def compile_vertex_shader(source):
    """Compile a vertex shader from source."""
    vertex_shader = gl.glCreateShader(gl.GL_VERTEX_SHADER)
    gl.glShaderSource(vertex_shader, source)
    gl.glCompileShader(vertex_shader)
    # check compilation error
    result = gl.glGetShaderiv(vertex_shader, gl.GL_COMPILE_STATUS)
    if not (result):
        raise RuntimeError(gl.glGetShaderInfoLog(vertex_shader))
    return vertex_shader

def compile_fragment_shader(source):
    """Compile a fragment shader from source."""
    fragment_shader = gl.glCreateShader(gl.GL_FRAGMENT_SHADER)
    gl.glShaderSource(fragment_shader, source)
    gl.glCompileShader(fragment_shader)
    result = gl.glGetShaderiv(fragment_shader, gl.GL_COMPILE_STATUS)
    if not (result):
        raise RuntimeError(gl.glGetShaderInfoLog(fragment_shader))
    return fragment_shader

def link_shader_program(vertex_shader, fragment_shader):
    """Create a shader program from compiled shaders."""
    program = gl.glCreateProgram()
    gl.glAttachShader(program, vertex_shader)
    gl.glAttachShader(program, fragment_shader)
    gl.glLinkProgram(program)
    result = gl.glGetProgramiv(program, gl.GL_LINK_STATUS)
    if not (result):
        raise RuntimeError(gl.glGetProgramInfoLog(program))
    return program

VS = '''
attribute vec3 position;
uniform float right;
uniform float bottom;
uniform float left;
uniform float top;
uniform float far;
uniform float near;
void main() {
    mat4 testmat = mat4(
        vec4(2.0 / (right - left), 0, 0, 0),
        vec4(0, 2.0 / (top - bottom), 0, 0),
        vec4(0, 0, -2.0 / (far - near), 0),
        vec4(-(right + left) / (right - left), -(top + bottom) / (top - bottom), -(far + near) / (far - near), 1)
    );
    gl_Position = testmat * vec4(position, 1.);
}
'''

FS = '''
#version 450
// Output variable of the fragment shader, which is a 4D vector containing the
// RGBA components of the pixel color.
uniform vec3 triangleColor;
out vec4 outColor;
void main()
{
    outColor = vec4(triangleColor, 1.0);
}
'''

class GLPlotWidget3D(QGLWidget):
    def __init__(self, *args):
        # QGLWidget.__init__(self)
        super(GLPlotWidget3D, self).__init__()
        # self.parent = args[0]
        self.width, self.height = 100, 100
        self.right, self.left, self.top, self.bottom = 21000, -21000, 10, -10
        self.data = np.zeros((3, 10, 2))
        self.vbo = glvbo.VBO(self.data)
        self.showMaximized()

    def initializeGL(self):
        vs = Sb.compile_vertex_shader(VS)
        fs = Sb.compile_fragment_shader(FS)
        self.shaders_program = link_shader_program(vs, fs)
        self.e = np.load(('three.npy'), mmap_mode='r')
        self.e = np.array(self.e, dtype=np.float32)
        self.right, self.left, self.top, self.bottom, self.far, self.near = self.e[:, :, 1].min(), self.e[:, :, 1].max(), self.e[:, :, 0].min(), self.e[:, :, 0].max(), self.e[:, :, 2].max(), self.e[:, :, 2].min()

    def ortho_view(self):
        right = gl.glGetUniformLocation(self.shaders_program, "right")
        gl.glUniform1f(right, self.right)
        left = gl.glGetUniformLocation(self.shaders_program, "left")
        gl.glUniform1f(left, self.left)
        top = gl.glGetUniformLocation(self.shaders_program, "top")
        gl.glUniform1f(top, self.top)
        bottom = gl.glGetUniformLocation(self.shaders_program, "bottom")
        gl.glUniform1f(bottom, self.bottom)
        far = gl.glGetUniformLocation(self.shaders_program, "far")
        gl.glUniform1f(far, self.far)
        near = gl.glGetUniformLocation(self.shaders_program, "near")
        gl.glUniform1f(near, self.near)

    def paintGL(self):
        self.resizeGL(self.width, self.height)
        gl.glClearColor(0.2, 0.2, 0.2, 0)
        gl.glClear(gl.GL_COLOR_BUFFER_BIT)
        gl.glUseProgram(self.shaders_program)
        buffer = gl.glGenBuffers(1)
        gl.glBindBuffer(gl.GL_ARRAY_BUFFER, buffer)
        stride = self.e.strides[0]
        offset = ctypes.c_void_p(1)
        loc = gl.glGetAttribLocation(self.shaders_program, "position")
        gl.glEnableVertexAttribArray(loc)
        gl.glVertexAttribPointer(loc, 3, gl.GL_FLOAT, False, stride, offset)
        gl.glBufferData(gl.GL_ARRAY_BUFFER, self.e.nbytes, self.e, gl.GL_DYNAMIC_DRAW)
        gl.glDrawArrays(gl.GL_LINE_LOOP, 0, self.e.shape[0])
        self.ortho_view()
        uni_color = gl.glGetUniformLocation(self.shaders_program, "triangleColor")
        gl.glUniform3f(uni_color, 0.9, 0.9, 0.9)

    def resizeGL(self, width, height):
        self.width, self.height = width, height
        gl.glViewport(0, 0, width, height)

def main():
    app = QApplication(sys.argv)
    editor = GLPlotWidget3D()
    editor.show()
    sys.exit(app.exec_())

if __name__ == '__main__':
    main()

'three.npy' contains a 3-dimensional array (7782 x 24 x 3) with the vertex coordinates of a tube. The 3rd dimension, with a size of 3, contains the x, y and z coordinates of the vertices. The vertices are organized in 7782 rings with 24 points around the circumference.
Read the vertex coordinates into a flat buffer (the numpy array is flattened automatically by glBufferData).
Generate an array of indices (indices into the vertex buffer). The indices describe GL_TRIANGLE_STRIP primitives that stack up 7781 bands, each band consisting of 24 quads around the circumference.
self.e = np.load(('three.npy'), mmap_mode='r')
self.e = np.array(self.e, dtype=np.float32)

self.elems = []
ring_c = self.e.shape[1]
slice_c = self.e.shape[0]
for si in range(slice_c-1):
    self.elems += [si*ring_c, si*ring_c]
    for ri in range(ring_c+1):
        ie = ri % ring_c
        self.elems += [ie+si*ring_c, ie+(si+1)*ring_c]
self.elems = np.array(self.elems, dtype=np.int32)
The x and y components of the vertices are in the range [-10, 10], but the z component is in the range [3, 29724672]:
x min        x max       y min        y max      z min      z max
-10.589109   10.517833   -10.464569   10.594374  3.1618009  29724672.0
I recommend defining a scale factor for the z coordinate:
self.scaleZ = 0.000001
Create a Vertex Buffer Object (GL_ARRAY_BUFFER) for the vertices and an Index Buffer Object (GL_ELEMENT_ARRAY_BUFFER) for the indices:
self.vertexbuffer = gl.glGenBuffers(1)
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vertexbuffer)
gl.glBufferData(gl.GL_ARRAY_BUFFER, self.e, gl.GL_DYNAMIC_DRAW)
self.elementbuffer = gl.glGenBuffers(1)
gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.elementbuffer)
gl.glBufferData(gl.GL_ELEMENT_ARRAY_BUFFER, self.elems, gl.GL_DYNAMIC_DRAW)
Specify the array of vertex coordinates. See Vertex Specification.
The stride and offset parameters of glVertexAttribPointer have to be 0 here.
stride specifies the byte offset between consecutive generic vertex attributes; it has to be either 3*self.e.itemsize (12) or 0. 0 has a special meaning and interprets the attributes as tightly packed; if stride is 0, it is computed from the size and type parameters.
offset has to be ctypes.c_void_p(0) or None, because the offset of the first attribute in the array is 0.
In any case, the unit of stride and offset is bytes.
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vertexbuffer)
stride = 0 # 3*self.e.itemsize
offset = None # ctypes.c_void_p(0)
loc = self.attrib['position']
gl.glEnableVertexAttribArray(loc)
gl.glVertexAttribPointer(loc, 3, gl.GL_FLOAT, False, stride, offset)
The primitive type is GL_TRIANGLE_STRIP and the index buffer has to be bound before the elements are drawn by glDrawElements:
gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.elementbuffer)
self.perspective_view()
gl.glUniform3f(self.uniform['triangleColor'], 1, 1, 1)
gl.glDrawElements(gl.GL_TRIANGLE_STRIP, self.elems.size, gl.GL_UNSIGNED_INT, None)
Instead of specifying an orthographic projection matrix in the vertex shader, I recommend using matrix uniform variables for the projection, view and model transformations.
The projection matrix defines the projection of the 3-dimensional viewing volume onto the 2-dimensional viewport. The view matrix defines the viewing position and viewing direction onto the scene. The model matrix defines the scale and animation of the model.
attribute vec3 position;
uniform mat4 u_proj;
uniform mat4 u_view;
uniform mat4 u_model;
void main() {
gl_Position = u_proj * u_view * u_model * vec4(position, 1.0);
}
Get the attribute index and uniform locations after the shader program is linked:
vs = compile_vertex_shader(VS)
fs = compile_fragment_shader(FS)
self.shaders_program = link_shader_program(vs, fs)
self.attrib = { a : gl.glGetAttribLocation (self.shaders_program, a) for a in ['position'] }
print(self.attrib)
self.uniform = { u : gl.glGetUniformLocation (self.shaders_program, u) for u in ['u_model', 'u_view', 'u_proj', "triangleColor"] }
print(self.uniform)
For a 3D look, I recommend using a perspective projection rather than an orthographic projection.
Use numpy.array or numpy.matrix to set the matrices.
# projection matrix
aspect, ta, near, far = self.width/self.height, np.tan(np.radians(90.0) / 2), 0.1, 50
proj = np.matrix(((1/ta/aspect, 0, 0, 0), (0, 1/ta, 0, 0), (0, 0, -(far+near)/(far-near), -1), (0, 0, -2*far*near/(far-near), 0)), np.float32)
# view matrix
view = np.matrix(((1, 0, 0, 0), (0, 0, -1, 0), (0, 1, 0, 0), (0, 0, -30, 1)), np.float32)
# model matrix
c, s = math.cos(self.angle), math.sin(self.angle)
model = scale
gl.glUniformMatrix4fv(self.uniform['u_proj'], 1, gl.GL_FALSE, proj)
gl.glUniformMatrix4fv(self.uniform['u_view'], 1, gl.GL_FALSE, view)
gl.glUniformMatrix4fv(self.uniform['u_model'], 1, gl.GL_FALSE, model)
Full example:
(The fragment shader tints the fragments depending on their depth.)
import OpenGL.GL as gl
import OpenGL.arrays.vbo as glvbo
from PyQt5.Qt import *
import numpy as np
#import Backend_algo as Sb
import sys
import ctypes
import os
import math

sourceFileDir = os.path.dirname(os.path.abspath(__file__))

def compile_vertex_shader(source):
    """Compile a vertex shader from source."""
    vertex_shader = gl.glCreateShader(gl.GL_VERTEX_SHADER)
    gl.glShaderSource(vertex_shader, source)
    gl.glCompileShader(vertex_shader)
    # check compilation error
    result = gl.glGetShaderiv(vertex_shader, gl.GL_COMPILE_STATUS)
    if not (result):
        raise RuntimeError(gl.glGetShaderInfoLog(vertex_shader))
    return vertex_shader

def compile_fragment_shader(source):
    """Compile a fragment shader from source."""
    fragment_shader = gl.glCreateShader(gl.GL_FRAGMENT_SHADER)
    gl.glShaderSource(fragment_shader, source)
    gl.glCompileShader(fragment_shader)
    result = gl.glGetShaderiv(fragment_shader, gl.GL_COMPILE_STATUS)
    if not (result):
        raise RuntimeError(gl.glGetShaderInfoLog(fragment_shader))
    return fragment_shader

def link_shader_program(vertex_shader, fragment_shader):
    """Create a shader program from compiled shaders."""
    program = gl.glCreateProgram()
    gl.glAttachShader(program, vertex_shader)
    gl.glAttachShader(program, fragment_shader)
    gl.glLinkProgram(program)
    result = gl.glGetProgramiv(program, gl.GL_LINK_STATUS)
    if not (result):
        raise RuntimeError(gl.glGetProgramInfoLog(program))
    return program

VS = '''
attribute vec3 position;
uniform mat4 u_proj;
uniform mat4 u_view;
uniform mat4 u_model;
void main() {
    gl_Position = u_proj * u_view * u_model * vec4(position, 1.0);
}
'''

FS = '''
#version 450
out vec4 outColor;
uniform vec3 triangleColor;
void main()
{
    float d = 1.0 - gl_FragCoord.z;
    outColor = vec4(triangleColor * d, 1.0);
}
'''

class GLPlotWidget3D(QGLWidget):
    def __init__(self, *args):
        # QGLWidget.__init__(self)
        super(GLPlotWidget3D, self).__init__()
        # self.parent = args[0]
        self.width, self.height = 100, 100
        self.right, self.left, self.top, self.bottom = 21000, -21000, 10, -10
        self.data = np.zeros((3, 10, 2))
        self.vbo = glvbo.VBO(self.data)
        self.showMaximized()

    def initializeGL(self):
        vs = compile_vertex_shader(VS)
        fs = compile_fragment_shader(FS)
        self.shaders_program = link_shader_program(vs, fs)
        self.attrib = { a : gl.glGetAttribLocation(self.shaders_program, a) for a in ['position'] }
        print(self.attrib)
        self.uniform = { u : gl.glGetUniformLocation(self.shaders_program, u) for u in ['u_model', 'u_view', 'u_proj', "triangleColor"] }
        print(self.uniform)

        self.e = np.load((os.path.join(sourceFileDir, 'three.npy')), mmap_mode='r')
        self.e = np.array(self.e, dtype=np.float32)
        print(self.e.shape)

        self.elems = []
        ring_c = self.e.shape[1]
        slice_c = self.e.shape[0]
        for si in range(slice_c-1):
            self.elems += [si*ring_c, si*ring_c]
            for ri in range(ring_c+1):
                ie = ri % ring_c
                self.elems += [ie+si*ring_c, ie+(si+1)*ring_c]
        self.elems = np.array(self.elems, dtype=np.int32)

        self.vertexbuffer = gl.glGenBuffers(1)
        gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vertexbuffer)
        gl.glBufferData(gl.GL_ARRAY_BUFFER, self.e, gl.GL_DYNAMIC_DRAW)

        self.elementbuffer = gl.glGenBuffers(1)
        gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.elementbuffer)
        gl.glBufferData(gl.GL_ELEMENT_ARRAY_BUFFER, self.elems, gl.GL_DYNAMIC_DRAW)

        self.scaleZ = 0.000001
        self.right, self.left, self.top, self.bottom, self.far, self.near = self.e[:, :, 1].min(), self.e[:, :, 1].max(), self.e[:, :, 0].min(), self.e[:, :, 0].max(), self.e[:, :, 2].max(), self.e[:, :, 2].min()
        print(self.right, self.left, self.top, self.bottom, self.far, self.near)
        self.far *= self.scaleZ
        self.near *= self.scaleZ
        self.angle = 0.0

        self.GLtimer = QTimer()
        self.GLtimer.setInterval(10)
        self.GLtimer.timeout.connect(self.redraw)
        self.GLtimer.start()

    def redraw(self):
        self.angle += 0.01
        self.update()

    def perspective_view(self):
        # projection matrix
        aspect, ta, near, far = self.width/self.height, np.tan(np.radians(90.0) / 2), 10, 50
        proj = np.matrix(((1/ta/aspect, 0, 0, 0), (0, 1/ta, 0, 0), (0, 0, -(far+near)/(far-near), -1), (0, 0, -2*far*near/(far-near), 0)), np.float32)
        # view matrix
        view = np.matrix(((1, 0, 0, 0), (0, 0, -1, 0), (0, 1, 0, 0), (0, 0, -30, 1)), np.float32)
        # model matrix
        c, s = math.cos(self.angle), math.sin(self.angle)
        scale = np.matrix(((1, 0, 0, 0), (0, 1, 0, 0), (0, 0, self.scaleZ, 0), (0, 0, 0, 1)), np.float32)
        rotZ = np.array(((c, s, 0, 0), (-s, c, 0, 0), (0, 0, 1, 0), (0, 0, 0, 1)), np.float32)
        rotY = np.matrix(((0, 0, 1, 0), (0, 1, 0, 0), (-1, 0, 0, 0), (0, 0, 0, 1)), np.float32)
        trans = np.matrix(((1, 0, 0, 0), (0, 1, 0, 0), (0, 0, 1, 0), (0, 0, (self.near - self.far)/2, 1)), np.float32)
        model = scale * trans * rotY * rotZ
        gl.glUniformMatrix4fv(self.uniform['u_proj'], 1, gl.GL_FALSE, proj)
        gl.glUniformMatrix4fv(self.uniform['u_view'], 1, gl.GL_FALSE, view)
        gl.glUniformMatrix4fv(self.uniform['u_model'], 1, gl.GL_FALSE, model)

    def paintGL(self):
        self.resizeGL(self.width, self.height)
        gl.glClearColor(0.2, 0.2, 0.2, 0)
        gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
        gl.glEnable(gl.GL_DEPTH_TEST)
        gl.glUseProgram(self.shaders_program)

        gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vertexbuffer)
        stride = 0  # 3*self.e.itemsize
        offset = None  # ctypes.c_void_p(0)
        loc = self.attrib['position']
        gl.glEnableVertexAttribArray(loc)
        gl.glVertexAttribPointer(loc, 3, gl.GL_FLOAT, False, stride, offset)

        gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.elementbuffer)

        self.perspective_view()
        gl.glUniform3f(self.uniform['triangleColor'], 1, 1, 1)

        #gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)
        gl.glDrawElements(gl.GL_TRIANGLE_STRIP, self.elems.size, gl.GL_UNSIGNED_INT, None)

    def resizeGL(self, width, height):
        self.width, self.height = width, height
        gl.glViewport(0, 0, width, height)

def main():
    app = QApplication(sys.argv)
    editor = GLPlotWidget3D()
    editor.show()
    sys.exit(app.exec_())

if __name__ == '__main__':
    main()

Related

How can we store the coordinates of a detected face and apply the grabcut algorithm to crop it

I have separate code for face detection and for the grabcut algorithm. I want to run them as one script: first detect the face and store the coordinates of the rectangle, then apply grabcut to crop the face using that rectangle. Please help me.
import cv2
import numpy as np
#---loading haarcascade detector---
face_detector=cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
#---Loading the image-----
img = cv2.imread('/home/mongoose/Videos/Projects/backgroundremoval/detectface/1665410456980.JPEG')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_detector.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
padding = 50
cv2.rectangle(img,(x-padding,y-padding),(x+w+padding,y+h+padding),(0,255,0),2)
cv2.imshow('img',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
import cv2
import numpy as np
drawn = False
startx, starty = -1, -1
rectangle = (0, 0, 0, 0)
def load_and_resize(path):
image = cv2.imread(path)
new_size = (800, 800)
resized_image = cv2.resize(image, new_size, interpolation=cv2.INTER_AREA)
return resized_image
def select_roi(event, newx, newy, flags, params):
global startx, starty, drawn, rectangle
if event == cv2.EVENT_LBUTTONDOWN:
startx, starty = newx, newy
cv2.circle(image, (startx, starty), 4, (255, 255, 120), -1)
elif event == cv2.EVENT_LBUTTONUP:
drawn = True
rectangle = (startx, starty, newx - startx, newy - starty)
print("\nROI Selected Successfully")
def extract_foreground(image):
global drawn
cv2.namedWindow(winname='BG Subractor')
cv2.setMouseCallback('BG Subractor', select_roi)
print("\nSelect ROI from mouse pointer.")
black_mask = np.zeros(image.shape[:2], np.uint8)
background = np.zeros((1, 65), np.float64)
foreground = np.zeros((1, 65), np.float64)
while True:
if drawn:
print("\nPerforming Background Subtraction")
cv2.grabCut(image, black_mask, rectangle,background, foreground, 50, cv2.GC_INIT_WITH_RECT)
mask2 = np.where((black_mask == 2) | (black_mask == 0), 0, 1).astype('uint8')
image = image * mask2[:, :, np.newaxis]
drawn = False
print("\nExtraction complete")
cv2.imshow('BG Subractor', image)
cv2.imwrite('/home/mongoose/Videos/Projects/backgroundremoval/bg_grabcut-20221014T120042Z-001/bg_grabcut/new/1.jpg',image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.destroyAllWindows()
Please help me run the code by combining both scripts.
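Not an authoritative answer, but a minimal sketch of the pipeline described above: detect the first face, pad its rectangle, and feed that rectangle to grabCut as the GC_INIT_WITH_RECT seed. The file paths, padding and iteration count are placeholder assumptions.
import cv2
import numpy as np

face_detector = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
img = cv2.imread("input.jpg")  # placeholder path
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_detector.detectMultiScale(gray, 1.3, 5)

if len(faces) > 0:
    x, y, w, h = faces[0]
    padding = 50  # same padding as in the detection script
    rect = (max(x - padding, 0), max(y - padding, 0), w + 2 * padding, h + 2 * padding)

    # grabCut seeded with the padded face rectangle
    mask = np.zeros(img.shape[:2], np.uint8)
    bgd_model = np.zeros((1, 65), np.float64)
    fgd_model = np.zeros((1, 65), np.float64)
    cv2.grabCut(img, mask, rect, bgd_model, fgd_model, 5, cv2.GC_INIT_WITH_RECT)

    # keep only probable/definite foreground, then crop to the rectangle
    mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
    result = img * mask2[:, :, np.newaxis]
    cropped = result[rect[1]:rect[1] + rect[3], rect[0]:rect[0] + rect[2]]
    cv2.imwrite("face_grabcut.jpg", cropped)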

vtk: how to obtain the image pixel index from a world point

If I pick a world point from an image, how can I convert the world coordinate to an image index?
import vtk
import numpy as np
from vtk.util.numpy_support import numpy_to_vtk
def numpyToVTK(data, multi_component=False, type='float'):
if type == 'float':
data_type = vtk.VTK_FLOAT
elif type == 'char':
data_type = vtk.VTK_UNSIGNED_CHAR
else:
raise RuntimeError('unknown type')
if multi_component == False:
if len(data.shape) == 2:
data = data[:, :, np.newaxis]
flat_data_array = data.transpose(2,1,0).flatten()
vtk_data = numpy_to_vtk(num_array=flat_data_array, deep=True, array_type=data_type)
shape = data.shape
else:
assert len(data.shape) == 3, 'only test for 2D RGB'
flat_data_array = data.transpose(1, 0, 2)
flat_data_array = np.reshape(flat_data_array, newshape=[-1, data.shape[2]])
vtk_data = numpy_to_vtk(num_array=flat_data_array, deep=True, array_type=data_type)
shape = [data.shape[0], data.shape[1], 1]
img = vtk.vtkImageData()
img.GetPointData().SetScalars(vtk_data)
img.SetDimensions(shape[0], shape[1], shape[2])
return img
global sphereActor, textActor
sphereActor = None
textActor = None
def mouseMoveEvent(iren, event):
x, y = iren.GetEventPosition()
picker = vtk.vtkWorldPointPicker()
picker.Pick(x, y, 0, render)
worldPoint = picker.GetPickPosition()
##############################################
## convert world point to image index
##############################################
sphere = vtk.vtkSphereSource()
sphere.SetCenter(worldPoint[0], worldPoint[1], worldPoint[2])
sphere.SetRadius(2)
sphere.Update()
sphereMapper = vtk.vtkPolyDataMapper()
sphereMapper.SetInputData(sphere.GetOutput())
global sphereActor, textActor
if sphereActor != None:
render.RemoveActor(sphereActor)
sphereActor = vtk.vtkActor()
sphereActor.SetMapper(sphereMapper)
sphereActor.GetProperty().SetColor(255, 0, 0)
render.AddActor(sphereActor)
render.Render()
if textActor != None:
render.RemoveActor(textActor)
textActor = vtk.vtkTextActor()
textActor.SetInput('world coordinate: (%.2f, %.2f, %.2f)'%(worldPoint[0], worldPoint[1], worldPoint[2]))
textActor.GetTextProperty().SetColor(1, 0, 0)
textActor.GetTextProperty().SetFontSize(15)
render.AddActor(textActor)
img = np.zeros(shape=[128, 128])
for i in range(128):
for j in range(128):
img[i, j] = i+j
vtkImg = numpyToVTK(img)
imgActor = vtk.vtkImageActor()
imgActor.SetInputData(vtkImg)
render = vtk.vtkRenderer()
render.AddActor(imgActor)
# render.Render()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(render)
renWin.Render()
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())
iren.Initialize()
iren.AddObserver('MouseMoveEvent', mouseMoveEvent)
iren.Start()
In the above code, if I don't rotate the image, the world point is (x, y, 0).
And it agrees with what I know. For the world point (x, y, z) and the image index (i, j, k), the conversion should be:
worldPoint (x,y,z) = i*spacingX*directionX + j*spacingY*directionY + k*spacingZ*directionZ + originPoint
In the above code, the image is converted from numpy, thus:
directionX = [1, 0, 0]
directionY = [0, 1, 0]
directionZ = [0, 0, 1]
originPoint=[0, 0, 0]
spacingX=1
spacingY=1
spacingZ=1
In this way, x=i, y=j, z=k. Since this is a 2D image, k should be 0 and z should also be 0.
Then, when I rotate the image, z is not 0, like in the following picture.
I don't know why z is -0.24.
That means the above conversion is wrong. How can I obtain the image index from the world point?
worldPoint (x,y,z) = i*spacingX*directionX + j*spacingY*directionY + k*spacingZ*directionZ + originPoint
Any suggestion is appreciated!
vtkImageData has the method TransformPhysicalPointToContinuousIndex for going from world space to image space and TransformIndexToPhysicalPoint to go the other way.
I don't think the computation you're doing is right, since the direction is a 3x3 rotation matrix.
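A minimal sketch of how that might look inside the mouseMoveEvent above, assuming a recent VTK where vtkImageData exposes these methods (vtkImg and worldPoint are the names from the question's code):
# world space -> continuous image index (i, j, k as floats)
ijk = [0.0, 0.0, 0.0]
vtkImg.TransformPhysicalPointToContinuousIndex(worldPoint, ijk)
print('image index: (%.2f, %.2f, %.2f)' % (ijk[0], ijk[1], ijk[2]))

# and back: integer index -> world space
xyz = [0.0, 0.0, 0.0]
vtkImg.TransformIndexToPhysicalPoint(int(round(ijk[0])), int(round(ijk[1])), int(round(ijk[2])), xyz)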

How to set background greyscale of multiple line plots in pyopengl?

I have a 2-D numpy array that I have plotted in PyOpenGL using PyQt. Now I want to make the background of the plot greyscale, such that when a line moves up or down, the grey intensity of its background changes. I am attaching images of the behaviour I want,
but all I have been able to create so far is a plot with a white background, like this.
The code that I have written is:
import OpenGL.GL as gl
import OpenGL.arrays.vbo as glvbo
from PyQt5.Qt import *
import numpy as np
import sys
VS = '''
#version 450
layout(location = 0) in vec2 position;
uniform float right;
uniform float bottom;
uniform float left;
uniform float top;
void main() {
const float far = 1.0;
const float near = -1.0;
mat4 testmat = mat4(
vec4(2.0 / (right - left), 0, 0, 0),
vec4(0, 2.0 / (top - bottom), 0, 0),
vec4(0, 0, -2.0 / (far - near), 0),
vec4(-(right + left) / (right - left), -(top + bottom) / (top - bottom), -(far + near) / (far - near), 1)
);
gl_Position = testmat * vec4(position.x, position.y, 0., 1.);
}
'''
FS = '''
#version 450
// Output variable of the fragment shader, which is a 4D vector containing the
// RGBA components of the pixel color.
uniform vec3 triangleColor;
out vec4 outColor;
void main()
{
outColor = vec4(triangleColor, 1.0);
}
'''
def compile_vertex_shader(source):
"""Compile a vertex shader from source."""
vertex_shader = gl.glCreateShader(gl.GL_VERTEX_SHADER)
gl.glShaderSource(vertex_shader, source)
gl.glCompileShader(vertex_shader)
# check compilation error
result = gl.glGetShaderiv(vertex_shader, gl.GL_COMPILE_STATUS)
if not (result):
raise RuntimeError(gl.glGetShaderInfoLog(vertex_shader))
return vertex_shader
def compile_fragment_shader(source):
"""Compile a fragment shader from source."""
fragment_shader = gl.glCreateShader(gl.GL_FRAGMENT_SHADER)
gl.glShaderSource(fragment_shader, source)
gl.glCompileShader(fragment_shader)
result = gl.glGetShaderiv(fragment_shader, gl.GL_COMPILE_STATUS)
if not (result):
raise RuntimeError(gl.glGetShaderInfoLog(fragment_shader))
return fragment_shader
def link_shader_program(vertex_shader, fragment_shader):
"""Create a shader program with from compiled shaders."""
program = gl.glCreateProgram()
gl.glAttachShader(program, vertex_shader)
gl.glAttachShader(program, fragment_shader)
gl.glLinkProgram(program)
result = gl.glGetProgramiv(program, gl.GL_LINK_STATUS)
if not (result):
raise RuntimeError(gl.glGetProgramInfoLog(program))
return program
class GLPlotWidget(QGLWidget):
def __init__(self, *args):
super(GLPlotWidget, self).__init__()
self.width, self.height = 100, 100
self.e = np.load('two.npy', mmap_mode='r')
self.vbo = glvbo.VBO(self.e)
self.count = self.vbo.shape[1]
self.right, self.left, self.top, self.bottom = self.e[0, -1, 0].min(), self.e[0, 0, 0].max(), self.e[0, :, 1].max(), self.e[-1, :, 1].min()
# self.data = np.zeros((10, 2))
self.showMaximized()
def initializeGL(self):
vs = compile_vertex_shader(VS)
fs = compile_fragment_shader(FS)
self.shaders_program = link_shader_program(vs, fs)
def ortho_view(self):
right = gl.glGetUniformLocation(self.shaders_program, "right")
gl.glUniform1f(right, self.right)
left = gl.glGetUniformLocation(self.shaders_program, "left")
gl.glUniform1f(left, self.left)
top = gl.glGetUniformLocation(self.shaders_program, "top")
gl.glUniform1f(top, self.top)
bottom = gl.glGetUniformLocation(self.shaders_program, "bottom")
gl.glUniform1f(bottom, self.bottom)
def paintGL(self):
self.resizeGL(self.width, self.height)
gl.glDisable(gl.GL_LINE_STIPPLE)
gl.glClearColor(1, 1,1, 0)
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
self.vbo.bind()
gl.glEnableVertexAttribArray(0)
gl.glVertexAttribPointer(0, 2, gl.GL_FLOAT, gl.GL_FALSE, 0, None)
gl.glUseProgram(self.shaders_program)
self.ortho_view()
uni_color = gl.glGetUniformLocation(self.shaders_program, "triangleColor")
for i in range(0, self.vbo.data.shape[0]):
gl.glUniform3f(uni_color, 0, 0, 0)
# gl.glLineWidth(1.8)
gl.glDrawArrays(gl.GL_LINE_STRIP, i * self.count, self.count)
self.vbo.unbind()
def resizeGL(self, width, height):
self.width, self.height = width, height
gl.glViewport(0, 0, width, height)
def main():
app = QApplication(sys.argv)
editor = GLPlotWidget()
editor.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
Can anyone suggest a method to solve this problem?
File link: https://drive.google.com/file/d/1y6w35kuMguR1YczK7yMJpXU86T6qtGSv/view?usp=sharing
Edit: I tried to increase the line width of the points and set a color on them, but the result is not even close to satisfactory, and I am unable to understand how to pass per-vertex colors to the triangles.
Got it... I have rendered triangles and set per-vertex colors to get the desired result:
import OpenGL.GL as gl
import OpenGL.arrays.vbo as glvbo
from PyQt5.Qt import *
import numpy as np
import sys
import copy
VS = '''
#version 450
attribute vec2 position;
attribute vec3 a_Color;
out vec3 g_color;
void main() {
gl_Position = vec4(position.x, position.y, 0., 1.);
g_color = a_Color;
}
'''
FS = '''
#version 450
// Output variable of the fragment shader, which is a 4D vector containing the
// RGBA components of the pixel color.
in vec3 g_color;
out vec4 outColor;
void main()
{
outColor = vec4(g_color, 1.0);
}
'''
VS1 = '''
#version 450
layout(location = 0) in vec2 position;
void main() {
gl_Position = vec4(position.x, position.y, 0.0, 1.);
}
'''
FS1 = '''
#version 450
// Output variable of the fragment shader, which is a 4D vector containing the
// RGBA components of the pixel color.
uniform vec3 triangleColor;
out vec4 outColor;
void main()
{
outColor = vec4(triangleColor, 1.0);
}
'''
def compile_vertex_shader(source):
"""Compile a vertex shader from source."""
vertex_shader = gl.glCreateShader(gl.GL_VERTEX_SHADER)
gl.glShaderSource(vertex_shader, source)
gl.glCompileShader(vertex_shader)
# check compilation error
result = gl.glGetShaderiv(vertex_shader, gl.GL_COMPILE_STATUS)
if not (result):
raise RuntimeError(gl.glGetShaderInfoLog(vertex_shader))
return vertex_shader
def compile_fragment_shader(source):
"""Compile a fragment shader from source."""
fragment_shader = gl.glCreateShader(gl.GL_FRAGMENT_SHADER)
gl.glShaderSource(fragment_shader, source)
gl.glCompileShader(fragment_shader)
result = gl.glGetShaderiv(fragment_shader, gl.GL_COMPILE_STATUS)
if not (result):
raise RuntimeError(gl.glGetShaderInfoLog(fragment_shader))
return fragment_shader
def link_shader_program(vertex_shader, fragment_shader):
"""Create a shader program with from compiled shaders."""
program = gl.glCreateProgram()
gl.glAttachShader(program, vertex_shader)
gl.glAttachShader(program, fragment_shader)
gl.glLinkProgram(program)
result = gl.glGetProgramiv(program, gl.GL_LINK_STATUS)
if not (result):
raise RuntimeError(gl.glGetProgramInfoLog(program))
return program
class GLPlotWidget(QGLWidget):
def __init__(self, *args):
super(GLPlotWidget, self).__init__()
self.width, self.height = 100, 100
self.we = np.load('two.npy', mmap_mode='r')
self.e = copy.deepcopy(self.we[:, :, :])
self.e[:, :, 1] = np.interp(self.e[:, :, 1], (self.e[:, :, 1].min(), self.e[:, :, 1].max()), (-1, 1))
self.e[:, :, 0] = np.interp(self.e[:, :, 0], (self.e[:, :, 0].min(), self.e[:, :, 0].max()), (-1, +1))
self.vbo = glvbo.VBO(self.e)
self.count = self.vbo.shape[1]
self.showMaximized()
def initializeGL(self):
self.greyscale_data()
vs = compile_vertex_shader(VS1)
fs = compile_fragment_shader(FS1)
self.shaders_program_plot = link_shader_program(vs, fs)
def greyscale_data(self):
self.color = np.zeros((self.e.shape[1]*self.e.shape[0], 3), dtype=np.float32)
for i in range(0, 24):
a = self.e[i, :, 1].min()
b = self.e[i, :, 1].max()
c = np.interp(self.e[i, :, 1], (a, b), (0.15, 0.5))
self.color[self.e.shape[1] * i:self.e.shape[1] * (i + 1), 0] = c
self.color[self.e.shape[1] * i:self.e.shape[1] * (i + 1), 1] = c
self.color[self.e.shape[1] * i:self.e.shape[1] * (i + 1), 2] = c
self.elems = []
b = self.e.shape[1] # number of points per line
a = self.e.shape[0] # total number of arms
for i in range(0, a - 1):
for j in range(0, b - 1):
self.elems += [j + b * i, j + b * i + 1, j + b * (i + 1)]
self.elems += [j + b * (i + 1), j + b * (i + 1) + 1, j + b * i + 1]
self.elems = np.array(self.elems, dtype=np.int32)
# print(self.elems[0:100])
vs = compile_vertex_shader(VS)
fs = compile_fragment_shader(FS)
self.shaders_program = link_shader_program(vs, fs)
self.vertexbuffer = gl.glGenBuffers(1)
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vertexbuffer)
gl.glBufferData(gl.GL_ARRAY_BUFFER, self.e, gl.GL_DYNAMIC_DRAW)
self.elementbuffer = gl.glGenBuffers(1)
gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.elementbuffer)
gl.glBufferData(gl.GL_ELEMENT_ARRAY_BUFFER, self.elems, gl.GL_DYNAMIC_DRAW)
self.colorbuffer = gl.glGenBuffers(1)
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.colorbuffer)
gl.glBufferData(gl.GL_ARRAY_BUFFER, self.color, gl.GL_DYNAMIC_DRAW)
def ortho_view(self, i):
right = gl.glGetUniformLocation(i, "right")
gl.glUniform1f(right, self.right)
left = gl.glGetUniformLocation(i, "left")
gl.glUniform1f(left, self.left)
top = gl.glGetUniformLocation(i, "top")
gl.glUniform1f(top, self.top)
bottom = gl.glGetUniformLocation(i, "bottom")
gl.glUniform1f(bottom, self.bottom)
def greyscale(self):
gl.glUseProgram(self.shaders_program)
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vertexbuffer)
stride = 0 # 3*self.e.itemsize
offset = None # ctypes.c_void_p(0)
loc = gl.glGetAttribLocation(self.shaders_program, 'position')
gl.glEnableVertexAttribArray(loc)
gl.glVertexAttribPointer(loc, 2, gl.GL_FLOAT, False, stride, offset)
gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.elementbuffer)
loc = gl.glGetAttribLocation(self.shaders_program, 'a_Color')
gl.glEnableVertexAttribArray(loc)
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.colorbuffer)
gl.glVertexAttribPointer(loc, 3, gl.GL_FLOAT, False, stride, offset)
gl.glDrawElements(gl.GL_TRIANGLE_STRIP, self.elems.size, gl.GL_UNSIGNED_INT, None)
def paintGL(self):
self.resizeGL(self.width, self.height)
gl.glClearColor(1, 1, 1, 0)
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
gl.glEnable(gl.GL_DEPTH_TEST)
self.vbo.bind()
gl.glEnableVertexAttribArray(0)
gl.glVertexAttribPointer(0, 2, gl.GL_FLOAT, gl.GL_FALSE, 0, None)
gl.glUseProgram(self.shaders_program_plot)
uni_color = gl.glGetUniformLocation(self.shaders_program_plot, "triangleColor")
for i in range(0, self.vbo.data.shape[0]):
gl.glUniform3f(uni_color, 0, 0, 0)
gl.glLineWidth(1)
gl.glDrawArrays(gl.GL_LINE_STRIP, i * self.count, self.count)
self.vbo.unbind()
self.greyscale()
gl.glUseProgram(0)
def resizeGL(self, width, height):
self.width, self.height = width, height
gl.glViewport(0, 0, width, height)
def main():
app = QApplication(sys.argv)
editor = GLPlotWidget()
editor.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()

White Balance a photo from a known point

White Balancing is a rather well-covered topic, but most of the answers I have seen cover automatic white balancing techniques for an entire image that does not have a known point for what is white, gray, and black. I cannot seem to find many that cover white balancing from a known point. I have the script (below) that takes an image of a color card (Spyder Checkr 48) and returns the white, 20% Gray, and Black color card blocks:
Color        L      A     B      sR   sG   sB   aR   aG   aB
Card White   96.04  2.16  2.60   249  242  238  247  242  237
20% Gray     80.44  1.17  2.05   202  198  195  199  196  193
Card Black   16.91  1.43  -0.81  43   41   43   46   46   47
Question: Since I know the ground truth LAB, sRGB and AdobeRGB values for specific parts of the image, what would be the best way to white balance the image?
Here is a link to the images I am working with. This is the code for extracting the color card blocks (I am currently running this on Windows with Python 3.7):
from __future__ import print_function
import cv2
import imutils
import numpy as np
from matplotlib import pyplot as plt
import os
import sys
image = cv2.imread("PATH_TO_IMAGE")
template = cv2.imread("PATH_TO_TEMPLATE")
rtemplate = cv2.imread("PATH_TO_RIGHT_TEMPLATE")
def sift(image):
sift = cv2.xfeatures2d.SIFT_create()
kp, des = sift.detectAndCompute(image, None)
return kp, des
def sift_match(im1, im2, vis=False, save=False):
MIN_MATCH_COUNT = 10
FLANN_INDEX_KDTREE = 0
kp1, des1 = sift(im1)
kp2, des2 = sift(im2)
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=7)
search_params = dict(checks=100)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
# Need to draw only good matches, so create a mask
matchesMask = [[0, 0] for i in range(len(matches))]
if vis is True:
draw_params = dict(matchColor=(0, 255, 0),
singlePointColor=(255, 0, 0),
matchesMask=matchesMask,
flags=0)
im3 = cv2.drawMatchesKnn(im1, kp1, im2, kp2, matches, None, **draw_params)
if save:
cv2.imwrite("tempSIFT_Match.png", im3)
plt.imshow(im3), plt.show()
good = []
for m, n in matches:
if m.distance < 0.75 * n.distance:
good.append(m)
return kp1, des1, kp2, des2, good
def smartextractor(im1, im2, vis=False):
# Detect features and compute descriptors.
kp1, d1, kp2, d2, matches = sift_match(im1, im2, vis)
kp1 = np.asarray(kp1)
kp2 = np.asarray(kp2)
# Extract location of good matches
points1 = np.zeros((len(matches), 2), dtype=np.float32)
points2 = np.zeros((len(matches), 2), dtype=np.float32)
for i, match in enumerate(matches):
points1[i, :] = kp1[match.queryIdx].pt
points2[i, :] = kp2[match.trainIdx].pt
# Find homography
h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
if h is None:
print("could not find homography")
return None, None
# Use homography
height, width, channels = im2.shape
im1Reg = cv2.warpPerspective(im1, h, (width, height))
return im1Reg, h
def show_images(images, cols=1, titles=None):
"""
Display a list of images in a single figure with matplotlib.
"""
assert ((titles is None) or (len(images) == len(titles)))
n_images = len(images)
if titles is None: titles = ['Image (%d)' % i for i in range(1, n_images + 1)]
fig = plt.figure()
for n, (image, title) in enumerate(zip(images, titles)):
a = fig.add_subplot(cols, np.ceil(n_images / float(cols)), n + 1)
if image.ndim == 2:
plt.gray()
plt.imshow(image)
a.set_title(title)
fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)
plt.show()
def Sobel(img, bilateralFilter=True):
# timestart = time.clock()
try:
img = cv2.imread(img, 0)
except TypeError:
None
try:
rheight, rwidth, rdepth = img.shape
img1 = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
except ValueError:
raise TypeError
# cv2.imwrite('temp.png',img)
_, s, v = cv2.split(img1)
b, g, r = cv2.split(img)
if bilateralFilter is True:
s = cv2.bilateralFilter(s, 11, 17, 17)
v = cv2.bilateralFilter(v, 11, 17, 17)
b = cv2.bilateralFilter(b, 11, 17, 17)
g = cv2.bilateralFilter(g, 11, 17, 17)
r = cv2.bilateralFilter(r, 11, 17, 17)
# calculate sobel in x,y,diagonal directions with the following kernels
sobelx = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], dtype=np.float32)
sobely = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]], dtype=np.float32)
sobeldl = np.array([[0, 1, 2], [-1, 0, 1], [-2, -1, 0]], dtype=np.float32)
sobeldr = np.array([[2, 1, 0], [1, 0, -1], [0, -1, -2]], dtype=np.float32)
# calculate the sobel on value of hsv
gx = cv2.filter2D(v, -1, sobelx)
gy = cv2.filter2D(v, -1, sobely)
gdl = cv2.filter2D(v, -1, sobeldl)
gdr = cv2.filter2D(v, -1, sobeldr)
# combine sobel on value of hsv
xylrv = 0.25 * gx + 0.25 * gy + 0.25 * gdl + 0.25 * gdr
# calculate the sobel on saturation of hsv
sx = cv2.filter2D(s, -1, sobelx)
sy = cv2.filter2D(s, -1, sobely)
sdl = cv2.filter2D(s, -1, sobeldl)
sdr = cv2.filter2D(s, -1, sobeldr)
# combine sobel on value of hsv
xylrs = 0.25 * sx + 0.25 * sy + 0.25 * sdl + 0.25 * sdr
# combine value sobel and saturation sobel
xylrc = 0.5 * xylrv + 0.5 * xylrs
xylrc[xylrc < 6] = 0
# calculate the sobel on value on green
grx = cv2.filter2D(g, -1, sobelx)
gry = cv2.filter2D(g, -1, sobely)
grdl = cv2.filter2D(g, -1, sobeldl)
grdr = cv2.filter2D(g, -1, sobeldr)
# combine sobel on value on green
xylrgr = 0.25 * grx + 0.25 * gry + 0.25 * grdl + 0.25 * grdr
# calculate the sobel on blue
bx = cv2.filter2D(b, -1, sobelx)
by = cv2.filter2D(b, -1, sobely)
bdl = cv2.filter2D(b, -1, sobeldl)
bdr = cv2.filter2D(b, -1, sobeldr)
# combine sobel on value on blue
xylrb = 0.25 * bx + 0.25 * by + 0.25 * bdl + 0.25 * bdr
# calculate the sobel on red
rx = cv2.filter2D(r, -1, sobelx)
ry = cv2.filter2D(r, -1, sobely)
rdl = cv2.filter2D(r, -1, sobeldl)
rdr = cv2.filter2D(r, -1, sobeldr)
# combine sobel on value on red
xylrr = 0.25 * rx + 0.25 * ry + 0.25 * rdl + 0.25 * rdr
# combine value sobel and saturation sobel
xylrrgb = 0.33 * xylrgr + 0.33 * xylrb + 0.33 * xylrr
xylrrgb[xylrrgb < 6] = 0
# combine HSV and RGB sobel outputs
xylrc = 0.5 * xylrc + 0.5 * xylrrgb
xylrc[xylrc < 6] = 0
xylrc[xylrc > 25] = 255
return xylrc
print("extracting image")
extractedImage, _ = smartextractor(image, template)
print("extracting right image")
rextractedImage, _ = smartextractor(extractedImage, rtemplate, vis=False)
grextractedImage = cv2.cvtColor(rextractedImage, cv2.COLOR_BGR2GRAY)
bfsobelImg = Sobel(rextractedImage)
sobelImg = Sobel(rextractedImage, bilateralFilter=False)
csobelImg = cv2.add(bfsobelImg, sobelImg)
csobelImg[csobelImg < 6] = 0
csobelImg[csobelImg > 18] = 255
csobelImg = csobelImg.astype(np.uint8)
img2 = csobelImg.copy()
ret, thresh = cv2.threshold(img2, 18, 255, 0)
contours = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = imutils.grab_contours(contours)
contours = sorted(contours, key=cv2.contourArea, reverse=True)
count = 0
trigger = False
for c in contours:
# approximate the contour
peri = cv2.arcLength(c, True)
contours[count] = cv2.approxPolyDP(c, 0.05 * peri, True)
if len(contours[count]) == 4:
if trigger is False:
screenCnt = contours[count]
trigger = True
count += 1
tl = screenCnt[0]
tr = screenCnt[1]
bl = screenCnt[3]
br = screenCnt[2]
tLy, tLx = tl[0]
tRy, tRx = tr[0]
bLy, bLx = bl[0]
bRy, bRx = br[0]
ratio = .15
realSpace = (3/16)
boxwidth = int(((tRx - tLx) + (bRx - bLx))*.5 - (tLx + bLx)*.5)
boxheight = int(((bRy - tRy) + (bLy - tLy))*.5 - (tRy + tLy)*.5)
spaceWidth = int((boxwidth + boxheight)*.5*realSpace)
boxcenter = [int(((bRy - tRy)*.5 + (bLy - tLy)*.5)*.5), int(((tRx - tLx)*.5 + (bRx - bLx)*.5)*.5)]
roitl = [boxcenter[0] - int(ratio*boxheight), boxcenter[1] - int(ratio*boxwidth)]
roitr = [boxcenter[0] - int(ratio*boxheight), boxcenter[1] + int(ratio*boxwidth)]
roibl = [boxcenter[0] + int(ratio*boxheight), boxcenter[1] - int(ratio*boxwidth)]
roibr = [boxcenter[0] + int(ratio*boxheight), boxcenter[1] + int(ratio*boxwidth)]
spacing = int((boxwidth + boxheight)*.5)+spaceWidth
roiWhite = np.array((roitl, roitr, roibr, roibl))
roiGray = np.array(([roitl[1], roitl[0]+spacing*1], [roitr[1], roitr[0]+spacing*1],
[roibr[1], roibr[0]+spacing*1], [roibl[1], roibl[0]+spacing*1]))
roiBlack = np.array(([roitl[1], roitl[0]+spacing*6], [roitr[1], roitr[0]+spacing*6],
[roibr[1], roibr[0]+spacing*6], [roibl[1], roibl[0]+spacing*6]))
whiteAvgb, whiteAvgg, whiteAvgr, _ = cv2.mean(rextractedImage[(roitl[0]+spacing*0):(roibr[0]+spacing*0),
roitl[1]:roibr[1]])
grayAvgb, grayAvgg, grayAvgr, _ = cv2.mean(rextractedImage[(roitl[0]+spacing*1):(roibr[0]+spacing*1),
roitl[1]:roibr[1]])
blackAvgb, blackAvgg, blackAvgr, _ = cv2.mean(rextractedImage[(roitl[0]+spacing*6):(roibr[0]+spacing*6),
roitl[1]:roibr[1]])
whiteROI = rextractedImage[(roitl[0]+spacing*0):(roibr[0]+spacing*0), roitl[1]:roibr[1]]
grayROI = rextractedImage[(roitl[0]+spacing*1):(roibr[0]+spacing*1), roitl[1]:roibr[1]]
blackROI = rextractedImage[(roitl[0]+spacing*6):(roibr[0]+spacing*6), roitl[1]:roibr[1]]
imageList = [whiteROI, grayROI, blackROI]
show_images(imageList, cols=1)
correctedImage = rextractedImage.copy()
whiteROI[:, :, 0] = whiteAvgb
whiteROI[:, :, 1] = whiteAvgg
whiteROI[:, :, 2] = whiteAvgr
grayROI[:, :, 0] = grayAvgb
grayROI[:, :, 1] = grayAvgg
grayROI[:, :, 2] = grayAvgr
blackROI[:, :, 0] = blackAvgb
blackROI[:, :, 1] = blackAvgg
blackROI[:, :, 2] = blackAvgr
imageList = [whiteROI, grayROI, blackROI]
show_images(imageList, cols=1)
# SPYDER COLOR CHECKR Values: http://www.bartneck.de/2017/10/24/patch-color-definitions-for-datacolor-spydercheckr-48/
blank = np.zeros_like(csobelImg)
maskedImg = blank.copy()
maskedImg = cv2.fillConvexPoly(maskedImg, roiWhite, 255)
maskedImg = cv2.fillConvexPoly(maskedImg, roiGray, 255)
maskedImg = cv2.fillConvexPoly(maskedImg, roiBlack, 255)
res = cv2.bitwise_and(rextractedImage, rextractedImage, mask=maskedImg)
# maskedImg = cv2.fillConvexPoly(maskedImg, roi2Black, 255)
cv2.drawContours(blank, contours, -1, 255, 3)
outputSquare = np.zeros_like(csobelImg)
cv2.drawContours(outputSquare, [screenCnt], -1, 255, 3)
imageList = [rextractedImage, grextractedImage, bfsobelImg, sobelImg, csobelImg, blank, outputSquare, maskedImg, res]
show_images(imageList, cols=3)
sys.exit()
Given the RGB value of a white patch, the image can be corrected for white balance by dividing by that value. That is, applying a linear transformation that makes the white patch have the same level in the three channels:
lum = (whiteR + whiteG + whiteB)/3
imgR = imgR * lum / whiteR
imgG = imgG * lum / whiteG
imgB = imgB * lum / whiteB
Multiplying by lum makes it so that the average intensity doesn’t change.
(The computation of lum will be better if using proper weights: 0.2126, 0.7152, 0.0722, but I wanted to keep it simple. This would only make a big difference if the input white is way off the mark, in which case you'll have other issues too.)
Note that this transformation is best applied in linear RGB space. Both the image and the RGB values for white should first be converted to linear RGB if the image is stored in sRGB or similar (a raw image from the camera would be linear RGB, a JPEG would be sRGB). See here for the relevant equations.
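For instance, a rough numpy sketch of this correction, under the assumption that the image is an 8-bit sRGB/BGR array as loaded by cv2 and that whiteAvgb, whiteAvgg, whiteAvgr from the script above are the measured white-patch values:
import numpy as np

def srgb_to_linear(c):
    # c in [0, 1]; standard sRGB decoding
    return np.where(c <= 0.04045, c / 12.92, ((c + 0.055) / 1.055) ** 2.4)

def linear_to_srgb(c):
    # inverse of srgb_to_linear
    return np.where(c <= 0.0031308, c * 12.92, 1.055 * c ** (1 / 2.4) - 0.055)

def white_balance(img_bgr, white_bgr):
    img = srgb_to_linear(img_bgr.astype(np.float64) / 255.0)
    white = srgb_to_linear(np.asarray(white_bgr, np.float64) / 255.0)
    lum = white.mean()          # or use the 0.0722/0.7152/0.2126 weights (BGR order)
    img *= lum / white          # per-channel gain, broadcast over the channel axis
    return np.clip(linear_to_srgb(img) * 255.0, 0, 255).astype(np.uint8)

# corrected = white_balance(rextractedImage, (whiteAvgb, whiteAvgg, whiteAvgr))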
For better precision, you can apply the above using the RGB values of the grey patch as well. Take the average multiplication factor (lum/whiteR) derived from the white and grey patches, for each channel, and apply those to the image.
The black level could be subtracted from the image prior to determining the white RGB values and correcting for white balance. This will improve contrast and color perception, but it is not part of white balancing.
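A possible sketch of that pre-step, reusing the measured black-patch averages from the script above:
# subtract the measured per-channel black level and clip at zero,
# before measuring the white patch and applying the white balance
black = np.asarray((blackAvgb, blackAvgg, blackAvgr), np.float64)
img_nb = np.clip(rextractedImage.astype(np.float64) - black, 0, None).astype(np.uint8)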
A full color correction is way more complex, I will not go into that.

VPython startMenu

My main program for a Bloxorz-type game is working fine. The only issue I have is implementing a system where the game shows a message on the screen if the block falls off the map, and a gameFinished message when the block drops into the hole vertically. I'm thinking the way it would work is to compare coordinates: if the coordinates of the hole match the square base of the block, show a message.
My first real issue is being able to display a startMenu message that shows "Press S to Start" and "Press Q to Quit". I know how to use scene.kb.getkey(); the only problem is displaying the text on the screen, which I will implement in my main while True loop (a label-based sketch follows the code below). Here is my code:
from __future__ import division, print_function
from visual import *
import math, sys
from visual.graph import *
from sys import exit
"""Necessary Python Libraries"""
# top, left, bottom, right, front, back
tlbrfb = ( 2.0, -0.5, 0.0, 0.5, 0.5, -0.5 ) #the area that the block covers (dimensions)
Top = 0
Left = 1
Bottom = 2
Right = 3
Front = 4
Back = 5
"""Modules defining the Movement of the Block"""
def moveRight():
global tlbrfb
for r in range(0, 9):
rate(30)
block.rotate( angle = pi/18, axis = vector( 0, 0, -1), origin = vector(tlbrfb[Right], 0, 0) ) #pi/18 = 10degrees,
tlbrfb = (tlbrfb[Right]-tlbrfb[Left], tlbrfb[Right], 0, tlbrfb[Right]+tlbrfb[Top], tlbrfb[Front], tlbrfb[Back]) #update block's state of stance
def moveDown():
global tlbrfb
for r in range(0, 9):
rate(30)
block.rotate( angle = pi/18, axis = vector( +1, 0, 0), origin = vector(0, 0, tlbrfb[Front]) )
tlbrfb = (tlbrfb[Front]-tlbrfb[Back], tlbrfb[Left], 0, tlbrfb[Right], tlbrfb[Front]+tlbrfb[Top], tlbrfb[Front])
def moveLeft():
global tlbrfb
for r in range(0, 9):
rate(30)
block.rotate( angle = pi/18, axis = vector( 0, 0, +1), origin = vector(tlbrfb[Left], 0, 0) )
tlbrfb = (tlbrfb[Right]-tlbrfb[Left], tlbrfb[Left]-tlbrfb[Top], 0, tlbrfb[Left], tlbrfb[Front], tlbrfb[Back] )
def moveUp():
global tlbrfb
for r in range(0, 9):
rate(30)
block.rotate( angle = pi/18, axis = vector( -1, 0, 0), origin = vector(0, 0, tlbrfb[Back]) )
tlbrfb = (tlbrfb[Front]-tlbrfb[Back], tlbrfb[Left], 0, tlbrfb[Right], tlbrfb[Back], tlbrfb[Back]-tlbrfb[Top])
level1 = [ #array on the tile placement
[1,1,1,0,0,0,0,0,0,0],
[1,1,1,1,1,0,0,0,0,0],
[1,1,1,1,1,1,1,1,1,0],
[0,0,0,0,0,1,1,0,1,1],
[0,0,0,0,0,0,1,1,1,0]
]
def draw_level(array):
for y in range(0, len(array)):
for x in range(0, len(array[y])):
if array[y][x] == 1:
box( pos=vector(x, -0.1, y), length=0.9, width = 0.9, height = 0.2, color = vector(0.9, 0.8, 0.8))
block = box( pos=vector(0,1,0), length = 1, width = 1, height = 2, color = vector(0.9, 0.9, 0.5))
def handle_events():
key = scene.kb.getkey()
if key =="up":
moveUp()
elif key =="down":
moveDown()
elif key == "left":
moveLeft()
elif key == "right":
moveRight()
elif key == "q":
quit()
elif key == "escape":
quit()
def mainGameLoop():
while True:
scene.center = vector(4.5, 0, 2)
scene.forward = scene.center - vector(2, 6, 10)
scene.background = vector(0.57, 0.2, 0.2)
draw_level(level1) #draw mapout
handle_events()
while True:
mainGameLoop()
handle_events()
print(block.pos)
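Not from the original post, but a minimal sketch of how the start-menu text could be shown with classic-VPython label objects and hidden again once a key is pressed (positions and text sizes are guesses):
def start_menu():
    # draw the menu text in front of the board
    title = label(pos=vector(4.5, 3, 2), text="Press S to Start", height=20, box=False)
    hint = label(pos=vector(4.5, 2, 2), text="Press Q to Quit", height=16, box=False)
    while True:
        key = scene.kb.getkey()
        if key == "s":
            # remove the menu text and start the game
            title.visible = False
            hint.visible = False
            return
        elif key == "q":
            exit()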
