How to edit points data in vtk file - python-3.x

I am trying to project some points of a mesh using a function. I can access the point and cell data from the polydata, and from that I get the projected points. Now I want to replace the original points with the projected points. Is there any way to update the points in Mayavi or VTK?
To get the polydata from a .ply file:
from plyfile import PlyData, PlyElement
import numpy as np
import time
from mayavi import mlab
from tvtk.api import tvtk

plydata = PlyData.read(ply_file)
points = plydata.elements[0].data
# Get X,Y,Z coordinates from .ply file
x, y, z = [], [], []
for i in points:
    x.append(i[0])
    y.append(i[1])
    z.append(i[2])
s = [0.1] * len(x)
pts = mlab.points3d(x, y, z, s)  # create the point-cloud source
mesh = mlab.pipeline.delaunay2d(pts)
surf = mlab.pipeline.surface(mesh, color=(1, 1, 1))
actor = surf.actor.actors[0]
polydata = tvtk.to_vtk(actor.mapper.input)
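As an aside, plyfile can hand back whole coordinate columns as NumPy arrays, which avoids the Python loop above; a minimal sketch, assuming the usual 'vertex' element with x/y/z properties:

# Vectorized alternative to the append loop (plyfile exposes columns by name):
x = plydata['vertex']['x']
y = plydata['vertex']['y']
z = plydata['vertex']['z']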
Projecting the points:
for i in range(polydata.GetNumberOfCells()):
    pts = polydata.GetCell(i).GetPoints()
    np_pts = np.array([pts.GetPoint(j) for j in range(pts.GetNumberOfPoints())])
    # project_points projects the cell's three vertices
    projected_point1, projected_point2, projected_point3 = project_points(np_pts[0], np_pts[1], np_pts[2])
To save the result, I tried the following line inside the above loop:
polydata_return.GetCell(i).GetPoints() = np.array([projected_point1,projected_point2,projected_point3])
but got the following error:
SyntaxError: can't assign to function call
Is there any way to replace/edit the existing points with the projected points? Thanks in advance.
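For reference, the assignment fails because GetPoints() is a method call, not an attribute; VTK instead exposes a setter on the dataset's vtkPoints object. A minimal sketch of that route, assuming project_points accepts and returns three point tuples:

points = polydata.GetPoints()
for i in range(polydata.GetNumberOfCells()):
    cell = polydata.GetCell(i)
    ids = [cell.GetPointId(j) for j in range(cell.GetNumberOfPoints())]
    old = [points.GetPoint(pid) for pid in ids]
    new = project_points(old[0], old[1], old[2])
    for pid, p in zip(ids, new):
        points.SetPoint(pid, p)  # write the projected point back into the dataset
    # note: points shared by neighbouring cells are projected once per cell here
points.Modified()  # flag the change so VTK/Mayavi re-render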
UPDATE:
To generate the mesh:
from mayavi import mlab
from tvtk.api import tvtk
import numpy as np
import random
from plyfile import PlyData, PlyElement

x = [random.randint(0, 250) for i in range(250)]
y = [random.randint(0, 250) for i in range(250)]
z = [random.randint(0, 5) for i in range(250)]
s = [0.1] * len(x)
pts = mlab.points3d(x, y, z, s)
mesh = mlab.pipeline.delaunay2d(pts)
surf = mlab.pipeline.surface(mesh, color=(1, 1, 1))
After generating the above mesh I want to map a texture onto it using the following image, and then flatten the mesh with some transformation, so that the texture is flattened along with the mesh. I am doing this to dewarp the image, i.e. to remove the distortion in it.
texture image
To project the image I use the following code:
image_file = texture_image_path
if image_file.split('.')[-1] == 'png':
    img = tvtk.PNGReader()
elif image_file.split('.')[-1] == 'jpg':
    img = tvtk.JPEGReader()
img.file_name = image_file
texture = tvtk.Texture(input_connection=img.output_port, interpolate=0)
surf.actor.enable_texture = True
surf.actor.tcoord_generator_mode = 'plane'
surf.actor.actor.texture = texture
mlab.show()

This should get you started; you just have to use the correct data at the correct position. There are several attributes on mesh.mlab_source; I pointed out some of them in the comments.
from mayavi import mlab
from tvtk.api import tvtk
import numpy as np
# from plyfile import PlyData, PlyElement
import random

x = [random.randint(0, 250) for i in range(250)]
y = [random.randint(0, 250) for i in range(250)]
z = [random.randint(0, 5) for i in range(250)]
s = [0.1] * len(x)
pts = mlab.points3d(x, y, z, s)
mesh = mlab.pipeline.delaunay2d(pts)
surf = mlab.pipeline.surface(mesh, color=(1, 1, 1))

# After generating the above mesh, map a texture onto it, then flatten the
# mesh with some transformation so the texture flattens with it (dewarping).
# For projecting the image use the following code:
image_file = 'texture.jpg'
if image_file.split('.')[-1] == 'png':
    img = tvtk.PNGReader()
elif image_file.split('.')[-1] == 'jpg':
    img = tvtk.JPEGReader()
img.file_name = image_file
texture = tvtk.Texture(input_connection=img.output_port, interpolate=1)
# texture = tvtk.Texture(interpolate=1)
# texture.input = img
surf.actor.enable_texture = True
surf.actor.tcoord_generator_mode = 'plane'
surf.actor.actor.texture = texture

# src = mlab.pipeline.scalar_scatter(x, y, z, s)
# src.mlab_source.dataset.lines = connections
# src.update()
# src.mlab_source.reset()

@mlab.animate
def anim():
    for i in range(10):
        print(i)
        # surf.mlab_source.set(x=x, y=y, z=z)
        print(surf.mlab_source.dataset)
        x = [random.randint(0, 250) for i in range(250)]
        y = [random.randint(0, 250) for i in range(250)]
        z = [random.randint(0, 5) for i in range(250)]
        s = [0.1] * len(x)
        pts = mlab.points3d(x, y, z, s)
        mesh = mlab.pipeline.delaunay2d(pts)
        surf.mlab_source.x = mesh.mlab_source.x
        surf.mlab_source.y = mesh.mlab_source.y
        surf.mlab_source.z = mesh.mlab_source.z
        # s.mlab_source.scalars = np.asarray(x*0.1*(i+1), 'd')
        yield

anim()
mlab.show()
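As a follow-up note on those mlab_source attributes: assigning x, y and z separately triggers a pipeline update per assignment, while the set() method of mlab_source pushes several arrays in one update. A sketch using the same surf and mesh as above:

surf.mlab_source.set(x=mesh.mlab_source.x,
                     y=mesh.mlab_source.y,
                     z=mesh.mlab_source.z)  # one recompute instead of three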

Related

Compact graph-visualization using pydot

I would like to visualize a "linear" directed graph with a layout like this:
All the in- and out-degrees are 1 (except for the first and last node, of course). The lengths of the labels differ, so I can't easily calculate how many nodes will fit in one row or another. The code I have so far is this:
import networkx as nx
from networkx.drawing.nx_pydot import to_pydot

G = nx.DiGraph()
G.add_node("XYZ 1.0")
for i in range(1, 20):
    G.add_node(f'XYZ 1.{i}', style='filled', fillcolor='skyblue')
    G.add_edge(f'XYZ 1.{i-1}', f'XYZ 1.{i}')
# set defaults
G.graph['graph'] = {'rankdir': 'LR'}
G.graph['node'] = {'shape': 'rectangle'}
G.graph['edge'] = {'arrowsize': '4.0'}  # note: the defaults key is 'edge', not 'edges'
pydt = to_pydot(G)
prog = 'dot'
file_name = f'nx_graph_{prog}.png'
pydt.write(file_name, prog=prog, format="png")
I already use networkx in a project that needs to run in a Python Docker container, so I would like to stick with pydot and networkx if possible.
In some of the Graphviz programs I can set coordinates, if I understand correctly, but to set coordinates I would need to know the widths of the boxes in order to avoid overlapping boxes.
I managed to find a way to do this with pydot. We can produce dot output that contains the computed coordinates with the create_dot function. Reading it back, we can get the coordinates that the dot program created (and also the widths and heights). We can then calculate new coordinates and write them back into the networkx DiGraph. Converting again to a pydot.Dot object, we can finally render with neato using the -n option, so that the coordinates we set are used. Working code can be seen below.
import networkx as nx
from networkx.drawing.nx_pydot import to_pydot
import pydot
from typing import List

G = nx.DiGraph()
G.add_node(0, label="XYZ 1.0")
for i in range(1, 20):
    G.add_node(i, label=f'XYZ 1.{i}')
    G.add_edge(i - 1, i)
# set defaults
G.graph['graph'] = {'rankdir': 'LR'}
G.graph['node'] = {'shape': 'rectangle'}
G.graph['edge'] = {'arrowsize': '4.0'}

pydt = to_pydot(G)
dot_data = pydt.create_dot()
pydt2 = pydot.graph_from_dot_data(dot_data.decode('utf-8'))[0]

def get_position(node):
    # 'pos' comes back as '"x,y"'; strip the quotes and split
    pydot_node = pydt2.get_node(str(node))[0]
    return [float(i) for i in pydot_node.get_attributes().get("pos")[1:-1].split(',')]

def fix_position(position: List, w: float = 1000, shift: float = 80):
    # fold the long horizontal ribbon into rows of width w, shifting each
    # row down by `shift` points and alternating the direction
    x_orig, y_orig = position
    n = int(x_orig / w)
    y = y_orig - n * shift
    remain_x = x_orig - n * w
    if n % 2 == 0:
        x = remain_x
    else:
        x = w - remain_x
    return x, y

def refresh_coordinates_using_x():
    for node in G.nodes:
        position = get_position(node)
        x, y = fix_position(position)
        pos = f'"{x},{y}!"'  # the '!' pins the position for neato
        G.nodes[node]['pos'] = pos

refresh_coordinates_using_x()
pydt3 = to_pydot(G)
file_name = 'nx_graph_neato.png'
pydt3.write(file_name, prog=["neato", "-n"], format="png")
If you want to calculate the positions of the nodes based on their widths, you need to know that while the coordinates are in points, the widths are in inches. 1 inch is 72 points.
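For example, a sketch of reading a node's width and converting it to points so it can be compared with the pos coordinates (attribute access as in get_position above; dot usually emits width unquoted, but the exact quoting may vary):

pydot_node = pydt2.get_node(str(node))[0]
width_inches = float(pydot_node.get_attributes().get("width"))
width_points = 72 * width_inches  # pos is in points; width/height are in inches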
The result will be similar to this one.

segment non-homogeneous image background with opencv

I have an image from a scanner and I want to segment the background, but the background is not homogeneous. How could I do that?
My input is a point cloud.
I used the code below to convert it to a depth image:
import numpy as np
import cv2
import open3d as o3d
import sys

def convertToDepth():
    argv = sys.argv[1]   # input point-cloud path
    argv2 = sys.argv[2]  # output image path
    pcd = o3d.io.read_point_cloud(argv)
    p = np.asarray(pcd.points)
    points = np.copy(p)
    points = np.asarray(points, np.int16)
    max_x = np.max(points[:, 0])
    min_x = np.min(points[:, 0])
    max_y = np.max(points[:, 1])
    min_y = np.min(points[:, 1])
    img = np.zeros(((max_x - min_x) + 1, (max_y - min_y) + 1), np.uint8)
    img[img == 0] = 255
    max_z = np.max(points[:, 2])
    min_z = np.min(points[:, 2])
    for p in points:
        # keep the nearest (smallest) scaled depth per pixel
        img[(p[0] - min_x), (p[1] - min_y)] = min(img[(p[0] - min_x), (p[1] - min_y)],
                                                  int((p[2] - min_z) * (256 / ((max_z - min_z) + 1))))
    cv2.imwrite(argv2, img)

convertToDepth()
This is my depth image:
I tried this code to obtain the seeds:
import cv2
import numpy as np

img = cv2.imread('amostra.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
cv2.imwrite('thresh.png', thresh)
and obtained this:
How can I get the seeds with a non-homogeneous background?
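For a non-homogeneous background, a common alternative to a single global Otsu threshold is a local (adaptive) threshold, which computes the cutoff per neighbourhood. A minimal sketch, where blockSize and C are guesses to tune:

import cv2

gray = cv2.imread('amostra.png', cv2.IMREAD_GRAYSCALE)
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                               cv2.THRESH_BINARY, 51, 5)  # blockSize=51, C=5
cv2.imwrite('thresh_adaptive.png', thresh)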
UPDATE:
I tried this code:
import cv2
import numpy as np
import sys

path_img = sys.argv[1]
img = cv2.imread(path_img)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
for i in range(0, 255):
    # isolate the pixels at this exact gray level
    thresh = gray.copy()
    thresh[:] = 0
    thresh[(gray == i)] = 255
    num_labels, labels_im = cv2.connectedComponents(thresh)
    areaMaior = 100
    maskMaior = np.zeros((img.shape[0], img.shape[1]), np.uint8)
    for j in range(1, num_labels):
        # keep the largest connected component at this level
        mask = np.zeros((img.shape[0], img.shape[1]), np.uint8)
        mask[labels_im == j] = 255
        pixels = cv2.countNonZero(mask)
        if pixels > areaMaior:
            maskMaior = mask
            areaMaior = pixels
    # cv2.imwrite('segment/areaimg' + str(i) + '.png', maskMaior)
    gray[(maskMaior == 255)] = 255
cv2.imwrite('gray12.png', gray)
and obtained this:

Detection of small object - aphids on plants

I'm currently trying to create a detector for aphids (green and rose) on plants, using only "classic" image processing techniques (no neural networks).
Here is an image I'm working on:
'aphids.jpg'
I'm working on the code below. If you apply it to the image you should get the plants alone. My problem is that I want to isolate the aphids that can be seen on the plants. There are a lot of them, but I just want to detect the biggest or most obvious ones.
In the code there is an edge_detec function I'm currently working on. One of the problems I have is that I can detect some of the aphids as contours, but it also picks up simple lines...
I tried to drop those lines using the contour hierarchy, but it seems they have inner contours, so I can't easily delete them.
I also tried adjust_gamma and adjust_contrast, but they don't change the result much.
I'm looking for more ideas. What would you try? (One alternative is sketched after the code below.)
Thank you in advance!
Here is the code:
import cv2
import numpy as np
import matplotlib.pyplot as plt

def adjust_gamma(image, gamma=1.0):
    # build a lookup table mapping the pixel values [0, 255] to
    # their adjusted gamma values
    invGamma = 1.0 / gamma
    table = np.array([((i / 255.0) ** invGamma) * 255
                      for i in np.arange(0, 256)]).astype("uint8")
    # apply gamma correction using the lookup table
    return cv2.LUT(image, table)

def adjust_contrast(image, alpha=1.0, beta=0):
    new = np.zeros(image.shape, image.dtype)
    for y in range(image.shape[0]):
        for x in range(image.shape[1]):
            for c in range(image.shape[2]):
                new[y, x, c] = np.clip(alpha * image[y, x, c] + beta, 0, 255)
    return new

def img_process(img):
    (h1, w1) = img.shape[:2]
    center = (w1 / 2, h1 / 2)
    blur = cv2.GaussianBlur(img.copy(), (5, 5), 0)
    hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
    # Boundaries to separate plants from the image
    l_bound = np.array([20, 0, 0])
    h_bound = np.array([90, 250, 170])  # green
    mask = cv2.inRange(hsv, l_bound, h_bound)
    res = cv2.bitwise_and(img, img, mask=mask)
    # Find plant contours
    cnt, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    sort_cnt = sorted(cnt, key=cv2.contourArea, reverse=True)
    cnt = [sort_cnt[i] for i in range(len(sort_cnt)) if cv2.contourArea(sort_cnt[i]) > 300]
    cv2.drawContours(res, cnt, -1, (0, 255, 0), -1)
    # Inverse mask to keep only the plant in the image
    mask2 = cv2.inRange(res, np.array([0, 0, 0]), np.array([250, 250, 250]))
    mask2 = cv2.bitwise_not(mask2)
    res2 = cv2.bitwise_and(img, img, mask=mask2)
    # Increase brightness/contrast
    res2 = res2 * 1.45
    res2 = res2.astype('uint8')
    # Crop
    res2 = res2[:-50, int(center[0] - 300):int(center[0] + 550)]
    return res2

def edge_detec(img):
    (h1, w1) = img.shape[:2]
    center = (w1 / 2, h1 / 2)
    blur = cv2.GaussianBlur(img.copy(), (5, 5), 0)
    gray = cv2.cvtColor(blur, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 30, 70, apertureSize=3)
    edges = edges[:-50, int(center[0] - 300):int(center[0] + 550)]
    # kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    # edges = cv2.morphologyEx(edges, cv2.MORPH_GRADIENT, kernel)
    cnt, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnt = sorted(cnt, key=cv2.contourArea, reverse=True)
    listArea = list(map(cv2.contourArea, cnt))
    sort_cnt = [x for x in cnt if cv2.contourArea(x) > 10]
    cv2.drawContours(edges, sort_cnt, -1, (0, 255, 0), -1)
    return edges, center, img

### Main program
img = cv2.imread('051.jpg')
while True:
    ## Put processing function here
    img_mod = img_process(img)
    cv2.imshow('img', img_mod)
    if cv2.waitKey(1) & 0xFF == 27:
        break
cv2.destroyAllWindows()
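As one more idea for small, roughly round targets like aphids: OpenCV's blob detector filters candidate regions by area and circularity, which tends to reject the thin lines that survive Canny. A minimal sketch, where the filter values are guesses to tune and plant_only stands for a grayscale version of the masked output of img_process:

import cv2

plant_only = cv2.imread('plant_only.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input
params = cv2.SimpleBlobDetector_Params()
params.filterByArea = True
params.minArea = 10          # drop speckle noise
params.maxArea = 500         # drop whole leaves
params.filterByCircularity = True
params.minCircularity = 0.5  # aphids are roughly round; lines are not
detector = cv2.SimpleBlobDetector_create(params)
keypoints = detector.detect(plant_only)
out = cv2.drawKeypoints(plant_only, keypoints, None, (0, 0, 255),
                        cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imwrite('blobs.png', out)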

Undefined name problem in camera calibration

I am using the same code that is provided by the OpenCV tutorial. It was working a few weeks ago; today, when I try to run it, it says that the name gray is not defined! Can someone find the error for me?
import numpy as np
# import matplotlib.pyplot as plt
import cv2
import glob
import os

def draw(img, corners, imgpts):
    corner = tuple(corners[0].ravel())
    img = cv2.line(img, corner, tuple(imgpts[0].ravel()), (255, 0, 0), 5)
    img = cv2.line(img, corner, tuple(imgpts[1].ravel()), (0, 255, 0), 5)
    img = cv2.line(img, corner, tuple(imgpts[2].ravel()), (0, 0, 255), 5)
    return img

# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((7*7, 3), np.float32)
objp[:, :2] = np.mgrid[0:7, 0:7].T.reshape(-1, 2)
# Arrays to store object points and image points from all the images.
objpoints = []  # 3d points in real world space
imgpoints = []  # 2d points in image plane
img_dir = "C:\\Hungary\\Biblography\\Rotating Solitary Wave\\My Work\\Final Work\\Experiment1111 \\Camera Calibration\\Image Processing\\chess"
data_path = os.path.join(img_dir, '*bmp')
images = glob.glob(data_path)
for fname in images:
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Find the chess board corners
    ret, corners = cv2.findChessboardCorners(gray, (7, 7), None)
    # If found, add object points, image points (after refining them)
    if ret == True:
        objpoints.append(objp)
        corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        imgpoints.append(corners2)
        # Draw and display the corners
        img = cv2.drawChessboardCorners(img, (7, 7), corners2, ret)
        cv2.imshow('img', img)
        cv2.waitKey(500)
cv2.destroyAllWindows()
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
print('Rotation Vector, or the Angles For Each Photo: ', rvecs, '\n')
R = cv2.Rodrigues(rvecs[0])
print('The Rotation Matrix is: ', R)
print('Translation Vector: ', tvecs, '\n')
print(mtx, '\n')
print('Distortion Coefficients ', dist, '\n')
img = cv2.imread('00000274.bmp')
h, w = img.shape[:2]
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
print('Camera Matrix', newcameramtx, '\n')
# undistort
dst = cv2.undistort(img, mtx, dist)  # , None, newcameramtx)
p = np.ones_like(dst)
# crop the image
x, y, w, h = roi
dst = dst[y:y+h, x:x+w]
# undistort
mapx, mapy = cv2.initUndistortRectifyMap(mtx, dist, None, newcameramtx, (w, h), 5)
dst = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR)
# crop the image
x, y, w, h = roi
dst = dst[y:y+h, x:x+w]
cv2.imwrite('calibresult.png', dst)
mean_error = 0
for i in range(len(objpoints)):
    imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
    error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)
    mean_error += error
print("total error: ", mean_error / len(objpoints))
If you read the OpenCV documentation you will find that I made only small changes to the code, and it was working; but today it raises this error saying the name gray is not defined!
Check your path, and see whether images is an empty list. In that case the for loop, in which the gray variable is defined, is never executed.
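A quick way to confirm this, reusing the data_path from the question (a sketch, not part of the original answer):

import glob

images = glob.glob(data_path)
print('Found', len(images), 'calibration images')
if not images:
    # gray is only assigned inside the loop, so an empty list leaves it
    # undefined and calibrateCamera then raises the NameError
    raise SystemExit('No .bmp files matched ' + data_path + '; check img_dir')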

how to get the length of eyes and mouth using dlib

I am working on a yawn-detection project; I am using dlib and OpenCV to detect the face and landmarks in a video.
I want to get the lengths of the eyes and mouth.
This is what I have done so far:
import sys
import os
import dlib
import glob
from skimage import io
import cv2
import time

if len(sys.argv) != 3:
    print("Usage: script.py <predictor_path> <faces_folder_path>")
    exit()
predictor_path = sys.argv[1]
faces_folder_path = sys.argv[2]
vidcap = cv2.VideoCapture('video.avi')
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
win = dlib.image_window()
while vidcap.isOpened():
    success, image = vidcap.read()
    if success:
        win.clear_overlay()
        win.set_image(image)
        # Ask the detector to find the bounding boxes of each face. The 1 in the
        # second argument indicates that we should upsample the image 1 time. This
        # will make everything bigger and allow us to detect more faces.
        dets = detector(image, 1)
        print("Number of faces detected: {}".format(len(dets)))
        for k, d in enumerate(dets):
            print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
                k, d.left(), d.top(), d.right(), d.bottom()))
            # Get the landmarks/parts for the face in box d.
            shape = predictor(image, d)
            print(shape)
            print("Part 0: {}, Part 1: {}, Part 2: {} ...".format(shape.part(0), shape.part(1), shape.part(2)))
            # Draw the face landmarks on the screen.
            win.add_overlay(shape)
        win.add_overlay(dets)
        time.sleep(0.01)
cv2.destroyAllWindows()
vidcap.release()
Please help me figure out how to get the opening length of the eyes and mouth.
From this figure of the 68 landmark points:
import Paths
import globals
from globals import ClassifierFiles
import numpy as np
import cv2
import time
import dlib
import math
import eyeCoordinates
import mouthCoordinates
from globals import Threshold
from globals import yawnFolder
import os
import openface

VIDEO_PATHS = []

def readVideo(video):
    global no, yes
    video_capture = cv2.VideoCapture(video)
    detector = dlib.get_frontal_face_detector()  # face detector
    predictor = dlib.shape_predictor(ClassifierFiles.shapePredicter)  # landmark identifier
    face_aligner = openface.AlignDlib(ClassifierFiles.shapePredicter)
    u = 0
    while True:
        ret, frame = video_capture.read()
        if frame is not None:  # 'frame != None' misbehaves on numpy arrays
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
            # clahe_image = clahe.apply(gray)
            detections = detector(frame, 1)  # detect the faces in the image
            for k, d in enumerate(detections):  # for each detected face
                shape = predictor(frame, d)  # get coordinates
                vec = np.empty([68, 2], dtype=int)
                coor = []
                for i in range(1, 68):  # there are 68 landmark points on each face
                    # the loop starts at 1, so coor[i-1] holds part i
                    # (that is why c[35] below corresponds to landmark 36)
                    # cv2.circle(frame, (shape.part(i).x, shape.part(i).y), 1, (0,0,255), thickness=1)
                    coor.append([shape.part(i).x, shape.part(i).y])
                    vec[i][0] = shape.part(i).x
                    vec[i][1] = shape.part(i).y
                # right-eye and left-eye distances
                rightEye = eyeCoordinates.distanceRightEye(coor)
                leftEye = eyeCoordinates.distanceLeftEye(coor)
                eyes = (rightEye + leftEye) / 2
                # mouth distance
                mouth = mouthCoordinates.distanceBetweenMouth(coor)
                print(eyes, mouth)
                # prints both eyes' average distance
                # prints mouth distance
                break
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

readVideo('v.avi')  # test video of faces

if __name__ == '__main__':
    VIDEO_PATHS = Paths.videosPaths()
    init()
The eyeCoordinates file:
import distanceFormulaCalculator

def distanceRightEye(c):
    eR_36, eR_37, eR_38, eR_39, eR_40, eR_41 = 0, 0, 0, 0, 0, 0
    eR_36 = c[35]
    eR_37 = c[36]
    eR_38 = c[37]
    eR_39 = c[38]
    eR_40 = c[39]
    eR_41 = c[40]
    x1 = distanceFormulaCalculator.distanceFormula(eR_37, eR_41)
    x2 = distanceFormulaCalculator.distanceFormula(eR_38, eR_40)
    return (x1 + x2) / 2

def distanceLeftEye(c):
    eL_42, eL_43, eL_44, eL_45, eL_46, eL_47 = 0, 0, 0, 0, 0, 0
    eL_42 = c[41]
    eL_43 = c[42]
    eL_44 = c[43]
    eL_45 = c[44]
    eL_46 = c[45]
    eL_47 = c[46]
    x1 = distanceFormulaCalculator.distanceFormula(eL_43, eL_47)
    x2 = distanceFormulaCalculator.distanceFormula(eL_44, eL_46)
    return (x1 + x2) / 2

def eyePoints():
    return [36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47]
The mouthCoordinates file:
import distanceFormulaCalculator

def distanceBetweenMouth(c):
    m_60, m_61, m_62, m_63, m_64, m_65, m_66, m_67 = 0, 0, 0, 0, 0, 0, 0, 0
    m_60 = c[59]
    m_61 = c[60]
    m_62 = c[61]
    m_63 = c[62]
    m_64 = c[63]
    m_65 = c[64]
    m_66 = c[65]
    m_67 = c[66]
    x1 = distanceFormulaCalculator.distanceFormula(m_61, m_67)
    x2 = distanceFormulaCalculator.distanceFormula(m_62, m_66)
    x3 = distanceFormulaCalculator.distanceFormula(m_63, m_65)
    return (x1 + x2 + x3) / 3

def mouthPoints():
    return [60, 61, 62, 63, 64, 65, 66, 67]
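The distanceFormulaCalculator module is not shown in the answer; a minimal sketch of what it presumably contains (a plain Euclidean distance between two (x, y) landmark points):

# distanceFormulaCalculator.py -- hypothetical reconstruction, not from the original answer
import math

def distanceFormula(p1, p2):
    # p1 and p2 are [x, y] coordinate pairs from the landmark list
    return math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)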
