How do I implement a custom shader in PyOpenGL? - python-3.x

I am diving into 3D graphics with PyOpenGL and am thoroughly overwhelmed. I want to be able to implement shadows in my scenes and prevent GL from drawing objects through each other. This is the way I see it:
The best way I can see is a custom shader.
According to the PyOpenGL tutorials on SourceForge, I need a whole bunch of libraries. I want to do this with just PyOpenGL if possible.
I need this shader to be PyQt5 compatible.
As much as possible, I would like to render on the GPU. I believe this would let the CPU focus on data handling and the rest of my project, so that all resources are fully utilized.
If I make a custom shader, I would like to include a basic function to draw lines and faces, something like what I do now with glVertex3f.
How do I go about this? I looked at this tutorial, but I cannot sort out what is bare necessity and what is not.
This is what I am using right now, but GL will draw shapes on top of each other. It also has no sort of shadow rendering.
# File structure is as follows:
# imports
# exceptions
# shape classes
# main shader

#---------- imports ----------#
from OpenGL.GL import (
    glLoadIdentity, glTranslatef, glRotatef,
    glClear, glBegin, glEnd,
    glColor3fv, glVertex3fv,
    GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT,
    GL_QUADS, GL_LINES
)
from OpenGL.GLU import gluPerspective

#---------- exceptions ----------#
class shapeNotFound(Exception):
    pass

#---------- shape classes ----------#
class cube():
    render = True
    def __init__(self, location = (0, 0, 0), size = 0.1, drawWires = True, drawFaces = False, color = (1, 1, 1)):
        self.location = location
        self.size = size
        self.drawWires = drawWires
        self.drawFaces = drawFaces
        self.color = color
        self.compute()
    def compute(self):
        x, y, z = self.location
        s = self.size / 2
        self.vertices = [ #8 corner points calculated in reference to the supplied center point
            (-s + x, s + y, -s + z), (s + x, s + y, -s + z),
            (s + x, -s + y, -s + z), (-s + x, -s + y, -s + z),
            (-s + x, s + y, s + z), (s + x, s + y, s + z),
            (s + x, -s + y, s + z), (-s + x, -s + y, s + z)
        ]
        self.wires = [ #12 tuples referencing the corner points
            (0,1), (0,3), (0,4), (2,1), (2,3), (2,6),
            (7,3), (7,4), (7,6), (5,1), (5,4), (5,6)
        ]
        self.facets = [ #6 tuples referencing the corner points
            (0, 1, 2, 3), (0, 1, 6, 5), (0, 3, 7, 4),
            (6, 5, 1, 2), (6, 7, 4, 5), (6, 7, 3, 2)
        ]
    def show(self):
        self.render = True
    def hide(self):
        self.render = False
    def move(self, location):
        self.location = location
        self.compute()
    def recolor(self, col):
        if type(col) is tuple:
            self.color = col

class mesh():
    vertices = []
    facets = []
    wires = []
    render = True
    def __init__(self, drawWires = True, drawFaces = False, color = (1, 1, 1)):
        self.drawWires = drawWires
        self.drawFaces = drawFaces
        self.color = color
        self.vertices = []
        self.facets = []
        self.wires = []
        self.render = True
    def addFacet(self, coords): #takes a tuple of three location tuples.
        addr = len(self.vertices)
        addrs = [None, None, None]
        for i in range(3):
            c = coords[i]
            if not c in self.vertices:
                self.vertices.append(c)
            addrs[i] = self.vertices.index(c)
        self.facets.append((addrs[0], addrs[1], addrs[2]))
        self.wires.append((addrs[0], addrs[1]))
        self.wires.append((addrs[2], addrs[1]))
        self.wires.append((addrs[2], addrs[0]))

#---------- main shader ----------#
class shader():
    #variables
    parent = None
    shapes = []
    shapeTypes = [type(cube), type(mesh)]
    #functions
    def __init__(self, parent = None):
        self.parent = parent
        print('Initiated new shader as child of {}.'.format(self.parent))
    def resize(self, newSize):
        self.sizeX, self.sizeY = newSize
    def addShape(self, shapeIn):
        if type(shapeIn) not in self.shapeTypes:
            raise shapeNotFound("Shape {} not found.".format(shapeIn))
        self.shapes.append(shapeIn)
    def paintGL(self):
        #This function uses shape objects, such as cube() or mesh(). Shape objects require the following:
        #a list named 'vertices' - This list is a list of points, from which edges and faces are drawn.
        #a list named 'wires' - This list is a list of tuples which refer to vertices, dictating where to draw wires.
        #a list named 'facets' - This list is a list of tuples which refer to vertices, dictating where to draw facets.
        #a bool named 'render' - This bool is used to dictate whether or not to draw the shape.
        #a bool named 'drawWires' - This bool is used to dictate whether wires should be drawn.
        #a bool named 'drawFaces' - This bool is used to dictate whether facets should be drawn.
        glLoadIdentity()
        gluPerspective(45, self.sizeX / self.sizeY, 0.1, 110.0) #set perspective?
        glTranslatef(0, 0, self.zoomLevel) #I used -10 instead of -2 in the PyGame version.
        glRotatef(self.rotateDegreeV, 1, 0, 0) #I used 2 instead of 1 in the PyGame version.
        glRotatef(self.rotateDegreeH, 0, 0, 1)
        glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
        if len(self.shapes) != 0:
            glBegin(GL_LINES)
            for s in self.shapes:
                glColor3fv(s.color)
                if s.render and s.drawWires:
                    for w in s.wires:
                        for v in w:
                            glVertex3fv(s.vertices[v])
            glEnd()
            glBegin(GL_QUADS)
            for s in self.shapes:
                glColor3fv(s.color)
                if s.render and s.drawFaces:
                    for f in s.facets:
                        for v in f:
                            glVertex3fv(s.vertices[v])
            glEnd()
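A note on the overdraw problem (my addition, not from the original post): the paintGL above already clears GL_DEPTH_BUFFER_BIT, but depth testing is never enabled, and without it later shapes simply paint over earlier ones regardless of distance. A minimal sketch of the usual fix, assuming there is an initialization hook (e.g. initializeGL in a QOpenGLWidget) where the context is current:

from OpenGL.GL import glEnable, glDepthFunc, GL_DEPTH_TEST, GL_LESS

def initializeGL(self):
    # Keep fragments that are closer to the camera; without this,
    # geometry is drawn purely in submission order.
    glEnable(GL_DEPTH_TEST)
    glDepthFunc(GL_LESS)

If the hosting widget was created without a depth buffer, its surface format also needs one. Shadows are a separate topic (typically an extra shadow-map render pass) and are not covered by this sketch.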

Related

Only integer scalar arrays can be converted to a scalar index not running under Spyder

I have the following code, which runs well under Visual Studio Code with python 3.9.10, opencv 4.5.5 and numpy 1.22.1.
I would like to migrate this code into the Spyder IDE (Version 5, another notebook), python 3.8, opencv 4.5.1 and numpy 1.22.2.
In Spyder, I get the error message TypeError: only integer scalar arrays can be converted to a scalar index in the line output_layers = [layer_names[i-1]...] (the marked line in the code section below).
I have already checked other answers on this site such as
TypeError when indexing a list with a NumPy array: only integer scalar arrays can be converted to a scalar index
which suggests list comprehension, but to my understanding I have already implemented this.
What is the reason it runs correctly in one environment but not in the other?
import cv2
import numpy as np

def get_output_layers(net):
    layer_names = net.getLayerNames()
    output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers()]
    return output_layers

def draw_prediction(img, class_id, confidence, x, y, x_plus_w, y_plus_h):
    label = str(classes[class_id])
    color = COLORS[class_id]
    cv2.rectangle(img, (x,y), (x_plus_w,y_plus_h), color, 2)
    cv2.putText(img, label, (x-10,y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

image = cv2.imread('horses.jpg')
Width = image.shape[1]
Height = image.shape[0]
scale = 0.00392

classes = None
with open(r'yolov3.txt', 'r') as f:
    classes = [line.strip() for line in f.readlines()]

COLORS = np.random.uniform(0, 255, size=(len(classes), 3))
net = cv2.dnn.readNet('yolov3.weights','yolov3.cfg')
blob = cv2.dnn.blobFromImage(image, scale, (416,416), (0,0,0), True, crop=False)
net.setInput(blob)
outs = net.forward(get_output_layers(net))

class_ids = []
confidences = []
boxes = []
conf_threshold = 0.5
nms_threshold = 0.4

for out in outs:
    for detection in out:
        scores = detection[5:]
        class_id = np.argmax(scores)
        confidence = scores[class_id]
        if confidence > 0.5:
            center_x = int(detection[0] * Width)
            center_y = int(detection[1] * Height)
            w = int(detection[2] * Width)
            h = int(detection[3] * Height)
            x = center_x - w / 2
            y = center_y - h / 2
            class_ids.append(class_id)
            confidences.append(float(confidence))
            boxes.append([x, y, w, h])

indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)

for i in indices:
    box = boxes[i]
    x = box[0]
    y = box[1]
    w = box[2]
    h = box[3]
    draw_prediction(image, class_ids[i], confidences[i], round(x), round(y),
                    round(x+w), round(y+h))

cv2.imshow("object detection", image)
cv2.waitKey()
cv2.imwrite("object-detection.jpg", image)
cv2.destroyAllWindows()
there were subtle, recent api changes wrt handling std::vector in python
(4.5.1 still expects a 2d array, but it's 1d in 4.5.5)
to avoid the whole trouble, please simply use:
output_layers = net.getUnconnectedOutLayersNames()
(like it is done in the sample)
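For completeness, a version-agnostic variant of the helper based on that suggestion (my sketch, assuming the standard cv2.dnn API):

def get_output_layers(net):
    # Returns the output layer names directly, so it behaves the same
    # whether getUnconnectedOutLayers() yields a 1d or a 2d array.
    return list(net.getUnconnectedOutLayersNames())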

vtk: how to obtain the image pixel index from a world point

If I pick a world point from an image, how can I convert the world coordinate to an image index?
import vtk
import numpy as np
from vtk.util.numpy_support import numpy_to_vtk

def numpyToVTK(data, multi_component=False, type='float'):
    if type == 'float':
        data_type = vtk.VTK_FLOAT
    elif type == 'char':
        data_type = vtk.VTK_UNSIGNED_CHAR
    else:
        raise RuntimeError('unknown type')
    if multi_component == False:
        if len(data.shape) == 2:
            data = data[:, :, np.newaxis]
        flat_data_array = data.transpose(2,1,0).flatten()
        vtk_data = numpy_to_vtk(num_array=flat_data_array, deep=True, array_type=data_type)
        shape = data.shape
    else:
        assert len(data.shape) == 3, 'only test for 2D RGB'
        flat_data_array = data.transpose(1, 0, 2)
        flat_data_array = np.reshape(flat_data_array, newshape=[-1, data.shape[2]])
        vtk_data = numpy_to_vtk(num_array=flat_data_array, deep=True, array_type=data_type)
        shape = [data.shape[0], data.shape[1], 1]
    img = vtk.vtkImageData()
    img.GetPointData().SetScalars(vtk_data)
    img.SetDimensions(shape[0], shape[1], shape[2])
    return img

global sphereActor, textActor
sphereActor = None
textActor = None

def mouseMoveEvent(iren, event):
    x, y = iren.GetEventPosition()
    picker = vtk.vtkWorldPointPicker()
    picker.Pick(x, y, 0, render)
    worldPoint = picker.GetPickPosition()
    ##############################################
    ## convert world point to image index
    ##############################################
    sphere = vtk.vtkSphereSource()
    sphere.SetCenter(worldPoint[0], worldPoint[1], worldPoint[2])
    sphere.SetRadius(2)
    sphere.Update()
    sphereMapper = vtk.vtkPolyDataMapper()
    sphereMapper.SetInputData(sphere.GetOutput())
    global sphereActor, textActor
    if sphereActor != None:
        render.RemoveActor(sphereActor)
    sphereActor = vtk.vtkActor()
    sphereActor.SetMapper(sphereMapper)
    sphereActor.GetProperty().SetColor(255, 0, 0)
    render.AddActor(sphereActor)
    render.Render()
    if textActor != None:
        render.RemoveActor(textActor)
    textActor = vtk.vtkTextActor()
    textActor.SetInput('world coordinate: (%.2f, %.2f, %.2f)'%(worldPoint[0], worldPoint[1], worldPoint[2]))
    textActor.GetTextProperty().SetColor(1, 0, 0)
    textActor.GetTextProperty().SetFontSize(15)
    render.AddActor(textActor)

img = np.zeros(shape=[128, 128])
for i in range(128):
    for j in range(128):
        img[i, j] = i+j

vtkImg = numpyToVTK(img)
imgActor = vtk.vtkImageActor()
imgActor.SetInputData(vtkImg)

render = vtk.vtkRenderer()
render.AddActor(imgActor)
# render.Render()

renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(render)
renWin.Render()

iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())
iren.Initialize()
iren.AddObserver('MouseMoveEvent', mouseMoveEvent)
iren.Start()
In the above code, if I don't rotate the image, the picked world point is (x, y, 0), and this agrees with what I know. For the world point (x, y, z) and the image index (i, j, k), the conversion should be:
worldPoint (x,y,z) = i*spacingX*directionX + j*spacingY*directionY + k*spacingZ*directionZ + originPoint
In the above code, the image is converted from numpy, thus:
directionX = [1, 0, 0]
directionY = [0, 1, 0]
directionZ = [0, 0, 1]
originPoint=[0, 0, 0]
spacingX=1
spacingY=1
spacingZ=1
In this way, x = i, y = j, z = k. Since this is a 2D image, k should be 0 and z should also be 0.
Then, after I rotate the image, z is not 0; in my test it is about -0.24, and I don't know why.
It means the following conversion is wrong. How can I obtain the image index from the world point?
worldPoint (x,y,z) = i*spacingX*directionX + j*spacingY*directionY + k*spacingZ*directionZ + originPoint
Any suggestion is appreciated!
vtkImageData has the method TransformPhysicalPointToContinuousIndex for going from world space to image space and TransformIndexToPhysicalPoint to go the other way.
I don't think the computation you're doing is right, since the direction is a 3x3 rotation matrix.
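A minimal sketch of how that could look inside mouseMoveEvent above, assuming a reasonably recent VTK where vtkImageData exposes these methods (the variable names follow the question's code):

# Continuous (fractional) i, j, k index for the picked world point
ijk = [0.0, 0.0, 0.0]
vtkImg.TransformPhysicalPointToContinuousIndex(worldPoint, ijk)
# round the entries of ijk if whole-voxel indices are needed

# Going the other way, from an index back to a world point
p = [0.0, 0.0, 0.0]
vtkImg.TransformIndexToPhysicalPoint(0, 0, 0, p)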

Move Robot in a 10 X 10 grid

I am working on this code challenge:
Given a 2D bot/robot which can only move in four directions, move forward which is UP(U), move backward which is DOWN(D), LEFT(L), RIGHT(R) in a 10x10 grid. The robot can't go beyond the 10x10 area.
Given a string consisting of instructions to move.
Output the coordinates of a robot after executing the instructions. Initial position of robot is at origin(0, 0).
Example:
Input : move = “UDDLRL”
Output : (-1, -1)
Explanation:
Move U : (0, 0)–(0, 1)
Move D : (0, 1)–(0, 0)
Move D : (0, 0)–(0, -1)
Move L : (0, -1)–(-1, -1)
Move R : (-1, -1)–(0, -1)
Move L : (0, -1)–(-1, -1)
Therefore final position after the complete
movement is: (-1, -1)
I got the code working without using the 10x10 grid information. How could I incorporate the 10x10 grid information into my solution in an OOP fashion? My solution doesn't follow the OOP principles.
# function to find final position of
# robot after the complete movement
def finalPosition(move):
    l = len(move)
    countUp, countDown = 0, 0
    countLeft, countRight = 0, 0
    # traverse the instruction string 'move'
    for i in range(l):
        # for each movement increment its respective counter
        if (move[i] == 'U'):
            countUp += 1
        elif(move[i] == 'D'):
            countDown += 1
        elif(move[i] == 'L'):
            countLeft += 1
        elif(move[i] == 'R'):
            countRight += 1
    # required final position of robot
    print("Final Position: (", (countRight - countLeft),
          ", ", (countUp - countDown), ")")

# Driver code
if __name__ == '__main__':
    move = "UDDLLRUUUDUURUDDUULLDRRRR"
    finalPosition(move)
This fixes it:
class Robot:
    class Mover:
        def __init__(self, x, y):
            self.x, self.y = x, y
        def new_pos(self, x, y):
            new_x = x + self.x
            new_y = y + self.y
            if (new_x > 9 or new_y > 9):
                raise ValueError("Box dimensions are greater than 10 X 10")
            return new_x, new_y

    WALKS = dict(U=Mover(0, 1), D=Mover(0, -1),
                 L=Mover(-1, 0), R=Mover(1, 0))

    def move(self, moves):
        x = y = 0
        for id in moves:
            x, y = self.WALKS[id].new_pos(x, y)
        return (x, y)

if __name__ == '__main__':
    moves2 = "UDDLLRUUUDUURUDDUULLDRRRR"
    robot = Robot()
    print(robot.move(moves2))
Output :
(2,3)
The way you use your counters makes it less trivial to detect that you would hit the border of the 10x10 grid. Without changing too much, you could replace the countUp and countDown variables by one countVertical variable, and add -1 to it when going up and 1 when going down. Then ignore a move if it would make that counter negative or greater than 9. And obviously you would do the same for horizontal movements.
[Edit: After the edit to your question, it turns out that you want the Y-coordinate to be opposite to what I assumed above. So I have changed the sign of the Y-coordinate updates (+1, -1).]
That's really it.
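A minimal sketch of that counter-based tweak (my reading of the suggestion above, not code from the original answer), keeping the convention where U increases y and moves that would leave the 0..9 range are ignored:

def finalPosition(move):
    x = y = 0  # horizontal / vertical counters
    for step in move:
        if step == 'U' and y < 9:
            y += 1
        elif step == 'D' and y > 0:
            y -= 1
        elif step == 'L' and x > 0:
            x -= 1
        elif step == 'R' and x < 9:
            x += 1
        # any move that would leave the 10x10 grid is simply skipped
    return (x, y)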
Now, to make this more OOP, you could define a Robot class which maintains its x and y coordinates. Either way, it would be good to move the print call out of your function, so the function only deals with the movements, not with the reporting (separation of concerns).
Here is how it could work:
class Robot:
    def __init__(self, x=0, y=0):
        self.position(x, y)

    def position(self, x, y):
        self.x = min(9, max(0, x))
        self.y = min(9, max(0, y))

    def move(self, moves):
        for move in moves:
            if move == 'U':
                self.position(self.x, self.y + 1)
            elif move == 'D':
                self.position(self.x, self.y - 1)
            elif move == 'L':
                self.position(self.x - 1, self.y)
            elif move == 'R':
                self.position(self.x + 1, self.y)
            else:
                raise ValueError(f"Invalid direction '{move}'")

if __name__ == '__main__':
    moves = "UDDLLRUUUDUURUDDUULLDRRRR"
    robot = Robot(0, 0)
    robot.move(moves)
    print(f"Final position: {robot.x}, {robot.y}")

When I compile YOLOv3 I get warnings

"""YOLO v3 output
"""
import numpy as np
import keras.backend as K
from keras.models import load_model
import os
class YOLO:
def __init__(self, obj_threshold, nms_threshold):
"""Init.
# Arguments
obj_threshold: Integer, threshold for object.
nms_threshold: Integer, threshold for box.
"""
self._t1 = obj_threshold
self._t2 = nms_threshold
self._yolo = load_model('data/yolo.h5')
def _process_feats(self, out, anchors, mask):
"""process output features.
# Arguments
out: Tensor (N, N, 3, 4 + 1 +80), output feature map of yolo.
anchors: List, anchors for box.
mask: List, mask for anchors.
# Returns
boxes: ndarray (N, N, 3, 4), x,y,w,h for per box.
box_confidence: ndarray (N, N, 3, 1), confidence for per box.
box_class_probs: ndarray (N, N, 3, 80), class probs for per box.
"""
grid_h, grid_w, num_boxes = map(int, out.shape[1: 4])
anchors = [anchors[i] for i in mask]
# Reshape to batch, height, width, num_anchors, box_params.
anchors_tensor = K.reshape(K.variable(anchors),
[1, 1,len(anchors), 2])
out = out[0]
box_xy = K.get_value(K.sigmoid(out[..., :2]))
box_wh = K.get_value(K.exp(out[..., 2:4]) * anchors_tensor)
box_confidence = K.get_value(K.sigmoid(out[..., 4]))
box_confidence = np.expand_dims(box_confidence, axis=-1)
box_class_probs = K.get_value(K.sigmoid(out[..., 5:]))
col = np.tile(np.arange(0, grid_w), grid_w).reshape(-1, grid_w)
row = np.tile(np.arange(0, grid_h).reshape(-1, 1), grid_h)
col = col.reshape(grid_h, grid_w, 1, 1).repeat(3, axis=-2)
row = row.reshape(grid_h, grid_w, 1, 1).repeat(3, axis=-2)
grid = np.concatenate((col, row), axis=-1)
box_xy += grid
box_xy /= (grid_w, grid_h)
box_wh /= (416, 416)
box_xy -= (box_wh / 2.)
boxes = np.concatenate((box_xy, box_wh), axis=-1)
return boxes, box_confidence, box_class_probs
def _filter_boxes(self, boxes, box_confidences, box_class_probs):
"""Filter boxes with object threshold.
# Arguments
boxes: ndarray, boxes of objects.
box_confidences: ndarray, confidences of objects.
box_class_probs: ndarray, class_probs of objects.
# Returns
boxes: ndarray, filtered boxes.
classes: ndarray, classes for boxes.
scores: ndarray, scores for boxes.
"""
box_scores = box_confidences * box_class_probs
box_classes = np.argmax(box_scores, axis=-1)
box_class_scores = np.max(box_scores, axis=-1)
pos = np.where(box_class_scores >= self._t1)
boxes = boxes[pos]
classes = box_classes[pos]
scores = box_class_scores[pos]
return boxes, classes, scores
def _nms_boxes(self, boxes, scores):
"""Suppress non-maximal boxes.
# Arguments
boxes: ndarray, boxes of objects.
scores: ndarray, scores of objects.
# Returns
keep: ndarray, index of effective boxes.
"""
x = boxes[:, 0]
y = boxes[:, 1]
w = boxes[:, 2]
h = boxes[:, 3]
areas = w * h
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x[i], x[order[1:]])
yy1 = np.maximum(y[i], y[order[1:]])
xx2 = np.minimum(x[i] + w[i], x[order[1:]] + w[order[1:]])
yy2 = np.minimum(y[i] + h[i], y[order[1:]] + h[order[1:]])
w1 = np.maximum(0.0, xx2 - xx1 + 1)
h1 = np.maximum(0.0, yy2 - yy1 + 1)
inter = w1 * h1
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= self._t2)[0]
order = order[inds + 1]
keep = np.array(keep)
return keep
def _yolo_out(self, outs, shape):
"""Process output of yolo base net.
# Argument:
outs: output of yolo base net.
shape: shape of original image.
# Returns:
boxes: ndarray, boxes of objects.
classes: ndarray, classes of objects.
scores: ndarray, scores of objects.
"""
masks = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
anchors = [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45],
[59, 119], [116, 90], [156, 198], [373, 326]]
boxes, classes, scores = [], [], []
for out, mask in zip(outs, masks):
b, c, s = self._process_feats(out, anchors, mask)
b, c, s = self._filter_boxes(b, c, s)
boxes.append(b)
classes.append(c)
scores.append(s)
boxes = np.concatenate(boxes)
classes = np.concatenate(classes)
scores = np.concatenate(scores)
# Scale boxes back to original image shape.
width, height = shape[1], shape[0]
image_dims = [width, height, width, height]
boxes = boxes * image_dims
nboxes, nclasses, nscores = [], [], []
for c in set(classes):
inds = np.where(classes == c)
b = boxes[inds]
c = classes[inds]
s = scores[inds]
keep = self._nms_boxes(b, s)
nboxes.append(b[keep])
nclasses.append(c[keep])
nscores.append(s[keep])
if not nclasses and not nscores:
return None, None, None
boxes = np.concatenate(nboxes)
classes = np.concatenate(nclasses)
scores = np.concatenate(nscores)
return boxes, classes, scores
def predict(self, image, shape):
"""Detect the objects with yolo.
# Arguments
image: ndarray, processed input image.
shape: shape of original image.
# Returns
boxes: ndarray, boxes of objects.
classes: ndarray, classes of objects.
scores: ndarray, scores of objects.
"""
outs = self._yolo.predict(image)
boxes, classes, scores = self._yolo_out(outs, shape)
return boxes, classes, scores
This is the YOLO v3 code, and when I run the main program I get this error:
InvalidArgumentError: Incompatible shapes: [13,13,2] vs. [1,1,3,2] [Op:Mul]
The main part is:
import cv2
import numpy as np
from yolo_model import YOLO

yolo = YOLO(0.6, 0.5)
file = "data/coco_classes.txt"
with open(file) as f:
    class_name = f.readlines()
all_classes = [c.strip() for c in class_name]
print("A")

f = "dog_cat.jpg"
path = "images/"+f
image = cv2.imread(path)
cv2.imshow("image", image)

pimage = cv2.resize(image, (416,416))
pimage = np.array(pimage, dtype = "float32")
pimage /= 255.0
pimage = np.expand_dims(pimage, axis = 0)

# yolo
boxes, classes, scores = yolo.predict(pimage, image.shape)

for box, score, cl in zip(boxes, scores, classes):
    x, y, w, h = box
    top = max(0, np.floor(x + 0.5).astype(int))
    left = max(0, np.floor(y + 0.5).astype(int))
    right = max(0, np.floor(x + w + 0.5).astype(int))
    bottom = max(0, np.floor(y + h + 0.5).astype(int))
    cv2.rectangle(image, (top,left), (right, bottom), (255,0,0), 2)
    cv2.putText(image, "{} {}".format(all_classes[cl], score), (top,left-6), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0,0,255), 1, cv2.LINE_AA)

cv2.imshow("yolo", image)
The problem occurs at box_wh = K.get_value(K.exp(out[..., 2:4]) * anchors_tensor). Is the multiplication necessary? And what does box_wh do?
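No accepted answer is included here, but two hedged notes: box_wh is the decoded width/height of each predicted box, and multiplying exp(out[..., 2:4]) by the anchors is the standard YOLOv3 box decode, so the multiply itself is needed. The shape error ([13,13,2] vs [1,1,3,2]) suggests the loaded yolo.h5 returns a fused (1, 13, 13, 255) feature map rather than the (1, 13, 13, 3, 85) layout this code expects; one possible fix (an assumption about the model, not a confirmed solution) is to restore the per-anchor axis before slicing:

# Sketch: inside _process_feats, right after "out = out[0]".
# Assumes out currently has shape (grid_h, grid_w, 3 * 85); at this point
# anchors has already been reduced to the 3 anchors selected by mask.
out = out.reshape(grid_h, grid_w, len(anchors), -1)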

What is the best way to extract text contained within a table in a pdf using python?

I'm constructing a program to extract text from a pdf, put it in a structured format, and send it off to a database. I have roughly 1,400 individual pdfs that all follow a similar format, but nuances in the verbiage and plan designs that the documents summarize make it tricky.
I've played around with a couple of different pdf readers in python, including tabula-py and pdfminer, but none of them quite gets to what I'd like to do. Tabula reads in all of the text very well; however, it pulls everything as it is explicitly laid out horizontally, ignoring the fact that some of the text is wrapped in a box. For example, if you open up the sample SBC I have attached where it reads "What is the overall deductible?", Tabula will read in "What is the overall $500/Individual or...", missing the fact that the word "deductible" is really part of the first sentence. (Note the files I'm working with are pdfs, but I've attached a jpeg because I couldn't figure out how to attach a pdf.)
import tabula

df = tabula.read_pdf(*filepath*, pandas_options={'header': None})
print(df.iloc[0][0])
print(df)
In the end, I'd really like to be able to parse out the text within each box so that I can better identify what values belong to deductible, out-of-pocket limits, copays/coinsurance, etc. I thought possibly some sort of OCR would allow me to recognize which parts of the PDF are contained in the blue rectangles and then pull the string from there, but I really don't know where to start with that. Sample SBC
@jpnadas In this case the code you copied from my answer in this post isn't really suitable, because it addresses the case when a table doesn't have a surrounding grid. That algorithm looks for repeating blocks of text and tries to find a pattern that resembles a table heuristically.
But in this particular case the table does have a grid, and by taking advantage of this we can achieve a much more accurate result.
The strategy is the following:
1. Increase image gamma to make the grid darker.
2. Get rid of colour and apply Otsu thresholding.
3. Find long vertical and horizontal lines in the image and create a mask from them using the erode and dilate functions.
4. Find the cell blocks in the mask using the findContours function.
5. Find table objects:
   5.1 The rest can be as in the post about finding a table without the grid: find table structure heuristically.
   5.2 An alternative approach could be using the hierarchy returned by the findContours function. This approach is even more accurate and allows finding multiple tables on a single image.
Having the cell coordinates, it's easy to extract a certain cell image from the original image:
cell_image = image[cell_y:cell_y + cell_h, cell_x:cell_x + cell_w]
Apply OCR to each cell_image.
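For the OCR step itself, a small sketch using pytesseract (my choice of OCR engine here, not something the original answer prescribes):

import cv2
import pytesseract

def ocr_cell(image, cell):
    # cell is an (x, y, w, h) tuple as produced by find_table_in_boxes
    x, y, w, h = cell
    cell_image = image[y:y + h, x:x + w]
    gray = cv2.cvtColor(cell_image, cv2.COLOR_BGR2GRAY)
    # --psm 6 treats the crop as a single uniform block of text
    return pytesseract.image_to_string(gray, config="--psm 6").strip()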
BUT! I consider the OpenCV approach a last resort, for when you're not able to read the PDF's contents: for instance, when a PDF contains a raster image inside.
If it's a vector-based PDF and its contents are readable, it makes more sense to find the table inside the contents and just read the text from it instead of doing heavy 'OCR lifting'.
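As an illustration of that text-layer route, a short sketch with pdfplumber (one possible library choice, not part of the original answer; the file name is hypothetical). It only works when the PDF actually has extractable text:

import pdfplumber

with pdfplumber.open("sample_sbc.pdf") as pdf:
    for page in pdf.pages:
        for table in page.extract_tables():
            for row in table:
                print(row)  # each row is a list of cell strings (None for empty cells)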
Here's the code for reference for more accurate table recognition:
import os
import imutils
import numpy as np
import argparse
import cv2

def gamma_correction(image, gamma = 1.0):
    look_up_table = np.empty((1,256), np.uint8)
    for i in range(256):
        look_up_table[0,i] = np.clip(pow(i / 255.0, gamma) * 255.0, 0, 255)
    result = cv2.LUT(image, look_up_table)
    return result

def pre_process_image(image):
    # Let's get rid of color first
    # Applying gamma to make the table lines darker
    gamma = gamma_correction(image, 2)
    # Getting rid of color
    gray = cv2.cvtColor(gamma, cv2.COLOR_BGR2GRAY)
    # Then apply Otsu threshold to reveal important areas
    ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    # inverting the thresholded image
    return ~thresh

def get_horizontal_lines_mask(image, horizontal_size=100):
    horizontal = image.copy()
    horizontal_structure = cv2.getStructuringElement(cv2.MORPH_RECT, (horizontal_size, 1))
    horizontal = cv2.erode(horizontal, horizontal_structure, anchor=(-1, -1), iterations=1)
    horizontal = cv2.dilate(horizontal, horizontal_structure, anchor=(-1, -1), iterations=1)
    return horizontal

def get_vertical_lines_mask(image, vertical_size=100):
    vertical = image.copy()
    vertical_structure = cv2.getStructuringElement(cv2.MORPH_RECT, (1, vertical_size))
    vertical = cv2.erode(vertical, vertical_structure, anchor=(-1, -1), iterations=1)
    vertical = cv2.dilate(vertical, vertical_structure, anchor=(-1, -1), iterations=1)
    return vertical

def make_lines_mask(preprocessed, min_horizontal_line_size=100, min_vertical_line_size=100):
    hor = get_horizontal_lines_mask(preprocessed, min_horizontal_line_size)
    ver = get_vertical_lines_mask(preprocessed, min_vertical_line_size)
    mask = np.zeros((preprocessed.shape[0], preprocessed.shape[1], 1), dtype=np.uint8)
    mask = cv2.bitwise_or(mask, hor)
    mask = cv2.bitwise_or(mask, ver)
    return ~mask

def find_cell_boxes(mask):
    # Looking for the text spots contours
    # OpenCV 3
    # img, contours, hierarchy = cv2.findContours(pre, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # OpenCV 4
    contours = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = imutils.grab_contours(contours)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)
    image_width = mask.shape[1]
    # Getting the texts bounding boxes based on the text size assumptions
    boxes = []
    for contour in contours:
        box = cv2.boundingRect(contour)
        w = box[2]
        # Excluding the page box shape but adding smaller boxes
        if w < 0.95 * image_width:
            boxes.append(box)
    return boxes

def find_table_in_boxes(boxes, cell_threshold=10, min_columns=2):
    rows = {}
    cols = {}
    # Clustering the bounding boxes by their positions
    for box in boxes:
        (x, y, w, h) = box
        col_key = x // cell_threshold
        row_key = y // cell_threshold
        cols[row_key] = [box] if col_key not in cols else cols[col_key] + [box]
        rows[row_key] = [box] if row_key not in rows else rows[row_key] + [box]
    # Filtering out the clusters having less than 2 cols
    table_cells = list(filter(lambda r: len(r) >= min_columns, rows.values()))
    # Sorting the row cells by x coord
    table_cells = [list(sorted(tb)) for tb in table_cells]
    # Sorting rows by the y coord
    table_cells = list(sorted(table_cells, key=lambda r: r[0][1]))
    return table_cells

def build_vertical_lines(table_cells):
    if table_cells is None or len(table_cells) <= 0:
        return [], []
    max_last_col_width_row = max(table_cells, key=lambda b: b[-1][2])
    max_x = max_last_col_width_row[-1][0] + max_last_col_width_row[-1][2]
    max_last_row_height_box = max(table_cells[-1], key=lambda b: b[3])
    max_y = max_last_row_height_box[1] + max_last_row_height_box[3]
    hor_lines = []
    ver_lines = []
    for box in table_cells:
        x = box[0][0]
        y = box[0][1]
        hor_lines.append((x, y, max_x, y))
    for box in table_cells[0]:
        x = box[0]
        y = box[1]
        ver_lines.append((x, y, x, max_y))
    (x, y, w, h) = table_cells[0][-1]
    ver_lines.append((max_x, y, max_x, max_y))
    (x, y, w, h) = table_cells[0][0]
    hor_lines.append((x, max_y, max_x, max_y))
    return hor_lines, ver_lines

if __name__ == "__main__":
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", required=True, help="path to images directory")
    args = vars(ap.parse_args())
    in_file = args["image"]
    filename_base = in_file.replace(os.path.splitext(in_file)[1], "")
    img = cv2.imread(in_file)
    pre_processed = pre_process_image(img)
    # Visualizing pre-processed image
    cv2.imwrite(filename_base + ".pre.png", pre_processed)
    lines_mask = make_lines_mask(pre_processed, min_horizontal_line_size=1800, min_vertical_line_size=500)
    # Visualizing table lines mask
    cv2.imwrite(filename_base + ".mask.png", lines_mask)
    cell_boxes = find_cell_boxes(lines_mask)
    cells = find_table_in_boxes(cell_boxes)
    # apply OCR to each cell rect here
    # the cells array contains cell coordinates in tuples (x, y, w, h)
    hor_lines, ver_lines = build_vertical_lines(cells)
    # Visualize the table lines
    vis = img.copy()
    for line in hor_lines:
        [x1, y1, x2, y2] = line
        cv2.line(vis, (x1, y1), (x2, y2), (0, 0, 255), 1)
    for line in ver_lines:
        [x1, y1, x2, y2] = line
        cv2.line(vis, (x1, y1), (x2, y2), (0, 0, 255), 1)
    cv2.imwrite(filename_base + ".result.png", vis)
Some parameters are hard-coded:
page size threshold - 0.95
min horizontal line size - 1800 px
min vertical line size - 500 px
You can provide them as configurable parameters or make them relative to image size.
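One possible way to derive those hard-coded sizes from the page image itself (the fractions below are my own guesses, not values from the original answer):

height, width = img.shape[:2]
lines_mask = make_lines_mask(pre_processed,
                             min_horizontal_line_size=int(0.75 * width),
                             min_vertical_line_size=int(0.40 * height))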
Results: (the result images from the original answer are not reproduced here)
I think that the best way to do what you need is to find and isolate the cells in the file and then apply OCR to each individual cell.
There are a number of solutions on SO for that; I got the code from this answer and played around a little with the parameters to get the output below (not perfect yet, but you can tweak it a little bit yourself).
import os
import cv2
import imutils

# This only works if there's only one table on a page
# Important parameters:
#  - morph_size
#  - min_text_height_limit
#  - max_text_height_limit
#  - cell_threshold
#  - min_columns

def pre_process_image(img, save_in_file, morph_size=(23, 23)):
    # get rid of the color
    pre = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Otsu threshold
    pre = cv2.threshold(pre, 250, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    # dilate the text to make it solid spot
    cpy = pre.copy()
    struct = cv2.getStructuringElement(cv2.MORPH_RECT, morph_size)
    cpy = cv2.dilate(~cpy, struct, anchor=(-1, -1), iterations=1)
    pre = ~cpy
    if save_in_file is not None:
        cv2.imwrite(save_in_file, pre)
    return pre

def find_text_boxes(pre, min_text_height_limit=20, max_text_height_limit=120):
    # Looking for the text spots contours
    contours, _ = cv2.findContours(pre, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # Getting the texts bounding boxes based on the text size assumptions
    boxes = []
    for contour in contours:
        box = cv2.boundingRect(contour)
        h = box[3]
        if min_text_height_limit < h < max_text_height_limit:
            boxes.append(box)
    return boxes

def find_table_in_boxes(boxes, cell_threshold=100, min_columns=3):
    rows = {}
    cols = {}
    # Clustering the bounding boxes by their positions
    for box in boxes:
        (x, y, w, h) = box
        col_key = x // cell_threshold
        row_key = y // cell_threshold
        cols[row_key] = [box] if col_key not in cols else cols[col_key] + [box]
        rows[row_key] = [box] if row_key not in rows else rows[row_key] + [box]
    # Filtering out the clusters having less than 2 cols
    table_cells = list(filter(lambda r: len(r) >= min_columns, rows.values()))
    # Sorting the row cells by x coord
    table_cells = [list(sorted(tb)) for tb in table_cells]
    # Sorting rows by the y coord
    table_cells = list(sorted(table_cells, key=lambda r: r[0][1]))
    return table_cells

def build_lines(table_cells):
    if table_cells is None or len(table_cells) <= 0:
        return [], []
    max_last_col_width_row = max(table_cells, key=lambda b: b[-1][2])
    max_x = max_last_col_width_row[-1][0] + max_last_col_width_row[-1][2]
    max_last_row_height_box = max(table_cells[-1], key=lambda b: b[3])
    max_y = max_last_row_height_box[1] + max_last_row_height_box[3]
    hor_lines = []
    ver_lines = []
    for box in table_cells:
        x = box[0][0]
        y = box[0][1]
        hor_lines.append((x, y, max_x, y))
    for box in table_cells[0]:
        x = box[0]
        y = box[1]
        ver_lines.append((x, y, x, max_y))
    (x, y, w, h) = table_cells[0][-1]
    ver_lines.append((max_x, y, max_x, max_y))
    (x, y, w, h) = table_cells[0][0]
    hor_lines.append((x, max_y, max_x, max_y))
    return hor_lines, ver_lines

if __name__ == "__main__":
    in_file = os.path.join(".", "test.jpg")
    pre_file = os.path.join(".", "pre.png")
    out_file = os.path.join(".", "out.png")
    img = cv2.imread(os.path.join(in_file))
    pre_processed = pre_process_image(img, pre_file)
    text_boxes = find_text_boxes(pre_processed)
    cells = find_table_in_boxes(text_boxes)
    hor_lines, ver_lines = build_lines(cells)
    # Visualize the result
    vis = img.copy()
    # for box in text_boxes:
    #     (x, y, w, h) = box
    #     cv2.rectangle(vis, (x, y), (x + w - 2, y + h - 2), (0, 255, 0), 1)
    for line in hor_lines:
        [x1, y1, x2, y2] = line
        cv2.line(vis, (x1, y1), (x2, y2), (0, 0, 255), 1)
    for line in ver_lines:
        [x1, y1, x2, y2] = line
        cv2.line(vis, (x1, y1), (x2, y2), (0, 0, 255), 1)
    cv2.imwrite(out_file, vis)
