How to calculate the skew slope of a skewed document using connected components (OpenCV, Python)? - python-3.x

I am using the connected components (NN) method to detect and correct a skewed document. I have an image of a skewed document and have done the following steps:
1. Document image preprocessing.
2. Eligible connected components.
import cv2
import numpy as np
import matplotlib.pyplot as plt

def imshow(image1):
    plt.figure(figsize=(20, 10))
    plt.imshow(image1)

# invr_binary, connectivity and image come from the preprocessing step (1.)
output = cv2.connectedComponentsWithStats(invr_binary, connectivity, cv2.CV_32S)
(numLabels, labels, stats, centroids) = output

## non-text removal
w_avg = stats[1:, cv2.CC_STAT_WIDTH].mean()
h_avg = stats[1:, cv2.CC_STAT_HEIGHT].mean()
B_max = (w_avg * h_avg) * 4
B_min = (w_avg * h_avg) * 0.25
result = np.zeros(labels.shape, np.uint8)
output1 = image.copy()
a, b = 0.6, 2

for i in range(1, numLabels):  # label 0 is the background
    area = stats[i, cv2.CC_STAT_AREA]
    if B_min < area < B_max:  ## non-text removal
        result[labels == i] = 255
    x = stats[i, cv2.CC_STAT_LEFT]
    y = stats[i, cv2.CC_STAT_TOP]
    w = stats[i, cv2.CC_STAT_WIDTH]
    h = stats[i, cv2.CC_STAT_HEIGHT]
    (cX, cY) = centroids[i]
    c = w / h
    if a < c < b:  ## A- and C-type filtering
        result[labels == i] = 255
        cv2.rectangle(output1, (x, y), (x + w, y + h), (0, 255, 0), 1)
        cv2.circle(output1, (int(cX), int(cY)), 1, (0, 0, 255), -1)

imshow(output1)
Input image:
Output image:
After finding the centre points of the text (shown in the output image), the next step is skew slope calculation, but I could not understand how to calculate it. I am following section 3.3 (page 7) of this research paper:
https://www.mdpi.com/2079-9292/9/1/55/pdf
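One possible way to estimate the skew slope from the filtered centroids (a minimal sketch; this is not necessarily the exact nearest-neighbour chain procedure of section 3.3 of the paper): pair each centroid with its nearest neighbour, compute the angle of every pair, and take the median of the near-horizontal angles as the skew estimate.
import numpy as np

def estimate_skew_angle(centroids):
    # for every centroid, take the angle to its nearest neighbour and return
    # the median of the near-horizontal angles (in degrees)
    pts = np.asarray(centroids, dtype=np.float64)
    angles = []
    for i, p in enumerate(pts):
        d = np.linalg.norm(pts - p, axis=1)
        d[i] = np.inf                      # ignore the point itself
        j = int(np.argmin(d))              # index of the nearest neighbour
        dx, dy = pts[j] - p
        a = np.degrees(np.arctan2(dy, dx))
        if a > 90:                         # fold into (-90, 90] so left/right
            a -= 180                       # neighbours give the same angle
        elif a <= -90:
            a += 180
        angles.append(a)
    angles = [a for a in angles if abs(a) < 45]  # keep pairs on the same text line
    return float(np.median(angles)) if angles else 0.0

# usage with the centroids kept by the area/aspect filters above (hypothetical name):
# angle = estimate_skew_angle(filtered_centroids)
# (h, w) = image.shape[:2]
# M = cv2.getRotationMatrix2D((w / 2, h / 2), angle, 1.0)
# deskewed = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)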

Related

Detect text in passport images with OpenCV

I am detecting text from various passport images with OpenCV. The task is to get the cropped text present on passports like Name, DOB, Nationality, etc. The current code is given below:
import cv2
import imutils
import numpy as np

do_erosion = True  # erosion flag used further below (not defined in the original snippet)

image = cv2.imread(image)  # read image (`image` holds the file path here)
image = imutils.resize(image, height=600)
original = image.copy()  # copy used below for cropping the ROIs (assumed; not defined in the original snippet)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (3, 3), 0)
rectKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (13, 5))
sqKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (21, 21))
blackhat = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT, rectKernel)
gradX = cv2.Sobel(blackhat, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
gradX = np.absolute(gradX)
(minVal, maxVal) = (np.min(gradX), np.max(gradX))
gradX = (255 * ((gradX - minVal) / (maxVal - minVal))).astype("uint8")
gradX = cv2.morphologyEx(gradX, cv2.MORPH_CLOSE, rectKernel)
thresh = cv2.threshold(gradX, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
if do_erosion:  # do erosion only if the flag is True
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, sqKernel)
    thresh = cv2.erode(thresh, None, iterations=4)
p = int(image.shape[1] * 0.05)
thresh[:, 0:p] = 0
thresh[:, image.shape[1] - p:] = 0  # thresholded image shown below
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)  # find contours in the thresholded image
cnts = imutils.grab_contours(cnts)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
rois = []
# loop over the contours
for c in cnts:
    # compute the bounding box of the contour and use it to compute the
    # aspect ratio and the ratio of the box width to the image width
    (x, y, w, h) = cv2.boundingRect(c)
    ar = w / float(h)
    crWidth = w / float(gray.shape[1])
    # check whether the aspect ratio and coverage width meet the criteria
    if ar > 2 and crWidth > 0.05:
        # pad the bounding box since we applied erosions and now need to re-grow it
        pX = int((x + w) * 0.03)
        pY = int((y + h) * 0.03)
        (x, y) = (x - pX, y - pY)
        (w, h) = (w + (pX * 2), h + (pY * 2))
        # extract the ROI from the image and draw a bounding box around it
        roi = original[y:y + h, x:x + w].copy()  # interested in finding all ROIs having text
        rois.append(roi)
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
I am detecting some textual values correctly, but the solution is not generic. Sometimes it extracts only the year of the DOB but not the full date (sometimes it detects the month and day as well, but not in the same ROI). The original and thresholded images are shown below:
As you can see, in the "ROIs found" image some fields such as the DOB and the date of expiry are not detected as single ROIs. Any help in extracting all the text regions from the passport will be highly appreciated. Thanks!
You need to tweak the Aspect Ratio and Coverage ratio thresholds to obtain all the desired bounding boxes.
When I run your code, for 05 the value of ar is 1.541 and the value of crWidth is 0.03. Since these values are less than the thresholds you have specified, they are getting filtered out. This is why some of the words do not have bounding boxes in the final image.
But since you want to get the entire DOB in a single bounding box, just before the line thresh[:, 0:p] = 0 you can apply a dilation operation:
thresh = cv2.morphologyEx(thresh, cv2.MORPH_DILATE, np.ones((1, 30), np.uint8))
This will dilate the pixels and combine blobs that are nearby horizontally. The resultant image after preprocessing is as follows -
The final image -
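For context, here is a minimal sketch of where that dilation fits in the preprocessing chain (variable names taken from the question's code):
# ... blackhat, Sobel, Otsu threshold and the optional erosion as in the question ...

# horizontal dilation: a 1x30 kernel only grows pixels sideways, so blobs on the
# same text line (e.g. day, month and year of the DOB) merge into one region,
# while separate lines stay apart
thresh = cv2.morphologyEx(thresh, cv2.MORPH_DILATE, np.ones((1, 30), np.uint8))

# then zero out the 5% borders exactly as before
p = int(image.shape[1] * 0.05)
thresh[:, 0:p] = 0
thresh[:, image.shape[1] - p:] = 0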

StyleGAN encoder: printed image is too small

I am trying to print a style-mixing encoder image, however my printed images are too small and I am not sure where I am going wrong.
My latent space:
jon = np.load('latent_representations/example0.npy')
drogo = np.load('latent_representations/example1.npy')
# Loading already learned latent directions
smile_direction = np.load('ffhq_dataset/latent_directions/smile.npy')
gender_direction = np.load('ffhq_dataset/latent_directions/gender.npy')
age_direction = np.load('ffhq_dataset/latent_directions/age.npy')
My draw style mix loop:
def draw_style_mixing_figure(png, Gs, w, h, src_dlatents, dst_dlatents, style_ranges):
    print(png)
    #src_dlatents = Gs.components.mapping.run(src_latents, None) # [seed, layer, component]
    #dst_dlatents = Gs.components.mapping.run(dst_latents, None)
    src_images = Gs.components.synthesis.run(src_dlatents, randomize_noise=False, **synthesis_kwargs)
    dst_images = Gs.components.synthesis.run(dst_dlatents, randomize_noise=False, **synthesis_kwargs)
    canvas = PIL.Image.new('RGB', (w * (len(src_dlatents) + 1), h * (len(dst_dlatents) + 1)), 'white')
    for col, src_image in enumerate(list(src_images)):
        canvas.paste(PIL.Image.fromarray(src_image, 'RGB'), ((col + 1) * w, 0))
    for row, dst_image in enumerate(list(dst_images)):
        canvas.paste(PIL.Image.fromarray(dst_image, 'RGB'), (0, (row + 1) * h))
        row_dlatents = np.stack([dst_dlatents[row]] * len(src_dlatents))
        row_dlatents[:, style_ranges[row]] = src_dlatents[:, style_ranges[row]]
        row_images = Gs.components.synthesis.run(row_dlatents, randomize_noise=False, **synthesis_kwargs)
        for col, image in enumerate(list(row_images)):
            canvas.paste(PIL.Image.fromarray(image, 'RGB'), ((col + 1) * w, (row + 1) * h))
    canvas.save(png)
    return canvas.resize((512, 512))
My printing image order:
tflib.init_tf()
synthesis_kwargs = dict(output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True), minibatch_size=1)
_Gs_cache = dict()
draw_style_mixing_figure(os.path.join(config.result_dir, 'style-mixing.png'), Gs, w=1024, h=1024, src_dlatents=jon.reshape((1, 12, 512)), dst_dlatents=drogo.reshape((1, 12, 512)), style_ranges=[range(1, 1)])
But the resulting pictures are too small.
Any idea how to make them bigger?
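One observation from the code above (not a confirmed fix, just what the posted code does): the canvas is created at full resolution, w * (len(src_dlatents) + 1) by h * (len(dst_dlatents) + 1), which for this call is 2048 x 2048, and that full-size figure is written to disk by canvas.save(png). The last line, however, returns canvas.resize((512, 512)), so the preview returned to the notebook is always downscaled. A minimal sketch of viewing it larger:
figure = draw_style_mixing_figure(
    os.path.join(config.result_dir, 'style-mixing.png'),
    Gs, w=1024, h=1024,
    src_dlatents=jon.reshape((1, 12, 512)),
    dst_dlatents=drogo.reshape((1, 12, 512)),
    style_ranges=[range(1, 1)])

# `figure` is the 512x512 preview returned by the function; the full 2048x2048
# grid is already in style-mixing.png, so either open that file, or change the
# function's final line, e.g.
#     return canvas                        # unresized
#     return canvas.resize((1024, 1024))   # larger (hypothetical) preview size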

Delay in output video stream when using YOLOv3 detection with OpenCV

This is my code for mask detection, using YOLOv3 weights that I trained myself. Whenever I run the program, I experience a delay in the detection output video. This is the code, please have a look.
import cv2
import numpy as np

net = cv2.dnn.readNet("yolov3_custom_final.weights", "yolov3_custom.cfg")
with open("obj.name", "r") as f:
    classes = f.read().splitlines()
cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)
while True:
    ret, img = cap.read()
    height, weight, _ = img.shape
    blob = cv2.dnn.blobFromImage(img, 1 / 255, (416, 416), (0, 0, 0), swapRB=True, crop=False)
    net.setInput(blob)
    output = net.getUnconnectedOutLayersNames()
    layers = net.forward(output)
    box = []
    confidences = []
    class_ids = []
    for out in layers:
        for detection in out:
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > 0.3:
                centre_x = int(detection[0] * weight)
                centre_y = int(detection[1] * height)
                w = int(detection[2] * weight)
                h = int(detection[3] * height)
                x = int(centre_x - w / 2)
                y = int(centre_y - h / 2)
                box.append([x, y, w, h])
                confidences.append(float(confidence))
                class_ids.append(class_id)
    indexes = np.array(cv2.dnn.NMSBoxes(box, confidences, 0.5, 0.4))
    font = cv2.FONT_HERSHEY_PLAIN
    colors = np.random.uniform(0, 255, size=(len(box), 3))
    for i in indexes.flatten():
        x, y, w, h = box[i]
        label = str(classes[class_ids[i]])
        confidence = str(round(confidences[i], 2))
        color = colors[i]
        cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
        cv2.putText(img, label + "I" + confidence, (x, y + 20), font, 2, (255, 255, 255), 2)
    cv2.imshow("Final", img)
    if cv2.waitKey(1) & 0xff == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()
Can someone please help me with this issue or suggest a way to reduce the lag in my output video stream?
After doing some research, I have found a possible answer to this question. I am running the YOLO model on my local system, which has no GPU; this is the factor causing the delay in the output, since the next frame is only grabbed after the current frame has finished processing.
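If the machine does have a CUDA-capable GPU, a minimal sketch of moving the DNN inference onto the GPU (these calls exist in OpenCV 4.2+, but they only help if OpenCV was actually built with CUDA support):
import cv2

net = cv2.dnn.readNet("yolov3_custom_final.weights", "yolov3_custom.cfg")
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)

# on a CPU-only machine, a smaller input size trades some accuracy for speed,
# e.g. 320x320 instead of 416x416:
# blob = cv2.dnn.blobFromImage(img, 1 / 255, (320, 320), (0, 0, 0), swapRB=True, crop=False)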

Mapping RGB data to values in legend

This is a follow-up to my previous question here
I've been trying to convert the color data in a heatmap to RGB values.
source image
In the image below, on the left is a subplot present in panel D of the source image. It has 6 x 6 cells (6 rows and 6 columns). On the right we see the binarized image, with the clicked cell highlighted in white after running the code below. The input for running the code is the image below. The output (mean = [ 27.72 26.83 144.17]) is the mean of the BGR color in the cell that is highlighted in white in the right image below.
A really nice solution that was provided as an answer to my previous question is the following (ref):
import cv2
import numpy as np

# print pixel value on click
def mouse_callback(event, x, y, flags, params):
    if event == cv2.EVENT_LBUTTONDOWN:
        # get specified color
        row = y
        column = x
        color = image[row, column]
        print('color = ', color)
        # calculate range
        thr = 20  # ± color range
        up_thr = color + thr
        up_thr[up_thr < color] = 255
        down_thr = color - thr
        down_thr[down_thr > color] = 0
        # find points in range
        img_thr = cv2.inRange(image, down_thr, up_thr)  # accepted range
        height, width, _ = image.shape
        left_bound = x - (x % round(width / 6))
        right_bound = left_bound + round(width / 6)
        up_bound = y - (y % round(height / 6))
        down_bound = up_bound + round(height / 6)
        img_rect = np.zeros((height, width), np.uint8)  # bounded by rectangle
        cv2.rectangle(img_rect, (left_bound, up_bound), (right_bound, down_bound), (255, 255, 255), -1)
        img_thr = cv2.bitwise_and(img_thr, img_rect)
        # get points around specified point
        img_spec = np.zeros((height, width), np.uint8)  # specified mask
        last_img_spec = np.copy(img_spec)
        img_spec[row, column] = 255
        kernel = np.ones((3, 3), np.uint8)  # dilation structuring element
        while cv2.bitwise_xor(img_spec, last_img_spec).any():
            last_img_spec = np.copy(img_spec)
            img_spec = cv2.dilate(img_spec, kernel)
            img_spec = cv2.bitwise_and(img_spec, img_thr)
        cv2.imshow('mask', img_spec)
        cv2.waitKey(10)
        avg = cv2.mean(image, img_spec)[:3]
        mean.append(np.around(np.array(avg), 2))
        print('mean = ', np.around(np.array(avg), 2))
        # print(mean) # appends data to variable mean

if __name__ == '__main__':
    mean = []  # np.zeros((6, 6))
    # create window and callback
    winname = 'img'
    cv2.namedWindow(winname)
    cv2.setMouseCallback(winname, mouse_callback)
    # read & display image
    image = cv2.imread('ip2.png', 1)
    # image = image[3:62, 2:118] # crop the image to 6x6 cells
    # ---- resize image --------------------------------------------------
    # appended this to the original code
    print('Original Dimensions : ', image.shape)
    scale_percent = 220  # percent of original size
    width = int(image.shape[1] * scale_percent / 100)
    height = int(image.shape[0] * scale_percent / 100)
    dim = (width, height)
    # resize image
    image = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)
    # ----------------------------------------------------------------------
    cv2.imshow(winname, image)
    cv2.waitKey()  # press any key to exit
    cv2.destroyAllWindows()
What do I want to do next?
The mean RGB values thus obtained have to be mapped to the values in the following legend provided in the source image.
I would like to ask for suggestions on how to map the RGB data to the values in the legend.
Note: in my previous post it was suggested that one could
fit the RGB values into an equation which gives continuous results.
Any suggestions in this direction will also be helpful.
EDIT:
Answering the comment below, I did the following to measure the RGB values of the legend.
Input image:
This image has 8 cells along its width and 1 cell along its height.
I changed these lines of code:
left_bound = x - (x % round(width/8)) # 6 replaced with 8
right_bound = left_bound + round(width/8) # 6 replaced with 8
up_bound = y - (y % round(height/1)) # 6 replaced with 1
down_bound = up_bound + round(height/1) # 6 replaced with 1
Mean obtained for each cell/ each color in legend from left to right:
mean = [ 82.15 174.95 33.66]
mean = [45.55 87.01 17.51]
mean = [8.88 8.61 5.97]
mean = [16.79 17.96 74.46]
mean = [ 35.59 30.53 167.14]
mean = [ 37.9 32.39 233.74]
mean = [120.29 118. 240.34]
mean = [238.33 239.56 248.04]
You can try a piecewise approach: build pairwise transitions between neighbouring legend colors:
c[i->i+1](t) = t*(R[i+1], G[i+1], B[i+1]) + (1-t)*(R[i], G[i], B[i])
Do the same for the values:
val[i->i+1](t) = t*val[i+1] + (1-t)*val[i]
where i is the index of the color in the legend scale and t is a parameter in the [0, 1] range.
So you have a continuous mapping for both quantities, and you just need to find the color parameters i and t closest to your sample and read the value from the mapping.
Update:
To find the color parameters, you can think of every pair of neighbouring legend colors as a pair of 3D points, and your queried color as an external 3D point. Now you just need to find the length of the perpendicular from the external point to each line; iterating over the legend color pairs, find the shortest perpendicular (this gives you i).
Then find the intersection point of the perpendicular and the line. This point is located at distance A from the line start, and if the line length is L, then the parameter value is t = A/L.
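A minimal sketch of that projection step in Python (using the legend means and values listed earlier in this thread; the segment parameter is clamped to [0, 1] so colors beyond the ends of the scale map to the end values):
import numpy as np

def map_color_to_value(query_bgr, legend, values):
    # project the queried color onto each legend segment in BGR space and
    # interpolate the value on the closest segment
    q = np.asarray(query_bgr, dtype=np.float64)
    legend = np.asarray(legend, dtype=np.float64)
    best_dist, best_val = np.inf, None
    for i in range(len(legend) - 1):
        p0, p1 = legend[i], legend[i + 1]
        seg = p1 - p0
        # parameter t of the perpendicular foot, clamped to the segment
        t = np.clip(np.dot(q - p0, seg) / np.dot(seg, seg), 0.0, 1.0)
        foot = p0 + t * seg
        d = np.linalg.norm(q - foot)  # (perpendicular) distance to the segment
        if d < best_dist:
            best_dist = d
            best_val = values[i] * (1 - t) + values[i + 1] * t
    return best_val

# legend means (BGR) and values taken from this thread:
legend = [(82.15, 174.95, 33.66), (45.55, 87.01, 17.51), (8.88, 8.61, 5.97),
          (16.79, 17.96, 74.46), (35.59, 30.53, 167.14), (37.9, 32.39, 233.74),
          (120.29, 118.0, 240.34), (238.33, 239.56, 248.04)]
values = [-4, -2, 0, 2, 4, 8, 16, 32]
print(map_color_to_value((5, 0, 200), legend, values))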
Update 2:
A simple brute-force solution to illustrate the piecewise approach:
#include "opencv2/opencv.hpp"
#include <string>
#include <iostream>
using namespace std;
using namespace cv;
int main(int argc, char* argv[])
{
Mat Image=cv::Mat::zeros(100,250,CV_32FC3);
std::vector<cv::Scalar> Legend;
Legend.push_back(cv::Scalar(82.15,174.95,33.66));
Legend.push_back(cv::Scalar(45.55, 87.01, 17.51));
Legend.push_back(cv::Scalar(8.88, 8.61, 5.97));
Legend.push_back(cv::Scalar(16.79, 17.96, 74.46));
Legend.push_back(cv::Scalar(35.59, 30.53, 167.14));
Legend.push_back(cv::Scalar(37.9, 32.39, 233.74));
Legend.push_back(cv::Scalar(120.29, 118., 240.34));
Legend.push_back(cv::Scalar(238.33, 239.56, 248.04));
std::vector<float> Values;
Values.push_back(-4);
Values.push_back(-2);
Values.push_back(0);
Values.push_back(2);
Values.push_back(4);
Values.push_back(8);
Values.push_back(16);
Values.push_back(32);
int w = 30;
int h = 10;
for (int i = 0; i < Legend.size(); ++i)
{
cv::rectangle(Image, Rect(i * w, 0, w, h), Legend[i]/255, -1);
}
std::vector<cv::Scalar> Smooth_Legend;
std::vector<float> Smooth_Values;
for (int i = 0; i < Legend.size()-1; ++i)
{
cv::Scalar c1 = Legend[i];
cv::Scalar c2 = Legend[i + 1];
float v1 = Values[i];
float v2 = Values[i+1];
for (int j = 0; j < w; ++j)
{
float t = (float)j / (float)w;
Scalar c = c2 * t + c1 * (1 - t);
float v = v2 * t + v1 * (1 - t);
float x = i * w + j;
line(Image, Point(x, h), Point(x, h + h), c/255, 1);
Smooth_Values.push_back(v);
Smooth_Legend.push_back(c);
}
}
Scalar qp = cv::Scalar(5, 0, 200);
float d_min = FLT_MAX;
int ind = -1;
for (int i = 0; i < Smooth_Legend.size(); ++i)
{
float d = cv::norm(qp- Smooth_Legend[i]);
if (d < d_min)
{
ind = i;
d_min = d;
}
}
std::cout << Smooth_Values[ind] << std::endl;
line(Image, Point(ind, 3 * h), Point(ind, 4 * h), Scalar::all(255), 2);
circle(Image, Point(ind, 4 * h), 3, qp/255,-1);
putText(Image, std::to_string(Smooth_Values[ind]), Point(ind, 70), FONT_HERSHEY_DUPLEX, 1, Scalar(0, 0.5, 0.5), 0.002);
cv::imshow("Legend", Image);
cv::imwrite("result.png", Image*255);
cv::waitKey();
}
The result:
Python:
import cv2
import numpy as np

height = 100
width = 250
Image = np.zeros((height, width, 3), np.float32)
legend = np.array([(82.15, 174.95, 33.66),
                   (45.55, 87.01, 17.51),
                   (8.88, 8.61, 5.97),
                   (16.79, 17.96, 74.46),
                   (35.59, 30.53, 167.14),
                   (37.9, 32.39, 233.74),
                   (120.29, 118., 240.34),
                   (238.33, 239.56, 248.04)], np.float32)
values = np.array([-4, -2, 0, 2, 4, 8, 16, 32], np.float32)
# width of a cell; also defines the number of subdivisions of one segment
# transition. Larger values give more accuracy but work slower.
w = 30
# for display purposes only: height of the bars in the result image
h = 10
# plot the legend cells (to check correctness only)
for i in range(len(legend)):
    col = legend[i]
    cv2.rectangle(Image, (i * w, 0), ((i + 1) * w, h), col / 255, -1)
# build smoothed scales for the colors and the corresponding values
Smooth_Legend = []
Smooth_Values = []
for i in range(len(legend) - 1):  # iterate over the known knots
    c1 = legend[i]      # start color point
    c2 = legend[i + 1]  # end color point
    v1 = values[i]      # start value
    v2 = values[i + 1]  # end value
    for j in range(w):  # slide inside the [start:end] interval
        t = float(j) / float(w)      # map it to the [0:1] interval
        c = c2 * t + c1 * (1 - t)    # transition between c1 and c2
        v = v2 * t + v1 * (1 - t)    # transition between v1 and v2
        x = i * w + j                # global scale coordinate (for drawing)
        cv2.line(Image, (x, h), (x, h + h), c / 255, 1)  # draw one tick of the smoothed scale
        Smooth_Values.append(v)   # append smoothed values for the next step
        Smooth_Legend.append(c)   # append smoothed colors for the next step
# queried color
qp = np.array([5, 0, 200])
# initial value for the minimal distance, set to a large value
d_min = 1e7
# index for the color search
ind = -1
# search for the minimal distance from the queried color to the smoothed scale colors
for i in range(len(Smooth_Legend)):
    # distance
    d = cv2.norm(qp - Smooth_Legend[i])
    if d < d_min:
        ind = i
        d_min = d
# ind contains the index of the closest color in the smoothed scale,
# so we can extract the corresponding value from the smoothed values scale
print(Smooth_Values[ind])  # value mapped to the queried color
# plot a pointer (to check ourselves)
cv2.line(Image, (ind, 3 * h), (ind, 4 * h), (255, 255, 255), 2)
cv2.circle(Image, (ind, 4 * h), 3, qp / 255, -1)
cv2.putText(Image, str(Smooth_Values[ind]), (ind, 70), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0.5, 0.5), 1)
# show window
cv2.imshow("Legend", Image)
# save to file
cv2.imwrite("result.png", Image * 255)
cv2.waitKey()

OpenCV: Segment each digit from the given image. Digits are written in each cell of a row matrix. Each cell is bounded by margins

I have been trying to recognise handwritten characters (digits/alphabet) from a form document. Form documents have 1-D rows of cells, where the applicant has to fill in their information within those bounded cells. However, I'm unable to segment the digits (currently my input consists only of digits) from the bounding boxes.
I went through the following steps:
Reading the image (as a grayscale image) via the "imread" method of OpenCV. Initial image size: 19 x 209 (in pixels).
pic = "crop/cropped000.jpg"
newImg = cv2.imread(pic, 0)
Resizing the image to 200% of its original size via the "resize" method of OpenCV, using INTER_AREA interpolation. Resized image size: 38 x 418 (in pixels).
h,w = newImg.shape
resizedImg = cv2.resize(newImg, (2*w,2*h), interpolation=cv2.INTER_AREA)
Applied Canny edge detection.
v = np.median(resizedImg)
sigma = 0.33
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edgedImg = cv2.Canny(resizedImg, lower, upper)
Cropped the contours and saved them as images in the 'BB' directory.
im2, contours, hierarchy = cv2.findContours(edgedImg.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
num = 0
for c in contours:
    x, y, w, h = cv2.boundingRect(c)
    num += 1
    new_img = resizedImg[y:y+h, x:x+w]
    cv2.imwrite('BB/' + str(num).zfill(3) + '.jpg', new_img)
Entire code in summary:
import cv2
import numpy as np

pic = "crop/cropped000.jpg"
newImg = cv2.imread(pic, 0)
h, w = newImg.shape
print(newImg.shape)
resizedImg = cv2.resize(newImg, (2*w, 2*h), interpolation=cv2.INTER_AREA)
print(resizedImg.shape)
v = np.median(resizedImg)
sigma = 0.33
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edgedImg = cv2.Canny(resizedImg, lower, upper)
im2, contours, hierarchy = cv2.findContours(edgedImg.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
num = 0
for c in contours:
    x, y, w, h = cv2.boundingRect(c)
    num += 1
    new_img = resizedImg[y:y+h, x:x+w]
    cv2.imwrite('BB/' + str(num).zfill(3) + '.jpg', new_img)
Images produced are posted here:
https://imgur.com/a/GStIcdj
I had to double the image size because Canny edge detection was producing double edges for each object (however, it still does). I have also experimented with other OpenCV functionality like thresholding, Gaussian blur, dilation and erosion, but all in vain.
# we need one more parameter for the date cell width, as this could differ between banks
def crop_image_data_from_date_field(image, new_start_h, new_end_h, new_start_w, new_end_w, cell_width):
    # for the date, each cell has the same height and width (here width: 25 px),
    # so the coordinates are shifted based on the cell width
    cropped_image_list = []
    starting_width = new_start_w
    for i in range(1, 9):  # the date has only 8 fields: DD/MM/YYYY
        cropped_img = image[new_start_h:new_end_h, new_start_w + 1:new_start_w + 22]
        new_start_w = starting_width + (i * cell_width)
        cropped_img = cv2.resize(cropped_img, (28, 28))
        image_name = 'cropped_date/cropped_' + str(i) + '.png'
        cv2.imwrite(image_name, cropped_img)
        cropped_image_list.append(image_name)
    # print('cropped_image_list : ', cropped_image_list, len(cropped_image_list))
    # rec_value = handwritten_digit_recog.recog_digits(cropped_image_list)
    recvd_value = custom_predict.predict_digit(cropped_image_list)
    # print('recvd val : ', recvd_value)
    return recvd_value
You need to specify each cell's width and its x, y, w, h.
I think this will help you.
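As a minimal sketch of that idea (the cell count, margin and 28x28 output size are assumptions that must be tuned to your own form), a single-row field can be split into equal-width cells like this:
import cv2

def split_row_into_cells(row_img, n_cells, margin=1):
    # split a single-row field image into n_cells equal-width crops;
    # `margin` trims a few pixels on every side to drop the printed cell borders
    h, w = row_img.shape[:2]
    cell_w = w // n_cells
    cells = []
    for i in range(n_cells):
        x0 = i * cell_w + margin
        x1 = (i + 1) * cell_w - margin
        cell = row_img[margin:h - margin, x0:x1]
        cells.append(cv2.resize(cell, (28, 28)))  # size expected by the digit recogniser
    return cells

# e.g. for the 8-character DD/MM/YYYY date row mentioned above:
# cells = split_row_into_cells(cv2.imread("crop/cropped000.jpg", 0), n_cells=8)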
