In JS we can do this:
var ctx = canvas.getContext('2d');
var img = new Image;
img.onload = draw;
img.src = "http://i.stack.imgur.com/UFBxY.png";

// Draws `img` with a colored outline that follows its alpha channel.
function draw() {
  // Offsets for the 8 neighbor directions, as flat (dx, dy) pairs.
  var offsets = [-1,-1, 0,-1, 1,-1, -1,0, 1,0, -1,1, 0,1, 1,1];
  var thickness = 2; // outline thickness scale, in pixels
  var x = 5;         // final image position
  var y = 5;

  // Stamp the image once per offset direction, scaled by the thickness,
  // building a slightly "inflated" silhouette of the image.
  for (var i = 0; i < offsets.length; i += 2) {
    ctx.drawImage(img, x + offsets[i] * thickness, y + offsets[i + 1] * thickness);
  }

  // Recolor everything drawn so far (the silhouette) with the outline color;
  // "source-in" keeps only the pixels where something is already drawn.
  ctx.globalCompositeOperation = "source-in";
  ctx.fillStyle = "red";
  ctx.fillRect(0, 0, canvas.width, canvas.height);

  // Draw the untouched image on top in normal compositing mode.
  ctx.globalCompositeOperation = "source-over";
  ctx.drawImage(img, x, y);
}
And it will draw a border around your image (a PNG, so it's not a rectangular border).
How to do it in Pixi.js? Pixi.js doesn't seem to handle the transparent parts as well.
Related
Let's say I have two images, and the artery (the red arrow) is at about the same position in both:
Now, I need to show the two image in one figure, and I use the vtkImageBlend for this purpose. My code is:
import vtk
# Read the first DICOM slice (TOF series).
img1 = vtk.vtkDICOMImageReader()
img1.SetFileName('C:\\Users\\MLoong\\Desktop\\dicom_data\\Chang Cheng\\TOF\\IM_0198')
img1.Update()
print('img1: ', img1.GetOutput().GetSpacing())
print('img1: ', img1.GetOutput().GetExtent())
# Read the second DICOM slice (SNAP series).
img2 = vtk.vtkDICOMImageReader()
img2.SetFileName('C:\\Users\\MLoong\\Desktop\\dicom_data\\Chang Cheng\\SNAP\\IM_0502')
img2.Update()
print('img2: ', img2.GetOutput().GetSpacing())
print('img2: ', img2.GetOutput().GetExtent())
# Blend the two images with per-input opacities (10% img1, 90% img2).
# NOTE(review): per the vtkImageBlend docs, the output takes the spacing,
# origin and extent of the FIRST input; the two readers report different
# spacing/extent, so voxels are blended by index, not by physical position —
# this is the likely cause of the misaligned overlay described below.
image_blender = vtk.vtkImageBlend()
image_blender.AddInputConnection(img1.GetOutputPort())
image_blender.AddInputConnection(img2.GetOutputPort())
image_blender.SetOpacity(0, 0.1)
image_blender.SetOpacity(1, 0.9)
image_blender.Update()
# Display pipeline: blended scalars -> window/level colors -> image actor.
imageActor = vtk.vtkImageActor()
windowLevel = vtk.vtkImageMapToWindowLevelColors()
imageActor.GetMapper().SetInputConnection(windowLevel.GetOutputPort())
ren = vtk.vtkRenderer()
ren.AddActor(imageActor)
ren.SetBackground(0.1, 0.2, 0.4)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetSize(400, 400)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# SetInputData takes a static snapshot of the blender's current output
# (blender was Update()d above); a SetInputConnection would keep it live.
windowLevel.SetInputData(image_blender.GetOutput())
windowLevel.Update()
renWin.Render()
iren.Start()
And the result is:
In the above figure, img2 is about half the size of img1.
However, the printed information is:
img1: (0.3571428656578064, 0.3571428656578064, 1.399999976158142)
img1: (0, 559, 0, 559, 0, 0)
img2: (0.5, 0.5, 1.0)
img2: (0, 319, 0, 319, 0, 0)
For img1, the extent is (560, 560), and the spacing is (0.357, 0.357). Thus, the FOV is: 0.357*560=200, and the FOV of img2 is 160. Thus, I think the blend figure may be wrong.
Moreover, the RadiAnt also provide the fusion figure:
In the RadiAnt fusion figure, the artery of two images is overlay, which is what I want.
Is there anything wrong with my vtkImageBlend code?
From the Detailed Description of vtkImageBlend:
"blend images together using alpha or opacity
vtkImageBlend takes L, LA, RGB, or RGBA images as input and blends them according to the alpha values and/or the opacity setting for each input.
The spacing, origin, extent, and number of components of the output are the same as those for the first input."
So, you need to make sure to have the same spacing, extent and origin.
The way to solve it is to create new 2 vtkImageData with the same properties and copy your existing data to the new images.
Here I handle extent:
// Re-sizes `oldImage` to `newDimensions`: allocates a fresh VTK_SHORT,
// single-component image of the new size, copies every voxel that exists in
// both images (with the z axis index flipped on both sides), and pads voxels
// outside the old bounds with a "transparent" HU value (-1000, i.e. air).
// The caller's reference is replaced (ref) and the old image is disposed.
// NOTE(review): the new image copies the old spacing but NOT the origin —
// callers must translate by the origin delta afterwards (see text below).
private static void AdjustImage(ref vtkImageData oldImage, int[] newDimensions)
{
vtkImageData newImage = new vtkImageData();
vtkInformation info = new vtkInformation();
int[] oldDimensions = oldImage.GetDimensions();
double[] spacing = oldImage.GetSpacing();
newImage.SetDimensions(newDimensions[0], newDimensions[1], newDimensions[2]);
// 4 == VTK_SHORT; the scalar type/components are configured via the info
// object before allocation.
vtkImageData.SetScalarType(4/*VTK_SHORT*/, info);
vtkImageData.SetNumberOfScalarComponents(1, info);//The components that each pixel needs to represent =1 is the index quantity map
newImage.AllocateScalars(info);//It is very important to allocate memory and generate image data. After the image is generated, the default value of all pixels is 0
newImage.SetSpacing(spacing[0], spacing[1], spacing[2]);
// Local alias so the parallel lambdas below don't capture the ref parameter.
vtkImageData data = oldImage;
// Walk the NEW image's index space in parallel; copy where the old image
// has data, otherwise fill with the transparent default.
Parallel.For(0, newDimensions[0], i =>
{
if (i < oldDimensions[0])
{
Parallel.For(0, newDimensions[1], j =>
{
if (j < oldDimensions[1])
{
Parallel.For(0, newDimensions[2], k =>
{
if (k < oldDimensions[2])
{
// Copy voxel (i, j, k), flipping the z index in both source and
// destination so the slice order is preserved after resizing.
newImage.SetScalarComponentFromDouble(i, j,
newDimensions[2] - 1 - k, 0,
data.GetScalarComponentAsDouble(i, j,
oldDimensions[2] - 1 - k, 0));
}
else
{
// k beyond the old depth: pad this voxel.
SetImageToDefault(newImage, newDimensions, i, j, k);
}
});
}
else
{
// j beyond the old height: pad the whole (i, j) column.
// NOTE(review): this 4-argument overload is not visible in this file —
// presumably it loops over k; confirm it exists elsewhere.
SetImageToDefault(newImage, newDimensions, i, j);
}
});
}
else
{
// i beyond the old width: pad the whole i-slab.
// NOTE(review): 3-argument overload also defined elsewhere — confirm.
SetImageToDefault(newImage, newDimensions, i);
}
});
// Hand ownership of the new image back through the ref parameter.
oldImage.Dispose();
oldImage = newImage;
}
// Writes the "transparent" padding value into one voxel of `img`, using the
// same flipped-z indexing convention as the copy loop.
private static void SetImageToDefault(vtkImageData img, int[] imageDimensions, int i, int j, int k)
{
    // -1000 HU is air, used here as the "empty" padding value.
    const double transparentHu = -1000;
    int flippedZ = imageDimensions[2] - 1 - k;
    img.SetScalarComponentFromDouble(i, j, flippedZ, 0, transparentHu);
}
Afterward you will need to translate the second image by the delta of the differences between the 2 images origins.
Wondering how to take the definition of a half circle arc and convert it into quadratic bezier curve, using the lowercase letter b as an example.
var canvasEl = document.querySelector('canvas');
var context = canvasEl.getContext('2d');
// Stem and base of the shape, then a single quadratic curve for the bowl.
context.beginPath();
context.moveTo(0, 0);
context.lineTo(0, 200);
context.lineTo(100, 200);
context.quadraticCurveTo(0, 100, 200, 0);
context.stroke();
<canvas></canvas>
I don't see how to apply this (which I think is using an outdated canvas API) to the current canvas API for quadratic bezier curves. Where the right side of the b is a half-circle.
var c = document.querySelector('canvas')
var ctx = c.getContext('2d')
// BUG FIX: the original wrote `ctx.width = ctx.height = 100`, but a 2D
// rendering context has no width/height properties, so that was a no-op.
// The canvas ELEMENT must be sized. (Resizing also clears the canvas,
// which is harmless here because nothing has been drawn yet.)
c.width = c.height = 100
// Stem of the "b", then the half-circle bowl via two quadratic segments.
ctx.beginPath()
ctx.moveTo(0, 20)
ctx.lineTo(0, 40)
ctx.lineTo(10, 40)
half(10, 40, 5, 10, 30)
ctx.lineTo(0, 30)
ctx.stroke()
/**
 * Approximates a half circle with two quadratic Bézier quarter-arcs,
 * drawn into the outer `ctx` from the current point (sx, sy) out to
 * (endx, endy), bulging sideways by `radius`.
 *
 * @param {number} sx     start x (should equal the current pen position)
 * @param {number} sy     start y
 * @param {number} radius horizontal bulge of the arc
 * @param {number} endx   end x of the arc
 * @param {number} endy   end y of the arc
 * @param {*}      clock  unused — kept so existing call sites stay valid
 */
function half(sx, sy, radius, endx, endy, clock) {
  // BUG FIX: the original redeclared x/y/cpx/cpy with `var` in the same
  // scope; the duplicate declarations are removed here (behavior unchanged).
  // First quarter: curve out to the arc's extreme point.
  var midX = sx + radius
  var midY = sy - radius
  ctx.quadraticCurveTo(sx + radius, sy, midX, midY)
  // Second quarter: curve back in to the requested end point.
  ctx.quadraticCurveTo(sx + radius, endy, endx, endy)
}
<canvas></canvas>
I have been trying to remove the black background from the grabcut output using python opencv.
import numpy as np
import cv2

# Segment the foreground with GrabCut, initialised from a rectangle that is
# known to contain the subject.
img = cv2.imread(r'myfile_1.png')
mask = np.zeros(img.shape[:2], np.uint8)
# Internal model buffers required by grabCut (must be 1x65 float64).
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)
rect = (1, 1, 665, 344)
cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)
# Collapse grabCut's four labels to binary: background (0) and probable
# background (2) become 0, everything else becomes 1.
mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
# Zero out the background pixels across all three channels.
img = img * mask2[:, :, np.newaxis]
cv2.imshow('img', img)
cv2.imwrite('img.png', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
I wrote the above code to save the grabcut output. Please suggest how I can remove the black background and make it transparent.
I have achieved this by using the following snippet.
import cv2

file_name = "grab.png"

# Load the grabcut result, derive a binary alpha mask from its non-black
# pixels, and re-save as a 4-channel PNG so the background is transparent.
src = cv2.imread(file_name, 1)
tmp = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
# Any pixel brighter than 0 becomes fully opaque (255); pure black stays 0.
_, alpha = cv2.threshold(tmp, 0, 255, cv2.THRESH_BINARY)
blue, green, red = cv2.split(src)
dst = cv2.merge([blue, green, red, alpha], 4)
cv2.imwrite("test.png", dst)
This is Java code. After using grabcut, the resulting background is transparent.
/**
 * Removes the background of a bitmap via GrabCut, initialised from a
 * rectangle inset 1% from each image border, and returns the foreground
 * composited over white on an ARGB_8888 bitmap.
 */
public Bitmap removeBackground(Bitmap bitmap) {
//GrabCut part
Mat img = new Mat();
Utils.bitmapToMat(bitmap, img);
int r = img.rows();
int c = img.cols();
// Initialisation rectangle: everything outside it is definite background.
Point p1 = new Point(c / 100, r / 100);
Point p2 = new Point(c - c / 100, r - r / 100);
Rect rect = new Rect(p1, p2);
Mat mask = new Mat();
Mat fgdModel = new Mat();
Mat bgdModel = new Mat();
Mat imgC3 = new Mat();
// grabCut requires a 3-channel input; bitmapToMat produces RGBA.
Imgproc.cvtColor(img, imgC3, Imgproc.COLOR_RGBA2RGB);
Imgproc.grabCut(imgC3, mask, rect, bgdModel, fgdModel, 5, Imgproc.
GC_INIT_WITH_RECT);
// Build a binary mask of the pixels grabCut labelled GC_PR_FGD (value 3).
// compare() broadcasts the 1x1 `source` Mat against every mask element.
Mat source = new Mat(1, 1, CvType.CV_8U, new Scalar(3.0));
Core.compare(mask, source/* GC_PR_FGD */, mask, Core.CMP_EQ);
//This is important. You must use Scalar(255,255, 255,255), not Scalar(255,255,255)
// NOTE(review): `foreground` is created as CV_8UC3 while `img` is the RGBA
// Mat from bitmapToMat; copyTo with a mask onto a differently-typed Mat may
// reallocate the destination — confirm the white fill survives on-device.
Mat foreground = new Mat(img.size(), CvType.CV_8UC3, new Scalar(255,
255, 255,255));
img.copyTo(foreground, mask);
// convert matrix to output bitmap
bitmap = Bitmap.createBitmap((int) foreground.size().width,
(int) foreground.size().height,
Bitmap.Config.ARGB_8888);
Utils.matToBitmap(foreground, bitmap);
return bitmap;
}
This is a polygon in my svg file.
<polygon id="UKCYEW" fill="#09252E" points="840.218,415.85 696.468,415.85 696.468,373.974 817.176,373.558 "/>
Through D3.js I can get all the points in the polygon and get the center of this polygon
// Average the polygon's vertices to approximate its center (posx, posy).
var points = d3.select('#' + name).attr('points');
if (points != null) {
  var posx = 0;
  var posy = 0;
  // BUG FIX: split on runs of whitespace after trimming. The polygon's
  // `points` attribute ends with a trailing space, so the original
  // split(' ') produced an empty final token, making Number(arr[1])
  // evaluate to NaN and poisoning posy.
  var temps = points.trim().split(/\s+/);
  temps.forEach(function (pair) {
    var coords = pair.split(',');
    posx += Number(coords[0]);
    posy += Number(coords[1]);
  });
  posx = posx / temps.length;
  posy = posy / temps.length;
}
Now the question for me is how to compute the size of the polygon.
document.getElementById("UKCYEW").getBBox() will get the x, y, width and height (note: getElementById returns a single element, not a list, so there is no [0]). The size is the width and height, and the centre is x + width / 2, y + height / 2. Isn't that simpler?
I need to draw a pie chart that's works in IE 8, so I'm using d34raphael.
currently this is my code, which is modified from a d3 pie chart example https://raw.github.com/mbostock/d3/master/examples/pie/pie.html
// Pie-chart geometry and generators (adapted from the d3 pie example).
var width = 300;
var height = 300;
var outerRadius = Math.min(width, height) / 2;
var innerRadius = outerRadius * .6;
var data = d3.range(10).map(Math.random);
var color = d3.scale.category20();
var donut = d3.layout.pie();
var arc = d3.svg.arc().innerRadius(innerRadius).outerRadius(outerRadius);

// #chart is a div; Raphael renders into it (VML on IE8), and d3.raphael
// adapts the paper so d3 selections can target it.
var paper = new Raphael($('#chart')[0], width, height);
var svg = d3.raphael(paper);
paper.setStart();

// Background rectangle covering the full chart area.
var vis = svg.selectAll('rect')
    .data([data])
    .enter().append('rect')
    .attr('x', 0)
    .attr('y', 0)
    .attr('width', width)
    .attr('height', height);

// One path per pie slice, colored by slice index and centered on the paper.
svg.selectAll('path')
    .data(donut)
    .enter().append('path')
    .attr('fill', function(d, i) { return color(i); })
    .attr('d', arc)
    .attr('transform', 'translate(' + outerRadius + ',' + outerRadius + ')');

paper.setFinish().transform(['t', 0, 0]);
it crashes when I pass donut into the data function, the bind function inside d3's data function has a group argument. after stepping through the data function, I found that group was undefined. (for me, it crashes on d3.v2.js:4997 bind(group = this[i], value.call(group, group.parentNode.__data__, i));, it tries to reference parentNode on the undefined group) I think this may be related to raphael not supporting the g tag. any Ideas on how i can use the d3 pie layout with d34raphael? Thanks