I am trying to export a graph from SVG (obtained through d3.js) to a JPG image using javascript.
The problem is that the final image does not show the picture properly; instead it draws a black area enclosing the lines of the graph. Here are the two images: the SVG is shown at the top of the page, and the resulting JPG image below it.
The code I have written is the following:
// Serialize the on-page SVG (adding explicit version/xmlns attributes so the
// standalone markup is valid outside the HTML document) and rasterize it onto
// a canvas for JPEG export.
// NOTE(review): styling applied through external CSS is NOT carried along by
// XMLSerializer — shapes then fall back to the default black fill, which is
// the usual cause of the "black area" artifact. Set presentation attributes
// (stroke, fill, ...) directly on the SVG nodes.
var root_node = d3.select("svg")
    .attr("version", 1.1)
    .attr("xmlns", "http://www.w3.org/2000/svg")
    .node();
var s_xml = (new XMLSerializer).serializeToString(root_node);
// Percent-encode the markup: raw '#', '%', '<' etc. in an unencoded
// data URI break the image payload.
var imgsrc = 'data:image/svg+xml;charset=utf-8,' + encodeURIComponent(s_xml);
var img = '<img src="' + imgsrc + '">';
var canvas = document.createElement("canvas");
canvas.width = 1600;
canvas.height = 600;
var context = canvas.getContext("2d");
var image = new Image;
image.onload = function() {
    context.drawImage(image, 0, 0);
    // JPEG has no alpha channel: paint a white background *under* the
    // drawing so transparent regions don't come out black.
    context.fillStyle = 'white';
    context.globalCompositeOperation = "destination-over";
    context.fillRect(0, 0, canvas.width, canvas.height);
    var canvasdata = canvas.toDataURL("image/jpeg");
    var jpgimg = '<img src="' + canvasdata + '">';
    d3.select("body").append("svgdataurl").html(jpgimg);
};  // FIX: original ended with a stray ')' here — a syntax error
// Assign src *after* wiring onload so a cached image cannot fire the
// load event before the handler is attached.
image.src = imgsrc;
Does anyone know the reason for the wrong colour transformation? Thanks in advance!
You have to apply the CSS for the line chart directly as attributes in the D3 code, because external stylesheets are not carried along when the SVG is serialized.
// Style the line inline via attributes — external CSS is not serialized
// together with the SVG markup.
var chartPath = lineChart.append("path");
chartPath.attr("class", "lineChart");
chartPath.attr("stroke-width", 1);
chartPath.attr("fill", "none");
chartPath.attr("d", valueline(lineChartData));
Related
For example, I have a PNG image where there is an object I need (not transparent) and a background area (transparent). I would like to cut the same object (non-transparent area) from another JPG image to a new PNG.
let jpg = d3.select("#myJpg");
let png = d3.select("#myPng");
// cut object of png from jpg
Using canvas you can cut out one image using another, but there are some serious restrictions here (your image needs to be hosted on your server or served with the correct CORS headers for your canvas to export data — if you don't need to export the canvas to a data URL, it should be fine).
// Demo: 'source-in' compositing keeps only those pixels of the second image
// that overlap opaque pixels already on the canvas — i.e. the first image
// acts as a mask for the second.
(async () => {
    const canvas = document.createElement( 'canvas' );
    const ctx = canvas.getContext( '2d' );

    const maskImage = await testImageSquare( 400, 400, 200, 'red' );
    const sourceImage = await testImageSquare( 400, 400, 100, 'green' );

    // Size the canvas to the mask before any drawing happens.
    canvas.width = maskImage.naturalWidth;
    canvas.height = maskImage.naturalHeight;

    ctx.drawImage( maskImage, 0, 0 );
    // From here on, new pixels survive only where the canvas is opaque.
    ctx.globalCompositeOperation = 'source-in';
    ctx.drawImage( sourceImage, 0, 0 );

    document.body.append( maskImage, sourceImage, canvas );
})();
/** Helper method to create images */
/**
 * Helper: build an HTMLImageElement whose content is a centered filled
 * square on a transparent background.
 * @param {number} width - total image width in px
 * @param {number} height - total image height in px
 * @param {number} squareSize - side length of the centered square in px
 * @param {string|CanvasGradient|CanvasPattern} fillStyle - fill of the square
 * @returns {Promise<HTMLImageElement>} resolves once the image has loaded
 */
async function testImageSquare( width, height, squareSize, fillStyle ){
    const scratch = document.createElement( 'canvas' );
    scratch.width = width;
    scratch.height = height;

    const ctx = scratch.getContext( '2d' );
    ctx.fillStyle = fillStyle;
    const left = (width - squareSize) / 2;
    const top = (height - squareSize) / 2;
    ctx.fillRect( left, top, squareSize, squareSize );

    const image = new Image;
    image.src = scratch.toDataURL();
    await new Promise( (resolve) => image.addEventListener( 'load', resolve ) );
    return image;
}
/* Outline the demo images and the composited canvas so their bounds are visible. */
img, canvas {
border: 1px solid black;
}
Using the WebGLRenderer, successfully loaded an .obj file created in Cinema4d.
// Basic three.js viewer: perspective camera, orbit controls, a three-point
// directional light rig, and an OBJ model loaded into the scene.
var scene = new THREE.Scene();

var camera = new THREE.PerspectiveCamera( 75, window.innerWidth/window.innerHeight, 0.1, 1000 );
camera.position.z = 200;

var renderer = new THREE.WebGLRenderer();
renderer.setSize( window.innerWidth, window.innerHeight );
document.body.appendChild( renderer.domElement );

var controls = new THREE.OrbitControls(camera, renderer.domElement);
controls.enableDamping = true;
controls.dampingFactor = 0.25;
controls.enableZoom = true;

// Light rig: warm key from the left, cool fill from the right, white back light.
var keyLight = new THREE.DirectionalLight(new THREE.Color('hsl(30, 100%, 75%)'), 1.0);
keyLight.position.set(-100, 0, 100);
var fillLight = new THREE.DirectionalLight(new THREE.Color('hsl(240, 100%, 75%)'), 0.75);
fillLight.position.set(100, 0, 100);
var backLight = new THREE.DirectionalLight(0xffffff, 1.0);
backLight.position.set(100, 0, -100).normalize();
scene.add(keyLight);
scene.add(fillLight);
scene.add(backLight);

const material = new THREE.LineBasicMaterial( {
    color: 0xffffff,
    linewidth: 1,
    linecap: 'round', //ignored by WebGLRenderer
    linejoin: 'round' //ignored by WebGLRenderer
} );
// FIX: removed `scene.add(material)` — a Material is not an Object3D, so
// Scene.add rejects it. To render wireframe lines, apply this material to a
// line object built from the model's geometry, e.g.
//   scene.add(new THREE.LineSegments(edgesGeometry, material));

var objLoader = new THREE.OBJLoader();
objLoader.setPath('/examples/3d-obj-loader/assets/');
objLoader.load('Untitled2.obj', function (object) {
    object.position.y -= 60; // drop the model so it sits nearer the view centre
    scene.add(object);
});

// Render loop: update damped controls each frame, then draw.
var animate = function () {
    requestAnimationFrame( animate );
    controls.update();
    renderer.render(scene, camera);
};
animate();
Aiming to convert this to vector using the SVGRenderer - tried swapping out the THREE.WebGLRenderer for SVGRenderer but there's clearly more to it than this, and can't find any walkthroughs, or breakdowns on here.
Can see that what I'm looking for is technically possible with three.js!
Linked an image of the 3d file I'm looking to vectorise - a simple mountain style scene - aiming for each edges of the vertices to be stroked, with no texture needed (white shape, black edges).
Tried making a JSON with Bodymovin, but it proved impossible to join all the paths of the vertices without lots of stray lines appearing.
enter image description here
I have been working with html canvas compositing trying to clip a pattern with a mask.
The main issue that I have is that the mask I have comes from an svg with transparencies within the outer most border. I want the entire inside from the outer most border to be filled with the pattern.
Take this SVG for example you can see that there is a single pixel border, then some transparency, and then an opaque red inner blob. The compositing I have done works as the documentation says it should, the single pixel border and the red inner portion pick up the pattern that I want to mask into this shape. The problem is that I want to mask the entire innards starting from the single pixel border.
This is where I think clip might help. But it seems clip only works with manually drawn paths, not paths from an svg (at least that I am aware of).
Is there a way to accomplish what I am trying to do?
Regards,
James
The Path2D constructor accepts an SVG path data argument, that it will parse as the d attribute of an SVG <path> element.
You can then use this Path2D object with the clip() method:
// Fetch an external SVG, lift its path-data (`d` attribute) into a Path2D,
// and use it as the canvas clipping region for a grid of red dots.
(async () => {
    const svgURL = "https://upload.wikimedia.org/wikipedia/commons/7/76/Autism_spectrum_infinity_awareness_symbol.svg";
    const resp = await fetch(svgURL);
    const markup = resp.ok && await resp.text();
    const doc = new DOMParser().parseFromString(markup, "image/svg+xml");
    const pathData = doc.querySelector("[d]").getAttribute("d");

    // Clip all subsequent drawing to the SVG outline.
    const clipPath = new Path2D(pathData);
    const canvas = document.querySelector("canvas");
    const ctx = canvas.getContext("2d");
    ctx.clip(clipPath);

    // Tile the canvas with circles; only the clipped region will show.
    const rad = 30;
    const step = rad * 2;
    for (let y = 0; y < canvas.height; y += step) {
        for (let x = 0; x < canvas.width; x += step) {
            ctx.moveTo(x + rad, y);
            ctx.arc(x, y, rad, 0, Math.PI * 2);
        }
    }
    ctx.fillStyle = "red";
    ctx.fill();
})().catch(console.error);
<canvas width="792" height="612"></canvas>
If you need to transform this path-data (e.g scale, or rotate), then you can create a second Path2D object, and use its .addPath(path, matrix) method to do so:
// same as above, but smaller
// Same as above, but the SVG path is scaled to half size before clipping,
// via Path2D.addPath(path, matrix): {a: 0.5, d: 0.5} is the scale portion
// of a DOMMatrix.
(async () => {
    const svgURL = "https://upload.wikimedia.org/wikipedia/commons/7/76/Autism_spectrum_infinity_awareness_symbol.svg";
    const resp = await fetch(svgURL);
    const markup = resp.ok && await resp.text();
    const doc = new DOMParser().parseFromString(markup, "image/svg+xml");
    const pathData = doc.querySelector("[d]").getAttribute("d");

    const originalPath = new Path2D(pathData);
    const path = new Path2D();
    // Copy the path in at half scale.
    path.addPath(originalPath, { a: 0.5, d: 0.5 });

    const canvas = document.querySelector("canvas");
    const ctx = canvas.getContext("2d");
    ctx.clip(path);

    // Tile the canvas with circles; only the clipped region will show.
    const rad = 15;
    const step = rad * 2;
    for (let y = 0; y < canvas.height; y += step) {
        for (let x = 0; x < canvas.width; x += step) {
            ctx.moveTo(x + rad, y);
            ctx.arc(x, y, rad, 0, Math.PI * 2);
        }
    }
    ctx.fillStyle = "red";
    ctx.fill();
})().catch(console.error);
<canvas width="396" height="306"></canvas>
I'm getting PGM data and want to put it on a canvas, but it is not working.
That's why I need to convert it to PNG.
First I decoded the PGM base64 to UTF-8 and tried to drawImage, but nothing happens.
// Attempt to render a base64-encoded ASCII PGM (P2) image on a canvas.
// NOTE(review): browsers cannot decode PGM — 'image/pgm' is not a supported
// image MIME type, so the Image element never produces pixels and only the
// white fillRect background remains visible. The PGM text must be parsed and
// written via ImageData (or re-encoded as PNG) before it can be drawn.
let base64= "UDIKIyBDcmVhdGVkIGJ5IEdJTVAgdmVyc2lvbiAyLjEwLjggUE5NIHBsdWctaW4KMjY3IDE2MgoyNTUKMjA1CjIwNQoyMDUKMjA1CjIwNQoyMDUKMjA1CjIwNQoyMDUKMjA1CjIwNQoyMDUKMjA1CjIwNQoyMDUKMjA1CjIwNQoyMDUKMjA1CjIwNQoyMDUKM....."
// Decode the base64 payload back to the raw PGM text.
let decoded = base64decode(base64);
// 'image/pgm;utf-8' is not a registered MIME type — presumably this is why
// the browser refuses to decode it; verify against the console/network tab.
let imageUrl = 'data:image/pgm;utf-8,' + decoded;
var blob = this.dataURLtoBlob(imageUrl)
var canvas = document.getElementById('canvas') as HTMLCanvasElement;
let ctx = canvas.getContext('2d');
var img = new Image(500,500);
img.onload = function() {
// Resize the canvas to the (intrinsic) image size, paint a white
// background, then draw the decoded image on top.
canvas.width = img.width;
canvas.height = img.height;
ctx.fillStyle = 'white';
ctx.fillRect(0, 0, canvas.width, canvas.height);
ctx.drawImage(img, 0, 0);
}
// Hand the blob to the Image via an object URL.
img.src = window.URL.createObjectURL(blob);
console.log(img.src);
}
I expect the output to be a 500×500 map image, but the actual output is a 500×500 white image.
ctx.fillStyle = 'white';
Any help or library suggestions would be appreciated. Thanks.
I have a simple 3D object and I am able to rotate i.e., move with my left mouse button, around the centre of axis - works fine. When I pan using the right mouse button the axis also shifts, as such it no longer moves around it’s present axis.
How can I move the object around it’s current axis, no matter where I drag the object?
Below is the complete code of script.js
// script.js — scene with an axis helper, orbit controls, a three-point
// light rig, and an OBJ+MTL model loaded from ./assets.
var scene = new THREE.Scene();
scene.add(new THREE.AxisHelper(100));

var camera = new THREE.PerspectiveCamera( 75, window.innerWidth/window.innerHeight, 0.1, 1000 );
camera.position.y = -200;

var renderer = new THREE.WebGLRenderer();
renderer.setSize( window.innerWidth, window.innerHeight );
document.body.appendChild( renderer.domElement );

var controls = new THREE.OrbitControls(camera, renderer.domElement);
controls.enableDamping = true;
controls.dampingFactor = 0.25;
controls.enableZoom = true;

// Light rig: warm key from the left, cool fill from the right, white back light.
var keyLight = new THREE.DirectionalLight(new THREE.Color('hsl(30, 100%, 75%)'), 1.0);
keyLight.position.set(-100, 0, 100);
var fillLight = new THREE.DirectionalLight(new THREE.Color('hsl(240, 100%, 75%)'), 0.75);
fillLight.position.set(100, 0, 100);
var backLight = new THREE.DirectionalLight(0xffffff, 1.0);
backLight.position.set(100, 0, -100).normalize();
scene.add(keyLight);
scene.add(fillLight);
scene.add(backLight);

// Load the materials first, then the geometry that references them.
var mtlLoader = new THREE.MTLLoader();
mtlLoader.setTexturePath('assets/');
mtlLoader.setPath('assets/');
mtlLoader.load('180319_object01.mtl', function (materials) {
    materials.preload();

    var objLoader = new THREE.OBJLoader();
    objLoader.setMaterials(materials);
    objLoader.setPath('assets/');
    objLoader.load('180319_object01.obj', function (object) {
        object.scale.set( 200, 200, 200 );
        scene.add(object);
        // Offset the model away from the world origin.
        object.position.x = -100;
        object.position.y = -100;
        object.position.z = 0;
    });
});

// Render loop: update the damped controls each frame, then draw.
var animate = function () {
    requestAnimationFrame( animate );
    controls.update();
    renderer.render(scene, camera);
};
animate();
It is because your camera is being moved by the controller, not the object itself. Rather than updating the camera's position and direction (which is what the orbit controller does), in your render method you'll want to update the position and Euler angles of the object itself to achieve the desired effect. To do this, track and update the object's position and its angle of rotation about the (y?) axis, in object (model) space. Hope that makes sense — let me know if you need me to elaborate.