Scale SVG pathPoints

I am scaling a polygon and setting the actual scaled points back into the pathArray:
const pointsCal = this.findPoints(allocatedTable.tableType.shape.pathArray); // calculating max x,y and min x,y of pathArray
const diameterX = (pointsCal.highX - pointsCal.lowX) / 2;
const diameterY = (pointsCal.highY - pointsCal.lowX) / 2;
const scalex = (diameterX + this.settings.tableTableSpace) / diameterX;
const scaleY = (diameterY + this.settings.tableTableSpace) / diameterY;
pathArray.forEach((point) => {
  if (point.command !== 'z') {
    point.x -= tableCenterPoint.x;
    point.y -= tableCenterPoint.y;
    point.x *= scalex;
    point.y *= scaleY;
    point.x += tableCenterPoint.x;
    point.y += tableCenterPoint.y;
  }
});
It works properly for a regular rectangle, but for rotated shapes it is not scaling properly.
I think I made a mistake in the logic for calculating the scaleX and scaleY values.

Why the divide by 2?
Try something like this:
const width = pointsCal.highX - pointsCal.lowX;
const height = pointsCal.highY - pointsCal.lowY;
const scalex = this.settings.tableTableSpace / width;
const scaleY = this.settings.tableTableSpace / height;
If that doesn't work, then you'll need to provide a minimal workable example.
Update
I'm still not 100% sure what you want, but looking at the code, I assume you want to scale the original shape so that it fills the SVG while allowing for some padding around it.
If so, you'll want to do something like this:
DrawScalledRectangle() {
  // Get the size of the original polygon
  const bbox = this.tableGroup.getBBox();

  // Get the size of the <svg> element.
  // This will be the value at the time this function is run. But the <svg> has width
  // and height of "100%" so it may change if the window is resized.
  const mainSvg = document.getElementById("mainSVG");
  const svgWidth = mainSvg.clientWidth;
  const svgHeight = mainSvg.clientHeight;

  // The scale will be svgSize / originalSize (but we subtract the padding from the svgSize first)
  const scaleX = (svgWidth - this.tableTableSpace * 2) / bbox.width;
  const scaleY = (svgHeight - this.tableTableSpace * 2) / bbox.height;

  this.pathArray.forEach(point => {
    if (point.command !== "z") {
      // New point location = padding + (oldPoint - shapePosition) * scale
      point.x = this.tableTableSpace + (point.x - bbox.x) * scaleX;
      point.y = this.tableTableSpace + (point.y - bbox.y) * scaleY;
    }
  });

  console.log(this.pathArray);
  ...snip...
}
https://stackblitz.com/edit/angular-thb9w5?file=src/app/app.component.ts

Related

Calculating point to line distance in GLSL

I am trying to calculate a point-to-line distance in GLSL, specifically in turbo.js.
This is part of a more general problem in which I try to find the closest points on a GeoJSON multiline with respect to a set of GeoJSON points; the number of calculations for a 500-point set against a 1000-segment line ends up being 500k point-to-line distance calculations.
This is way too much to handle in the browser (even in workers) so parallelism helps a lot.
The trick is that AFAIK I can only use a vec4 as an input, which means I can only do calculations on pairs of points.
So far I've progressed to calculating the distance and bearing of all pairs, but I can't make the last leg: calculating the point-to-line distance.
So the question is: given 3 points a, b and c, and knowing
- their positions in lon and lat
- their pairwise bearings and distances
is it possible to calculate the distance from a to the line defined by b and c, using transforms that take vec2, vec3 or vec4 as input arguments?
As a sub-problem, I know how to calculate the distance if the altitude of the triangle (a, b, c) from a doesn't intersect the segment (b, c), because then it's min(distance(a, b), distance(a, c)).
But how do I calculate the distance when the altitude does intersect the segment?
I'm not totally sure I understand your question.
It sounds like for 500 input points you want to know, for each point, which of the 1000 line segments is closest.
If that's what you're asking, then put all the points in floating point textures (another word for a texture is a 2D array). Draw a -1 to +1 quad that's the size of the number of results (500 results, so 50x10 or 25x20, etc.). Pass in the resolutions of the textures. Use gl_FragCoord to calculate an index to get the input point, A, and loop over all of the lines. Read the results via readPixels by encoding the index of the closest pair as a color.
precision highp float;

uniform sampler2D aValues;
uniform vec2 aDimensions;  // the size of the aValues texture in pixels (texels)
uniform sampler2D bValues;
uniform vec2 bDimensions;  // the size of the bValues texture in pixels (texels)
uniform sampler2D cValues;
uniform vec2 cDimensions;  // the size of the cValues texture in pixels (texels)
uniform vec2 outputDimensions;  // the size of the thing we're drawing to (canvas)

// this code, given a sampler2D, the size of the texture, and an index
// computes a UV coordinate to pull one RGBA value out of a texture
// as though the texture was a 1D array.
vec3 getPoint(in sampler2D tex, in vec2 dimensions, in float index) {
  vec2 uv = (vec2(
      floor(mod(index, dimensions.x)),
      floor(index / dimensions.x)) + 0.5) / dimensions;
  return texture2D(tex, uv).xyz;
}

// from https://stackoverflow.com/a/6853926/128511
float distanceFromPointToLine(in vec3 a, in vec3 b, in vec3 c) {
  vec3 ba = a - b;
  vec3 bc = c - b;
  float d = dot(ba, bc);
  float len = length(bc);
  float param = 0.0;
  if (len != 0.0) {
    param = clamp(d / (len * len), 0.0, 1.0);
  }
  vec3 r = b + bc * param;
  return distance(a, r);
}

void main() {
  // gl_FragCoord is the coordinate of the pixel that is being set by the fragment shader.
  // It is the center of the pixel so the bottom left corner pixel will be (0.5, 0.5).
  // The pixel to the left of that is (1.5, 0.5), the pixel above that is (0.5, 1.5), etc...
  // so we can compute back into a linear index.
  float ndx = floor(gl_FragCoord.y) * outputDimensions.x + floor(gl_FragCoord.x);

  // find the closest points
  float minDist = 10000000.0;
  float minIndex = -1.0;
  vec3 a = getPoint(aValues, aDimensions, ndx);
  for (int i = 0; i < ${bPoints.length / 4}; ++i) {
    vec3 b = getPoint(bValues, bDimensions, float(i));
    vec3 c = getPoint(cValues, cDimensions, float(i));
    float dist = distanceFromPointToLine(a, b, c);
    if (dist < minDist) {
      minDist = dist;
      minIndex = float(i);
    }
  }

  // convert to 8bit color. The canvas defaults to RGBA 8bits per channel
  // so take our integer index (minIndex) and convert to float values that
  // will end up as the same 32bit index when read via readPixels as
  // 32bit values.
  gl_FragColor = vec4(
      mod(minIndex, 256.0),
      mod(floor(minIndex / 256.0), 256.0),
      mod(floor(minIndex / (256.0 * 256.0)), 256.0),
      floor(minIndex / (256.0 * 256.0 * 256.0))) / 255.0;
}
I'm only going to guess, though, that in general this is better solved with some spatial structure that makes it so you don't have to check every line against every point, but something like the code above should work and be very parallel. Each result will be computed by another GPU core.
const v3 = twgl.v3;

// note: I'm using twgl to make the code smaller.
// This is not a lesson in WebGL. You should already know what it means
// to setup buffers and attributes and set uniforms and create textures.
// What's important is the technique, not the minutia of WebGL. If you
// don't know how to do those things you need a much bigger tutorial
// on WebGL like https://webglfundamentals.org

function main() {
  const gl = document.createElement('canvas').getContext('webgl');
  const ext = gl.getExtension('OES_texture_float');
  if (!ext) {
    alert('need OES_texture_float');
    return;
  }

  const r = max => Math.random() * max;
  const hsl = (h, s, l) => `hsl(${h * 360},${s * 100 | 0}%,${l * 100 | 0}%)`;

  function createPoints(numPoints) {
    const points = [];
    for (let i = 0; i < numPoints; ++i) {
      points.push(r(300), r(150), 0, 0); // RGBA
    }
    return points;
  }

  function distanceFromPointToLineSquared(a, b, c) {
    const ba = v3.subtract(a, b);
    const bc = v3.subtract(c, b);
    const dot = v3.dot(ba, bc);
    const lenSq = v3.lengthSq(bc);
    let param = 0;
    if (lenSq !== 0) {
      param = Math.min(1, Math.max(0, dot / lenSq));
    }
    const r = v3.add(b, v3.mulScalar(bc, param));
    return v3.distanceSq(a, r);
  }

  const aPoints = createPoints(6);
  const bPoints = createPoints(15);
  const cPoints = createPoints(15);

  // do it in JS to check
  {
    // compute closest lines to points
    const closest = [];
    for (let i = 0; i < aPoints.length; i += 4) {
      const a = aPoints.slice(i, i + 3);
      let minDistSq = Number.MAX_VALUE;
      let minIndex = -1;
      for (let j = 0; j < bPoints.length; j += 4) {
        const b = bPoints.slice(j, j + 3);
        const c = cPoints.slice(j, j + 3);
        const distSq = distanceFromPointToLineSquared(a, b, c);
        if (distSq < minDistSq) {
          minDistSq = distSq;
          minIndex = j / 4;
        }
      }
      closest.push(minIndex);
    }
    drawResults(document.querySelector('#js'), closest);
  }

  const vs = `
  attribute vec4 position;
  void main() {
    gl_Position = position;
  }
  `;

  const fs = `
  precision highp float;

  uniform sampler2D aValues;
  uniform vec2 aDimensions;  // the size of the aValues texture in pixels (texels)
  uniform sampler2D bValues;
  uniform vec2 bDimensions;  // the size of the bValues texture in pixels (texels)
  uniform sampler2D cValues;
  uniform vec2 cDimensions;  // the size of the cValues texture in pixels (texels)
  uniform vec2 outputDimensions;  // the size of the thing we're drawing to (canvas)

  // this code, given a sampler2D, the size of the texture, and an index
  // computes a UV coordinate to pull one RGBA value out of a texture
  // as though the texture was a 1D array.
  vec3 getPoint(in sampler2D tex, in vec2 dimensions, in float index) {
    vec2 uv = (vec2(
        floor(mod(index, dimensions.x)),
        floor(index / dimensions.x)) + 0.5) / dimensions;
    return texture2D(tex, uv).xyz;
  }

  // from https://stackoverflow.com/a/6853926/128511
  float distanceFromPointToLine(in vec3 a, in vec3 b, in vec3 c) {
    vec3 ba = a - b;
    vec3 bc = c - b;
    float d = dot(ba, bc);
    float len = length(bc);
    float param = 0.0;
    if (len != 0.0) {
      param = clamp(d / (len * len), 0.0, 1.0);
    }
    vec3 r = b + bc * param;
    return distance(a, r);
  }

  void main() {
    // gl_FragCoord is the coordinate of the pixel that is being set by the fragment shader.
    // It is the center of the pixel so the bottom left corner pixel will be (0.5, 0.5).
    // The pixel to the left of that is (1.5, 0.5), the pixel above that is (0.5, 1.5), etc...
    // so we can compute back into a linear index.
    float ndx = floor(gl_FragCoord.y) * outputDimensions.x + floor(gl_FragCoord.x);

    // find the closest points
    float minDist = 10000000.0;
    float minIndex = -1.0;
    vec3 a = getPoint(aValues, aDimensions, ndx);
    for (int i = 0; i < ${bPoints.length / 4}; ++i) {
      vec3 b = getPoint(bValues, bDimensions, float(i));
      vec3 c = getPoint(cValues, cDimensions, float(i));
      float dist = distanceFromPointToLine(a, b, c);
      if (dist < minDist) {
        minDist = dist;
        minIndex = float(i);
      }
    }

    // convert to 8bit color. The canvas defaults to RGBA 8bits per channel
    // so take our integer index (minIndex) and convert to float values that
    // will end up as the same 32bit index when read via readPixels as
    // 32bit values.
    gl_FragColor = vec4(
        mod(minIndex, 256.0),
        mod(floor(minIndex / 256.0), 256.0),
        mod(floor(minIndex / (256.0 * 256.0)), 256.0),
        floor(minIndex / (256.0 * 256.0 * 256.0))) / 255.0;
  }
  `;

  // compile shaders, link program, look up locations
  const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
  // calls gl.createBuffer, gl.bindBuffer, gl.bufferData for a -1 to +1 quad
  const bufferInfo = twgl.primitives.createXYQuadBufferInfo(gl);

  // make an RGBA float texture for each set of points
  // calls gl.createTexture, gl.bindTexture, gl.texImage2D, gl.texParameteri
  const aTex = twgl.createTexture(gl, {
    src: aPoints,
    width: aPoints.length / 4,
    type: gl.FLOAT,
    minMag: gl.NEAREST,
  });
  const bTex = twgl.createTexture(gl, {
    src: bPoints,
    width: bPoints.length / 4,
    type: gl.FLOAT,
    minMag: gl.NEAREST,
  });
  const cTex = twgl.createTexture(gl, {
    src: cPoints,
    width: cPoints.length / 4,
    type: gl.FLOAT,
    minMag: gl.NEAREST,
  });

  const numOutputs = aPoints.length / 4;
  gl.canvas.width = numOutputs;
  gl.canvas.height = 1;
  gl.viewport(0, 0, numOutputs, 1);

  gl.useProgram(programInfo.program);
  // calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
  twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
  // calls gl.activeTexture, gl.bindTexture, gl.uniform
  twgl.setUniforms(programInfo, {
    aValues: aTex,
    aDimensions: [aPoints.length / 4, 1],
    bValues: bTex,
    bDimensions: [bPoints.length / 4, 1],
    cValues: cTex,
    cDimensions: [cPoints.length / 4, 1],
    outputDimensions: [aPoints.length / 4, 1],
  });

  // draw the quad
  gl.drawElements(gl.TRIANGLES, 6, gl.UNSIGNED_SHORT, 0);

  // get the result
  const pixels = new Uint8Array(numOutputs * 4);
  const results = new Uint32Array(pixels.buffer);
  gl.readPixels(0, 0, numOutputs, 1, gl.RGBA, gl.UNSIGNED_BYTE, pixels);
  drawResults(document.querySelector('#glsl'), results);

  function drawResults(canvas, closest) {
    const ctx = canvas.getContext('2d');

    // draw the lines
    ctx.beginPath();
    for (let j = 0; j < bPoints.length; j += 4) {
      const b = bPoints.slice(j, j + 2);
      const c = cPoints.slice(j, j + 2);
      ctx.moveTo(...b);
      ctx.lineTo(...c);
    }
    ctx.strokeStyle = '#888';
    ctx.stroke();

    // draw the points and closest lines
    for (let i = 0; i < aPoints.length; i += 4) {
      const a = aPoints.slice(i, i + 2);
      const ndx = closest[i / 4] * 4;
      const b = bPoints.slice(ndx, ndx + 2);
      const c = cPoints.slice(ndx, ndx + 2);
      const color = hsl(i / aPoints.length, 1, 0.4);
      ctx.fillStyle = color;
      ctx.strokeStyle = color;
      ctx.fillRect(a[0] - 2, a[1] - 2, 5, 5);
      ctx.beginPath();
      ctx.moveTo(...b);
      ctx.lineTo(...c);
      ctx.stroke();
    }
  }
}
main();
canvas { border: 1px solid black; margin: 5px; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<div>glsl</div>
<canvas id="glsl"></canvas>
<div>js</div>
<canvas id="js"></canvas>
If you use WebGL2 then you can use texelFetch, so getPoint becomes
vec3 getPoint(in sampler2D tex, in int index) {
  ivec2 size = textureSize(tex, 0);
  ivec2 uv = ivec2(index % size.x, index / size.x);
  return texelFetch(tex, uv, 0).xyz;
}
and you don't need to pass in the sizes of the input textures, only the output size. Also, you could make your output R32U and output unsigned integer indices, so there would be no need to encode the result.
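For reference, here is a minimal, untested sketch of that R32UI idea (the names and setup here are illustrative, not from the snippet above): render the index into an unsigned-integer texture attached to a framebuffer, then read it back directly as 32-bit values.
const gl = document.createElement('canvas').getContext('webgl2');

// Fragment shader writes the index as an unsigned integer; no byte-encoding needed.
const fs = `#version 300 es
precision highp float;
out uvec4 outIndex;   // rendered into an R32UI attachment; only .r is kept
void main() {
  uint minIndex = 0u; // ...same closest-segment loop as above, using texelFetch...
  outIndex = uvec4(minIndex, 0u, 0u, 0u);
}`;

// An R32UI texture as the render target instead of the canvas
const numOutputs = 500;
const tex = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, tex);
gl.texStorage2D(gl.TEXTURE_2D, 1, gl.R32UI, numOutputs, 1);
const fb = gl.createFramebuffer();
gl.bindFramebuffer(gl.FRAMEBUFFER, fb);
gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, tex, 0);

// ...compile the program and draw the quad as in the snippet above...

// Unsigned-integer color buffers read back as RGBA_INTEGER / UNSIGNED_INT;
// each result's index lands in the first of every group of 4 values.
const results = new Uint32Array(numOutputs * 4);
gl.readPixels(0, 0, numOutputs, 1, gl.RGBA_INTEGER, gl.UNSIGNED_INT, results);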
note: The code assumes you are using fewer than 2048 values for each of a, b and c, so much of the code assumes 1-dimensional textures. If you need more than 2048 you'll need to adjust the code to make rectangular textures of a size that fits your data; for example, if you had 9000 values then a 9x1000 texture would work. If you have 8999 values you still need a 9x1000 texture, just padded, since textures are 2D arrays.
Also note that calling readPixels is considered slow. For example, if you just wanted to draw the results as above, instead of rendering to the canvas and reading the values out via readPixels you could render the result to a texture, then pass the texture into another shader.
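As a rough sketch of that render-to-texture idea with twgl (again illustrative, assuming the same program, quad and numOutputs as above):
// Render the encoded indices into a texture instead of the canvas.
// createFramebufferInfo makes a framebuffer with a texture color attachment.
const attachments = [{ format: gl.RGBA, type: gl.UNSIGNED_BYTE, minMag: gl.NEAREST }];
const fbi = twgl.createFramebufferInfo(gl, attachments, numOutputs, 1);
twgl.bindFramebufferInfo(gl, fbi);   // calls gl.bindFramebuffer and gl.viewport
gl.drawElements(gl.TRIANGLES, 6, gl.UNSIGNED_SHORT, 0);
twgl.bindFramebufferInfo(gl, null);  // back to rendering to the canvas
// fbi.attachments[0] is now a texture holding the results; pass it as a
// uniform to the next shader instead of reading it back with readPixels.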
addendum
This is probably the wrong place for this, but as a terse explanation of GLSL for stuff like this, you can think of GLSL as a fancy version of Array.prototype.map. When you use map you don't choose what is being written to directly; that happens indirectly.
const a = [1, 2, 3, 4, 5];
const b = a.map((v, index) => { return v * 2 + index; });
The { return v * 2 + index; } part is analogous to a shader. In JavaScript the function inside map returns the value; in GLSL ES 1.0 the shader sets gl_FragColor as its output. In the JavaScript, index is the index of the array being written to (and happens to be the index of the input array as well). In GLSL, gl_FragCoord serves the same role.
Otherwise, the output of the vertex shader determines which pixels (which array elements of a 2D array) will get written to, so that makes it a more selective version of map. In the code above we're drawing a -1 to +1 quad, effectively saying "map over all pixels".
In fact, here's a version of the above code with no GLSL, just JavaScript, but the JavaScript restructured to look more like GLSL.
const v3 = twgl.v3;

function main() {
  const r = max => Math.random() * max;
  const hsl = (h, s, l) => `hsl(${h * 360},${s * 100 | 0}%,${l * 100 | 0}%)`;

  function createPoints(numPoints) {
    const points = [];
    for (let i = 0; i < numPoints; ++i) {
      points.push(r(300), r(150), 0, 0); // RGBA
    }
    return points;
  }

  function distanceFromPointToLineSquared(a, b, c) {
    const ba = v3.subtract(a, b);
    const bc = v3.subtract(c, b);
    const dot = v3.dot(ba, bc);
    const lenSq = v3.lengthSq(bc);
    let param = 0;
    if (lenSq !== 0) {
      param = Math.min(1, Math.max(0, dot / lenSq));
    }
    const r = v3.add(b, v3.mulScalar(bc, param));
    return v3.distanceSq(a, r);
  }

  const aPoints = createPoints(6);
  const bPoints = createPoints(15);
  const cPoints = createPoints(15);

  const gl_FragCoord = {};
  let gl_FragColor;

  const aValues = aPoints;
  const aDimensions = {}; // N/A
  const bValues = bPoints;
  const bDimensions = {}; // N/A
  const cValues = cPoints;
  const cDimensions = {}; // N/A
  const outputDimensions = { x: aPoints.length / 4, y: 1 };

  function getPoint(sampler, dimension, ndx) {
    return sampler.slice(ndx * 4, ndx * 4 + 3);
  }

  function javaScriptFragmentShader() {
    // gl_FragCoord is the coordinate of the pixel that is being set by the fragment shader.
    // It is the center of the pixel so the bottom left corner pixel will be (0.5, 0.5).
    // The pixel to the left of that is (1.5, 0.5), the pixel above that is (0.5, 1.5), etc...
    // so we can compute back into a linear index.
    const ndx = Math.floor(gl_FragCoord.y) * outputDimensions.x + Math.floor(gl_FragCoord.x);

    // find the closest points
    let minDist = 10000000.0;
    let minIndex = -1.0;
    const a = getPoint(aValues, aDimensions, ndx);
    for (let i = 0; i < bPoints.length / 4; ++i) {
      const b = getPoint(bValues, bDimensions, i);
      const c = getPoint(cValues, cDimensions, i);
      const dist = distanceFromPointToLineSquared(a, b, c);
      if (dist < minDist) {
        minDist = dist;
        minIndex = i;
      }
    }

    // convert to 8bit color. The canvas defaults to RGBA 8bits per channel
    // so take our integer index (minIndex) and convert to float values that
    // will end up as the same 32bit index when read via readPixels as
    // 32bit values.
    gl_FragColor = [
      minIndex % 256.0,
      Math.floor(minIndex / 256.0) % 256.0,
      Math.floor(minIndex / (256.0 * 256.0)) % 256.0,
      Math.floor(minIndex / (256.0 * 256.0 * 256.0)),
    ].map(v => v / 255.0);
  }

  // do it in JS to check
  {
    // compute closest lines to points
    const closest = [];
    const width = aPoints.length / 4;
    const height = 1;
    // WebGL drawing each pixel
    for (let y = 0; y < height; ++y) {
      for (let x = 0; x < width; ++x) {
        gl_FragCoord.x = x + 0.5; // because pixels represent a rectangle one unit wide in pixel space
        gl_FragCoord.y = y + 0.5; // the center of each pixel is in the middle of that rectangle
        javaScriptFragmentShader();
        const index = gl_FragColor[0] * 255 +
                      gl_FragColor[1] * 255 * 256 +
                      gl_FragColor[2] * 255 * 256 * 256 +
                      gl_FragColor[3] * 255 * 256 * 256 * 256;
        closest.push(index);
      }
    }
    drawResults(document.querySelector('#js'), closest);
  }

  function drawResults(canvas, closest) {
    const ctx = canvas.getContext('2d');

    // draw the lines
    ctx.beginPath();
    for (let j = 0; j < bPoints.length; j += 4) {
      const b = bPoints.slice(j, j + 2);
      const c = cPoints.slice(j, j + 2);
      ctx.moveTo(...b);
      ctx.lineTo(...c);
    }
    ctx.strokeStyle = '#888';
    ctx.stroke();

    // draw the points and closest lines
    for (let i = 0; i < aPoints.length; i += 4) {
      const a = aPoints.slice(i, i + 2);
      const ndx = closest[i / 4] * 4;
      const b = bPoints.slice(ndx, ndx + 2);
      const c = cPoints.slice(ndx, ndx + 2);
      const color = hsl(i / aPoints.length, 1, 0.4);
      ctx.fillStyle = color;
      ctx.strokeStyle = color;
      ctx.fillRect(a[0] - 2, a[1] - 2, 5, 5);
      ctx.beginPath();
      ctx.moveTo(...b);
      ctx.lineTo(...c);
      ctx.stroke();
    }
  }
}
main();
canvas { border: 1px solid black; margin: 5px; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas id="js"></canvas>

UIScrollView is stopping too soon

I'm using a scroll view to make an image gallery for my app and I have it mostly working. It lets me scroll through the images one by one, but the very last image always gets cut off and I'm not sure why.
This is the bulk of the operation:
var idx = 0;
foreach (var mediaItem in _mediaItems)
{
    var xPosition = UIScreen.MainScreen.Bounds.Width * idx;
    var imageView = new UIImageView();
    imageView.SetImage(new NSUrl(mediaItem), UIImage.FromBundle("image_placeholder"));
    imageView.Frame = new CGRect(xPosition, 0, svGallery.Frame.Width + 50, svGallery.Frame.Height);
    imageView.ContentMode = UIViewContentMode.ScaleAspectFit;
    svGallery.ContentSize = new CGSize
    {
        Width = svGallery.Frame.Width * (idx + 1)
    };
    svGallery.AddSubview(imageView);
    idx++;
}
Minus that flaw, this works perfectly and exactly as I expect it to.
From the shared code, the Width of the ContentSize is:
Width = svGallery.Frame.Width * (idx + 1)
However, each ImageView's width (svGallery.Frame.Width + 50) is greater than svGallery.Frame.Width:
imageView.Frame = new CGRect(xPosition, 0, svGallery.Frame.Width + 50, svGallery.Frame.Height);
Therefore the actual Width of the ContentSize cannot contain all of the ImageViews' content, and the more ImageViews there are, the more the last picture gets cut off. With three images, for example, the views span 3 * (Width + 50) while the ContentSize is only 3 * Width, leaving the last image 150 points short.
You can modify the Width of the ContentSize as follows to check whether it works:
svGallery.ContentSize = new CGSize
{
    Width = (svGallery.Frame.Width + 50) * (idx + 1)
};

Zoom in and out : canvas offset

I'm working on a zoom functionality like this:
public wheelEventHandler(ev) {
  var pointer = this.canvas.getPointer(ev.e);
  var posx = pointer.x;
  var posy = pointer.y;
  if (ev.wheelDelta > 0) {
    // zoom in
    let valeurZoom = this.canvas.getZoom() * 1.1 <= this.maxZoom ? this.canvas.getZoom() * 1.1 : this.maxZoom;
    this.canvas.zoomToPoint(new fabric.Point(posx, posy), valeurZoom);
  } else {
    // zoom out
    let valeurZoom = this.canvas.getZoom() / 1.1 >= 1 ? this.canvas.getZoom() / 1.1 : 1;
    this.canvas.zoomToPoint(new fabric.Point(posx, posy), valeurZoom);
  }
}
The problem is that when I zoom in and then zoom out, the initial view has an offset, and I don't know what to do. When my zoom is back to 1, I want the canvas to show exactly what it showed before, with the image centered and no offset.
How can I do that?
(Screenshots: the first shows what I have at the beginning and what I want when the zoom is back to 1; the second shows what I get when I zoom back to 1, with the unwanted offset marked in red.)
Lempkin,
When you zoom out to the initial zoom (1), try using this:
var centerOfCanvas = new fabric.Point(canvas.getWidth() / 2, canvas.getHeight() / 2);
canvas.zoomToPoint(centerOfCanvas, 1);
canvas.renderAll();
When you want to always zoom out on the center, use this logic:
var centerOfCanvas = new fabric.Point(canvas.getWidth() / 2, canvas.getHeight() / 2);
canvas.zoomToPoint(centerOfCanvas, canvas.getZoom() / 1.1);
canvas.renderAll();
If you want to zoom out/in at the mouse position, use your logic, but when the zoom gets back to 1, reset the viewport.
Reset to default:
canvas.viewportTransform = [1,0,0,1,0,0]
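Putting that together with the wheel handler from the question, a minimal sketch (same canvas and maxZoom fields assumed) could reset the viewport whenever the zoom returns to 1:
public wheelEventHandler(ev) {
  var pointer = this.canvas.getPointer(ev.e);
  if (ev.wheelDelta > 0) {
    // zoom in, capped at maxZoom
    let valeurZoom = Math.min(this.canvas.getZoom() * 1.1, this.maxZoom);
    this.canvas.zoomToPoint(new fabric.Point(pointer.x, pointer.y), valeurZoom);
  } else {
    // zoom out, floored at 1
    let valeurZoom = Math.max(this.canvas.getZoom() / 1.1, 1);
    if (valeurZoom === 1) {
      // back at the initial zoom: clear any accumulated pan/zoom offset
      this.canvas.setViewportTransform([1, 0, 0, 1, 0, 0]);
    } else {
      this.canvas.zoomToPoint(new fabric.Point(pointer.x, pointer.y), valeurZoom);
    }
  }
  this.canvas.renderAll();
}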

D3 force layout with CSS positioning

I'm placing circles on a map corresponding to GPS coordinates. Each circle is contained within an svg container which is placed on the page using CSS top and left properties. In my implementation, these containers often sit atop one another.
I am trying to implement collision detection and/or add a slight negative charge to these containers so that overlaps cause containers to distance themselves from one another.
Thus far, my tests with force layouts have either resulted in no change, or resulted in an error ('cannot set property index of null' or 'cannot set property x of null'). It's apparent that I'm doing something wrong but I have been unable to identify a path to resolution from the articles I've read online.
Any ideas on how I can stop the containers from sitting atop one another?
var self = this;
var data = [
  {lat: 127, lon: 36, name: 'a', radius: 9},
  {lat: 127, lon: 36, name: 'b', radius: 9}
];

// Position SVG containers correctly
var latLngToPx = function(d) {
  var temp = new google.maps.LatLng(d.lat, d.lon);
  temp = self.map.projection.fromLatLngToDivPixel(temp);
  d.x = temp.x;
  d.y = temp.y;
  return d3.select(this)
    .style('left', d.x + 'px')
    .style('top', d.y + 'px');
};

var collide = function(node) {
  var r = node.radius + 16,
      nx1 = node.x - r,
      nx2 = node.x + r,
      ny1 = node.y - r,
      ny2 = node.y + r;
  return function(quad, x1, y1, x2, y2) {
    if (quad.point && (quad.point !== node)) {
      var x = node.x - quad.point.x,
          y = node.y - quad.point.y,
          l = Math.sqrt(x * x + y * y),
          r = node.radius + quad.point.radius;
      if (l < r) {
        l = (l - r) / l * 0.5;
        node.x -= x *= l;
        node.y -= y *= l;
        quad.point.x += x;
        quad.point.y += y;
      }
    }
    return x1 > nx2 || x2 < nx1 || y1 > ny2 || y2 < ny1;
  };
};

var svgBind = d3.select(settings[type].layer).selectAll('svg')
  .data(data, function(d) { return d.name; })
  .each(latLngToPx);

var svg = svgBind.enter().append('svg')
  .each(latLngToPx);

// svg[0] contains the svg elements
var nodes = svg[0];

var force = d3.layout.force()
  .nodes(nodes)
  .charge(-100)
  .start();

force.on('tick', function() {
  var q = d3.geom.quadtree(nodes),
      i = 0,
      n = nodes.length;
  while (++i < n) {
    q.visit(collide(nodes[i]));
  }
  svg
    .style('left', function(d) { return (d.x - lm.config.offset) + 'px'; })
    .style('top', function(d) { return (d.y - lm.config.offset) + 'px'; });
});

var circ = svg.append('circle')
  .attr('r', settings[type].r)
  .attr('cx', 10)
  .attr('cy', 10);
You shouldn't need to do the collision detection yourself -- the force layout should take care of that for you. Here are the basic steps you need to take:
1. To each data element that represents a circle, add x and y members that contain its current (screen) coordinates. This is what the force layout will operate on.
2. Pass the array of these elements to the force layout as nodes. There's no need to set links to start with, although you might want to do so later to control the placement of nodes with respect to each other.
3. Start the force layout.
4. On each tick, redraw the elements at their new positions.
5. Tweak the parameters of the force layout to your liking.
You are doing most of this already; I'm just mentioning it again to clarify. The code would look something like this.
function latLngToPx(d) {
  var temp = new google.maps.LatLng(d.lat, d.lon);
  temp = self.map.projection.fromLatLngToDivPixel(temp);
  d.x = temp.x;
  d.y = temp.y;
}

data.forEach(function(d) { latLngToPx(d); });

var nodes = d3.select("body").selectAll("svg").data(data).enter().append("svg");

var force = d3.layout.force().nodes(data);

force.on("tick", function() {
  nodes.style('left', function(d) { return (d.x - lm.config.offset) + 'px'; })
       .style('top', function(d) { return (d.y - lm.config.offset) + 'px'; });
});
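One detail the sketch above leaves out is actually starting the layout (step 3). Assuming D3 v3's force layout, as in the question, that would be something like:
force.charge(-100) // a slight negative charge pushes overlapping nodes apart
     .gravity(0)   // optional: don't pull nodes toward the layout center
     .start();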

How is the getBBox() SVGRect calculated?

I have a g element that contains one or more path elements. As I mentioned in another question, I scale and translate the g element by computing a transform attribute so that it fits on a grid in another part of the canvas.
The calculation is done using the difference between two rectangles, the getBBox() from the g element and the rectangle around the grid.
Here is the question -- after I do the transform, I update the contents of the g element and call getBBox() again, without removing the transform. The resulting rectangle appears to be calculated without considering the transform. I would have expected it to reflect the change. Is this behavior consistent with the SVG specification? How do I get the bounding box of the transformed rectangle?
This, BTW, is in an HTML 5 document running in Firefox 4, if that makes any difference.
Update: apparently this behavior is pretty clearly in violation of the specification. From the text at the W3C:
SVGRect getBBox()
Returns the tight bounding box in current user space (i.e., after application of the ‘transform’ attribute, if any) on the geometry of all contained graphics elements, exclusive of stroking, clipping, masking and filter effects). Note that getBBox must return the actual bounding box at the time the method was called, even in case the element has not yet been rendered.
Am I reading this correctly? If so, this seems to be an error in the SVG implementation Firefox uses; I haven't had a chance to try any others. I would file a bug report if someone could point me to where.
People often get confused by the behavioral difference between getBBox and getBoundingClientRect.
getBBox is an SVG element's native method, roughly the equivalent of finding the offset/client width of an HTML DOM element. The width and height are never going to change, even when the element is rotated. It cannot be used on HTML DOM elements.
getBoundingClientRect is common to both HTML and SVG elements. The bounding rectangle's width and height change when the element is rotated, or when more elements are grouped.
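A quick way to see the difference (a minimal sketch, assuming an inline SVG containing a rotated <rect> with id "box"):
var box = document.getElementById('box'); // e.g. <rect width="100" height="50" transform="rotate(45)"/>
var bbox = box.getBBox();                 // local geometry: ignores the rotation
var rect = box.getBoundingClientRect();   // on-screen box: grows when rotated
console.log(bbox.width, bbox.height);     // 100 x 50, unchanged by the transform
console.log(rect.width, rect.height);     // ~106 x ~106, reflecting the rotated extent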
The behaviour you see is correct, and consistent with the spec.
The transform gets applied, then the bbox is calculated in "current user units", i.e. the current user space. So if you want to see the result of a transform on the element you'd need to look at the bbox of a parent node or similar.
It's a bit confusing, but explained a lot better in the SVG Tiny 1.2 spec for SVGLocatable
That contains a number of examples that clarify what it's supposed to do.
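For example (a small sketch): wrap the transformed element in a plain <g>; the wrapper's bbox is computed in its own user space, i.e. after the child's transform has been applied:
<svg>
  <g id="wrapper">
    <rect id="box" width="100" height="50" transform="rotate(45)"/>
  </g>
</svg>
<script>
  // the rect's own bbox ignores its own transform...
  console.log(document.getElementById('box').getBBox());     // 100 x 50
  // ...but the wrapper's bbox sees the rotated geometry
  console.log(document.getElementById('wrapper').getBBox()); // ~106 x ~106
</script>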
There are at least 2 easy but somewhat hacky ways to do what you ask... if there are nicer (less hacky) ways, I haven't found them yet.
EASY HACKY #1:
a) set up a rect that matches the "untransformed" bbox that group.getBBox() is returning
b) apply the group's "unapplied transform" to that rect
c) rect.getBBox() should now return the bbox you're looking for
EASY HACKY #2: (only tested in Chrome)
a) use element.getBoundingClientRect(), which returns enough info for you to construct the bbox you're looking for
Apparently getBBox() doesn't take the transformations into consideration.
I can point you here; unfortunately I wasn't able to make it work: http://tech.groups.yahoo.com/group/svg-developers/message/22891
SVG groups have a nasty habit of not accumulating all the transformations made. I have my own way to cope with this issue: I use my own attributes to store the current transformation data, which I then include in any further transformation. Use XML-compatible attributes like alttext, value, name... or just x and y for storing the accumulated value as an attribute.
Example:
<g id="group" x="20" y="100" transform="translate(20, 100)">
<g id="subgroup" alttext="45" transform="rotate(45)">
<line...etc...
So when I make a transformation, I take those handmade attribute values, and when writing the result back, I write both the transform and the same value into the attributes I made, to keep all the accumulated values.
Example for rotation:
function symbRot(evt) {
  evt.target.ondblclick = function () {
    stopBlur();
    var ptx = symbG.parentNode.lastChild.getAttribute("cx");
    var pty = symbG.parentNode.lastChild.getAttribute("cy");
    var currRot = symbG.getAttributeNS(null, "alttext");
    var rotAng;
    if (currRot == 0) {
      rotAng = 90;
    } else if (currRot == 90) {
      rotAng = 180;
    } else if (currRot == 180) {
      rotAng = 270;
    } else if (currRot == 270) {
      rotAng = 0;
    }
    symbG.setAttributeNS(null, "transform", "rotate(" + rotAng + "," + ptx + ", " + pty + ")");
    symbG.setAttributeNS(null, "alttext", rotAng);
  };
}
The following code takes into account the transformations (matrix or otherwise) from parents, the element itself, and children. So it will work on a <g> element, for example.
You will normally want to pass the parent <svg> as the third argument, toElement, so that the computed bounding box is returned in the coordinate space of the <svg> (which is generally the coordinate space we care about).
/**
 * @param {SVGElement} element - Element to get the bounding box for
 * @param {boolean} [withoutTransforms=false] - If true, transforms will not be calculated
 * @param {SVGElement} [toElement] - Element to calculate the bounding box relative to
 * @returns {SVGRect} Coordinates and dimensions of the real bounding box
 */
function getBBox(element, withoutTransforms, toElement) {
  var svg = element.ownerSVGElement;
  if (!svg) {
    return { x: 0, y: 0, cx: 0, cy: 0, width: 0, height: 0 };
  }

  var r = element.getBBox();

  if (withoutTransforms) {
    return {
      x: r.x,
      y: r.y,
      width: r.width,
      height: r.height,
      cx: r.x + r.width / 2,
      cy: r.y + r.height / 2
    };
  }

  var p = svg.createSVGPoint();
  var matrix = (toElement || svg).getScreenCTM().inverse().multiply(element.getScreenCTM());

  p.x = r.x;
  p.y = r.y;
  var a = p.matrixTransform(matrix);

  p.x = r.x + r.width;
  p.y = r.y;
  var b = p.matrixTransform(matrix);

  p.x = r.x + r.width;
  p.y = r.y + r.height;
  var c = p.matrixTransform(matrix);

  p.x = r.x;
  p.y = r.y + r.height;
  var d = p.matrixTransform(matrix);

  var minX = Math.min(a.x, b.x, c.x, d.x);
  var maxX = Math.max(a.x, b.x, c.x, d.x);
  var minY = Math.min(a.y, b.y, c.y, d.y);
  var maxY = Math.max(a.y, b.y, c.y, d.y);

  var width = maxX - minX;
  var height = maxY - minY;

  return {
    x: minX,
    y: minY,
    width: width,
    height: height,
    cx: minX + width / 2,
    cy: minY + height / 2
  };
}
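For example, to get the on-screen box of a transformed group in the coordinate space of its root <svg> (element ids assumed):
var svg = document.querySelector('svg');
var group = document.querySelector('#myGroup');
var box = getBBox(group, false, svg);
console.log(box.x, box.y, box.width, box.height, box.cx, box.cy);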
I made a helper function which returns various metrics of an SVG element (including the bbox of a transformed element).
The code is here:
SVGElement.prototype.getTransformToElement =
    SVGElement.prototype.getTransformToElement || function(elem) {
  return elem.getScreenCTM().inverse().multiply(this.getScreenCTM());
};
function get_metrics(el) {
  function pointToLineDist(A, B, P) {
    var nL = Math.sqrt((B.x - A.x) * (B.x - A.x) + (B.y - A.y) * (B.y - A.y));
    return Math.abs((P.x - A.x) * (B.y - A.y) - (P.y - A.y) * (B.x - A.x)) / nL;
  }

  function dist(point1, point2) {
    var xs = 0,
        ys = 0;
    xs = point2.x - point1.x;
    xs = xs * xs;
    ys = point2.y - point1.y;
    ys = ys * ys;
    return Math.sqrt(xs + ys);
  }

  var b = el.getBBox(),
      objDOM = el,
      svgDOM = objDOM.ownerSVGElement;

  // Get the local to global matrix
  var matrix = svgDOM.getTransformToElement(objDOM).inverse(),
      oldp = [[b.x, b.y], [b.x + b.width, b.y], [b.x + b.width, b.y + b.height], [b.x, b.y + b.height]],
      pt, newp = [],
      obj = {},
      i, pos = Number.POSITIVE_INFINITY,
      neg = Number.NEGATIVE_INFINITY,
      minX = pos,
      minY = pos,
      maxX = neg,
      maxY = neg;

  for (i = 0; i < 4; i++) {
    pt = svgDOM.createSVGPoint();
    pt.x = oldp[i][0];
    pt.y = oldp[i][1];
    newp[i] = pt.matrixTransform(matrix);
    if (newp[i].x < minX) minX = newp[i].x;
    if (newp[i].y < minY) minY = newp[i].y;
    if (newp[i].x > maxX) maxX = newp[i].x;
    if (newp[i].y > maxY) maxY = newp[i].y;
  }

  // The next refers to the transformed object itself, not its bbox.
  // newp[0] - newp[3] are the transformed object's corner
  // points in clockwise order starting from the top left corner.
  obj.newp = newp; // array of corner points
  obj.width = pointToLineDist(newp[1], newp[2], newp[0]) || 0;
  obj.height = pointToLineDist(newp[2], newp[3], newp[0]) || 0;
  obj.toplen = dist(newp[0], newp[1]);
  obj.rightlen = dist(newp[1], newp[2]);
  obj.bottomlen = dist(newp[2], newp[3]);
  obj.leftlen = dist(newp[3], newp[0]);

  // The next refers to the transformed object's bounding box
  obj.BBx = minX;
  obj.BBy = minY;
  obj.BBx2 = maxX;
  obj.BBy2 = maxY;
  obj.BBwidth = maxX - minX;
  obj.BBheight = maxY - minY;

  return obj;
}
A fully functional example is here:
http://jsbin.com/acowaq/1
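For instance (element id assumed), m.width/m.height are the transformed object's own side lengths, while m.BBwidth/m.BBheight describe the axis-aligned box around it:
var m = get_metrics(document.getElementById('rotatedGroup'));
console.log(m.width, m.height, m.BBwidth, m.BBheight);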
