Setting skew transformation center - svg

For operations such as scale and rotate, Raphael.js provides individual methods through which we can specify the origin of the transformation.
But for skew there is no method like ele.skew(xskewAmount,yskewAmount,xtransfOrigin,ytransfOrigin).
So I went for the ele.transform method, like ele.transform("m1,0,.5,1,0,0"), to perform an x-skew. But I can't specify an origin here, so the element gets translated incorrectly.
I need the following info:

1. Is there any method through which I can set the transform origin for a skew?
2. How far will the element be translated (unwantedly) if I skew it, so that I can reposition it manually?

My code: http://jsfiddle.net/tYqdk/1/
Please note the SkewX button at the bottom of the page.

I know this is old, but here goes.
W3C SVG 1.1 - 7 Coordinate Systems, Transformations and Units
7.4 Coordinate system transformations
7.5 Nested transformations
7.6 The ‘transform’ attribute
In section 7.6 they describe the rotation transformation about a point (cx, cy).
Simply put, it's this matrix multiplication:
translate(<cx>, <cy>) • rotate(<rotate-angle>) • translate(-<cx>, -<cy>)
To apply this to skewX use:
translate(<cx>, <cy>) • skewX(<skew-angle>) • translate(-<cx>, -<cy>)
Note: these matrix multiplications are described in 7.5 Nested transformations. The matrices involved (as defined in the spec) are:
generic matrix:
| a c e |
| b d f |
| 0 0 1 |
translate(tx, ty) matrix:
| 1 0 tx |
| 0 1 ty |
| 0 0 1  |
skewX(a) matrix:
| 1 tan(a) 0 |
| 0 1      0 |
| 0 0      1 |
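If you just need the effect and not SVGTransform objects, the same composition can be written straight into the element's transform attribute. A minimal sketch, assuming an element with id skewMe and a 20 degree skew about its bounding-box center (both the id and the angle are made up for illustration):

// Compose "translate(cx,cy) skewX(angle) translate(-cx,-cy)" as a transform attribute
const el = document.querySelector('#skewMe');   // assumed id
const box = el.getBBox();
const cx = box.x + box.width / 2;
const cy = box.y + box.height / 2;
el.setAttribute('transform',
  `translate(${cx} ${cy}) skewX(20) translate(${-cx} ${-cy})`);

The function below builds the same composition programmatically with SVGTransform objects, so it can be reused for any element: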
var skewer = function(element, angle, x, y) {
  var box, radians, svg, transform;
  // x and y are defined in terms of the element's bounding box
  //   (0,0)
  //    --------------
  //    |            |
  //    |            |
  //    --------------
  //                (1,1)
  // it defaults to the center (0.5, 0.5)
  // this can easily be modified to use absolute coordinates
  if (isNaN(x)) {
    x = 0.5;
  }
  if (isNaN(y)) {
    y = 0.5;
  }
  box = element.getBBox();
  x = x * box.width + box.x;
  y = y * box.height + box.y;
  radians = angle * Math.PI / 180.0;
  svg = document.querySelector('svg');

  transform = svg.createSVGTransform();
  // creates this matrix
  // | 1 0 0 | => see the first 2 rows of the
  // | 0 1 0 |    generic matrix above for the mapping
  // translate(<cx>, <cy>)
  transform.matrix.e = x;
  transform.matrix.f = y;
  // appending transforms will perform the matrix multiplications
  element.transform.baseVal.appendItem(transform);

  transform = svg.createSVGTransform();
  // skewX(<skew-angle>)
  transform.matrix.c = Math.tan(radians);
  element.transform.baseVal.appendItem(transform);

  transform = svg.createSVGTransform();
  // translate(-<cx>, -<cy>)
  transform.matrix.e = -x;
  transform.matrix.f = -y;
  element.transform.baseVal.appendItem(transform);
};
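Called like this (the element selector is an assumption), it skews around the bounding-box center by default, or around whatever relative point you pass:

// Skew by 20 degrees around the bounding-box center (the default origin)
skewer(document.querySelector('#myRect'), 20);
// Skew around the top-left corner of the bounding box instead
skewer(document.querySelector('#myRect'), 20, 0, 0);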
I forked your jsfiddle.
Update: a new fiddle using built-in SVGMatrix methods. I believe it's easier to read and understand.

Related

How can I use Google Maps Circle, Rectangle and Polygon in Node JS?

Does anyone know if I am able to use the Google Maps Circle, Rectangle and Polygon classes in Node JS? In the frontend it's easy with the Google Maps JavaScript SDK, but I can't figure out how to get a hold of this library within Node JS.
I need to be able to check if points are within bounds, something along the lines of:
const location = new google.maps.LatLng(lat, lng);
const circle = new google.maps.Circle({
  center: area.center,
  radius: area.radius,
});
const doesContain = circle.getBounds().contains(location);
Thanks ahead!
Alright, after giving it some thought I realized it's easier to write my own code for checking whether a geometry contains a point than to depend on the Google Maps library to do so.
Although this does not offer all the functionality the Google Maps SDK offers, it does solve the geometry problem.
For anyone else looking for other Google Maps SDK functionalities, check out this Node.js Client for Google Maps Services, though it does not include the geometry functions I was looking for.
Solution
Without further ado here is my code:
class Circle {
  /**
   * Circle constructor
   * @param {array} center Center coordinate [lat, lng]
   * @param {number} radius Radius of the circle in meters
   */
  constructor(center, radius) {
    this.name = "Circle";
    this.center = center;
    this.radius = radius;
  }

  /**
   * Checks if a point is within the circle
   * @param {array} point Coordinates of a point [lat, lng]
   * @returns true if point is within, false otherwise
   */
  contains(point) {
    const { center, radius } = this;
    const distance = this.distance(center, point);
    if (distance > radius) return false;
    return true;
  }

  /**
   * Calculates the distance between two points (in meters)
   * @param {array} p1 [lat, lng] point 1
   * @param {array} p2 [lat, lng] point 2
   * @returns Distance between the points in meters
   */
  distance(p1, p2) {
    var R = 6378.137; // Radius of earth in km
    var dLat = (p2[0] * Math.PI) / 180 - (p1[0] * Math.PI) / 180;
    var dLon = (p2[1] * Math.PI) / 180 - (p1[1] * Math.PI) / 180;
    var a =
      Math.sin(dLat / 2) * Math.sin(dLat / 2) +
      Math.cos((p1[0] * Math.PI) / 180) *
        Math.cos((p2[0] * Math.PI) / 180) *
        Math.sin(dLon / 2) *
        Math.sin(dLon / 2);
    var c = 2 * Math.atan2(Math.sqrt(a), Math.sqrt(1 - a));
    var d = R * c;
    return d * 1000; // meters
  }
}
class Rectangle {
  /**
   * Rectangle constructor
   * @param {array} sw South-west coordinate of the rectangle [lat, lng]
   * @param {array} ne North-east coordinate of the rectangle [lat, lng]
   */
  constructor(sw, ne) {
    this.name = "Rectangle";
    this.sw = sw;
    this.ne = ne;
  }

  /**
   * Checks if a point is within the rectangle
   * @param {array} point Coordinates of a point [lat, lng]
   * @returns true if point is within, false otherwise
   */
  contains(point) {
    const { sw, ne } = this;
    const x = point[0];
    const y = point[1];
    if (x < sw[0] || x > ne[0] || y < sw[1] || y > ne[1]) return false;
    return true;
  }
}
class Polygon {
  /**
   * Polygon constructor
   * @param {array} points Array of vertices/points of the polygon [lat, lng]
   */
  constructor(points) {
    this.name = "Polygon";
    this.points = points;
  }

  /**
   * @returns {obj} The coordinates of the min/max bounds that surround the polygon
   * (south-west coordinate, north-east coordinate, in [lat, lng] format)
   */
  getBounds() {
    const { points } = this;
    let arrX = [];
    let arrY = [];
    for (let i in points) {
      arrX.push(points[i][0]);
      arrY.push(points[i][1]);
    }
    return {
      sw: [Math.min.apply(null, arrX), Math.min.apply(null, arrY)],
      ne: [Math.max.apply(null, arrX), Math.max.apply(null, arrY)],
    };
  }

  /**
   * Checks if a point is within the polygon
   * @param {array} point Coordinates of a point [lat, lng]
   * @returns true if point is within, false otherwise
   */
  contains(point) {
    const x = point[0];
    const y = point[1];
    const bounds = this.getBounds();
    // Check if point P lies within the min/max boundary of our polygon
    if (x < bounds.sw[0] || x > bounds.ne[0] || y < bounds.sw[1] || y > bounds.ne[1])
      return false;
    let intersect = 0;
    const { points } = this;
    // Geofencing method (aka even-odd rule)
    // See more at: https://en.wikipedia.org/wiki/Even%E2%80%93odd_rule
    // For each path of our polygon we count how many times our imaginary
    // line crosses the paths: if it crosses an even number of times, our point P is
    // outside of our polygon; an odd number of times and our point is within our polygon
    for (let i = 0; i < points.length; i++) {
      // Check if point P lies on a vertex of our polygon
      if (x === points[i][0] && y === points[i][1]) return true;
      let j = i !== points.length - 1 ? i + 1 : 0;
      // Check if Py (y-component of our point P) is within the y-boundary of our path
      if (
        (points[i][1] < points[j][1] && y >= points[i][1] && y <= points[j][1]) ||
        (points[i][1] > points[j][1] && y >= points[j][1] && y <= points[i][1])
      ) {
        // Check if Px (x-component of our point P) crosses our path
        let sx =
          points[i][0] +
          ((points[j][0] - points[i][0]) * (y - points[i][1])) /
            (points[j][1] - points[i][1]);
        if (sx >= x) intersect += 1;
      }
    }
    return intersect % 2 === 0 ? false : true;
  }
}

module.exports = { Circle, Rectangle, Polygon };
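A quick usage sketch (the file name and the coordinates are placeholders, not from the original post):

const { Circle, Rectangle } = require("./geometry"); // assumed file name

// 500 m circle around an arbitrary center, given as [lat, lng]
const area = new Circle([59.3293, 18.0686], 500);
console.log(area.contains([59.3320, 18.0700])); // true if the point lies within 500 m of the center

// Rectangle given as south-west and north-east corners
const rect = new Rectangle([59.32, 18.05], [59.34, 18.08]);
console.log(rect.contains([59.33, 18.06])); // true: inside the lat/lng box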
Explanation
The Circle and Rectangle classes are pretty straightforward; it's trivial to determine if a point lies within their boundaries. The Polygon class is a bit more complicated, for obvious reasons.
The method used here to determine if a point P is within a polygon is the even-odd rule (commonly used for geofencing), a common technique in geospatial analysis.
Step 1
First we check if the point P falls within the min/max boundaries of the polygon (Image 1); if it doesn't, we return false, problem solved.
Image 1 -- Polygon boundaries, P1 is within the polygon boundaries, P2 is not.
Step 2
Then we check if the point lies on a vertex (point) of the polygon; if it does, we return true, problem solved (Image 2).
Image 2 -- Polygon boundaries; point P is on a vertex, return true.
Step 3
This next step is the most gratifying one. By now we know the point is within the polygon's min/max boundaries (from step 1), but we don't know whether it's inside the polygon itself. To solve this we cast an imaginary line departing from the point in any direction: if it crosses the polygon's paths an even number of times, the point is outside the polygon; if it crosses an odd number of times, the point is within the polygon. Like so:
Image 3 -- An imaginary line from P1 crosses the polygon paths an odd number of times (3 times), so it's within the polygon. An imaginary line from P2 crosses an even number of times (4 times), so it lies outside of the polygon.
Since we can pick any direction we want to cast the imaginary line in, we'll cast it along the x-axis to simplify things, like so:
Image 4 -- Casting the imaginary line from point P parallel to the x-axis to simplify determining how many times it intersects our polygon.
To determine how many times the imaginary line intersects our polygon, we have to check each path of the polygon one at a time. To do this, we break it down into two steps (see Image 5 for reference):
1. For each segment/path of the polygon we check if Py (the y-component of our point P) is within the y-boundaries of the path in question (Y1 and Y2). If it is not, we know our line does not intersect that specific path and we can move on to the next one. If it is within the path's y-boundaries, then we have to check if it crosses the path in the x-direction (next step).
2. Assuming the step before is true, to check for an intersection in the x-direction we have to calculate the equation of the path (using the point-slope line equation y - y1 = m(x - x1)) and plug in our Py component to solve for the intersection's x-coordinate (in my code I call this Sx). Then we check if Sx is greater than or equal to Px; if so, our imaginary line intersects the path in the positive x direction.
It's important to note that the imaginary line starts at our point P and we only count intersections in the direction we originally picked, in this case the positive x-axis. This is why Sx has to be greater than or equal to Px; otherwise the test fails.
Image 5 -- We break down each path of the polygon to determine the number of intersections.
Once this path is done we move on to the next one, and so on. In this case the line crosses our paths 3 times, and therefore we know the point is within our polygon.
If you think about it, this is a very clever and simple method; it works for any shape, which is truly amazing.
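As a quick sanity check of the Sx computation with made-up numbers: for a path from (1, 0) to (3, 4) and a point P = (0, 2), Py = 2 lies between the path's y-bounds 0 and 4, and the interpolated crossing is Sx = 1 + (3 - 1) * (2 - 0) / (4 - 0) = 2, which is >= Px = 0, so this path counts as one crossing:

// Same interpolation as in Polygon.contains, with the made-up numbers above
const a = [1, 0], b = [3, 4];   // one path of the polygon
const px = 0, py = 2;           // the point being tested
const sx = a[0] + ((b[0] - a[0]) * (py - a[1])) / (b[1] - a[1]);
console.log(sx, sx >= px);      // 2 true -> counts as a crossing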
Read more
https://en.wikipedia.org/wiki/Even%E2%80%93odd_rule
Examples
Example 1 - Simple shapes
const p = new Polygon([
  [-3, 3],
  [-4, 1],
  [-3, 0],
  [-2, -1],
  [0, 0],
  [3, 2],
  [0, 1],
  [-1, 4],
]);
console.log("Contains: ", p.contains([-1, 1])); // returns true
JSFiddle 1
Example 2 - Complex shapes (overlapping areas)
This method also works for more complex shapes, where the polygon coordinates create overlapping areas that cancel each other out.
const p = new Polygon([
  [-2, 0],
  [2, 0],
  [2, 4],
  [-2, 4],
  [-2, 0],
  [0, 2],
  [2, 0],
  [0, -2],
  [-2, 0],
]);
console.log("Contains: ", p.contains([0, 1])); // returns false
JSFiddle 2
Side note
If you need to quickly plot points just to get a view of a shape/grid, this plotting tool helped a lot in getting a visual of what's going on. Very often I thought my code had a bug, when in fact my coordinates were skewed and the code was correct.
https://www.desmos.com/calculator
I only wish it let you draw lines between points. Either way I found it helpful.

Find intersection point ray/triangle in a right-hand coordinate system

I would like to get the intersection point of a line (defined by a direction vector and an origin) with a triangle.
My engine uses a right-handed coordinate system, with X pointing forward, Y pointing left and Z pointing up.
---- Edit ----
With Antares's help, I convert my points to engine space with:
p0.x = -pt0.y;
p0.y = pt0.z;
p0.z = pt0.x;
But I don't know how to do the same with the direction vector.
I use the function from this Stack Overflow question; the original poster used this tutorial.
First we look for the distance t from the origin to the intersection point, in order to find its coordinates.
But I get a negative t, and the code returns true when the ray is outside the triangle (I placed it outside visually).
It sometimes returns false when the ray is inside the triangle.
Here is the function I use to get the intersection point; I already checked that it works with 'classic' values, as in the original post.
float kEpsilon = 0.000001;

V3f crossProduct(V3f point1, V3f point2) {
    V3f vector;
    vector.x = point1.y * point2.z - point2.y * point1.z;
    vector.y = point2.x * point1.z - point1.x * point2.z;
    vector.z = point1.x * point2.y - point1.y * point2.x;
    return vector;
}

float dotProduct(V3f dot1, V3f dot2) {
    float dot = dot1.x * dot2.x + dot1.y * dot2.y + dot1.z * dot2.z;
    return dot;
}

// orig: ray origin, dir: ray direction, triangle vertices: p0, p1, p2.
bool rayTriangleIntersect(V3f orig, V3f dir, V3f p0, V3f p1, V3f p2) {
    // compute the plane's normal
    V3f p0p1, p0p2;
    p0p1.x = p1.x - p0.x;
    p0p1.y = p1.y - p0.y;
    p0p1.z = p1.z - p0.z;
    p0p2.x = p2.x - p0.x;
    p0p2.y = p2.y - p0.y;
    p0p2.z = p2.z - p0.z;
    // no need to normalize
    V3f N = crossProduct(p0p1, p0p2); // N

    // Step 1: finding P
    // check if the ray and plane are parallel
    float NdotRayDirection = dotProduct(N, dir); // if the result is 0, the function will return false (no intersection).
    if (fabs(NdotRayDirection) < kEpsilon) { // almost 0
        return false; // they are parallel, so they don't intersect
    }
    // compute the d parameter using equation 2
    float d = dotProduct(N, p0);
    // compute t (equation P = O + tR, with P the intersection point, O the ray origin and R its direction)
    float t = -((dotProduct(N, orig) - d) / NdotRayDirection);
    // check if the triangle is behind the ray
    //if (t < 0) { return false; } // the triangle is behind
    // compute the intersection point using the equation
    V3f P;
    P.x = orig.x + t * dir.x;
    P.y = orig.y + t * dir.y;
    P.z = orig.z + t * dir.z;

    // Step 2: inside-outside test
    V3f C; // vector perpendicular to the triangle's plane
    // edge 0
    V3f edge0;
    edge0.x = p1.x - p0.x;
    edge0.y = p1.y - p0.y;
    edge0.z = p1.z - p0.z;
    V3f vp0;
    vp0.x = P.x - p0.x;
    vp0.y = P.y - p0.y;
    vp0.z = P.z - p0.z;
    C = crossProduct(edge0, vp0);
    if (dotProduct(N, C) < 0) { return false; } // P is on the right side
    // edge 1
    V3f edge1;
    edge1.x = p2.x - p1.x;
    edge1.y = p2.y - p1.y;
    edge1.z = p2.z - p1.z;
    V3f vp1;
    vp1.x = P.x - p1.x;
    vp1.y = P.y - p1.y;
    vp1.z = P.z - p1.z;
    C = crossProduct(edge1, vp1);
    if (dotProduct(N, C) < 0) { return false; } // P is on the right side
    // edge 2
    V3f edge2;
    edge2.x = p0.x - p2.x;
    edge2.y = p0.y - p2.y;
    edge2.z = p0.z - p2.z;
    V3f vp2;
    vp2.x = P.x - p2.x;
    vp2.y = P.y - p2.y;
    vp2.z = P.z - p2.z;
    C = crossProduct(edge2, vp2);
    if (dotProduct(N, C) < 0) { return false; } // P is on the right side
    return true; // this ray hits the triangle
}
My problem is that I get t: -52.603783
intersection point P: [-1143.477295, -1053.412842, 49.525799]
This gives me, relative to a 640x480 texture, the UV point: [-658, 41].
Probably because my engine uses Z pointing up?
My engine uses a right-handed coordinate system, with X pointing forward, Y pointing left and Z pointing up.
You have a slightly incorrect idea of a right-handed coordinate system... please check https://en.wikipedia.org/wiki/Cartesian_coordinate_system#In_three_dimensions.
As the name suggests, X is pointing right (right hand's thumb to the right), Y is pointing up (straight index finger) and Z (straight middle finger) is pointing "forward" (actually -Z is forward, and Z is backward in the camera coordinate system).
Actually... your coordinate components are right-handed, but the interpretation of X as forward etc. is unusual.
If you suspect the problem could be with the coordinate system of your engine (OGRE maybe? plain OpenGL? Or something self-made?), then you need to transform your point and direction coordinates into the coordinate system of your algorithm. The algorithm you presented works in the camera coordinate system, if I am not mistaken. Of course you need to transform the resulting intersection point back into the interpretation you use in the engine.
To turn the direction of a vector component around (e.g. the Z coordinate) you can use multiplication with -1 to achieve the effect.
Edit:
One more thing: I realized that the algorithm uses directional vectors as well, not just points. The rearranging of components only works for points, not directions, if I recall correctly. Maybe you have to do a matrix multiplication with the CameraView transformation matrix (or its inverse M^-1, or was it the transpose M^T? I am not sure). I can't help you there; I hope you can figure it out or just do trial & error.
My problem is that I get t: -52.603783
intersection point P: [-1143.477295, -1053.412842, 49.525799]. This gives me, relative to a 640x480 texture, the UV point: [-658, 41]
I reckon you think your values are incorrect. Which values do you expect to get for t and UV coordinates? Which ones would be "correct" for your input?
Hope this gets you started. GL, HF with your project! :)
@GUNNM: Concerning your feedback that you do not know how to handle the direction vector, here are some ideas that might be useful to you.
As I said, there should be a matrix multiplication way. Look for key words like "transforming directional vector with a matrix" or "transforming normals (normal vectors) with a matrix". This should yield something like: "use the transpose of the used transformation matrix" or "the inverse of the matrix" or something like that.
A workaround could be: You can "convert" a directional vector to a point, by thinking of a direction as "two points" forming a vector: A starting point and another point which lies in the direction you want to point.
The starting point of your ray, you already have available. Now you need to make sure that your directional vector is interpreted as "second point" not as "directional vector".
If your engine handles a ray like in the first case you would have:
Here is my starting point (0,0,0) and here is my directional vector (5,6,-7) (I made those numbers up and take the origin as starting point to have a simple example). So this is just the usual "start + gaze direction" case.
In the second case you would have:
Here is my start at (0,0,0) and my second point is a point on my directional vector (5,6,-7), e.g. any t*direction, which for t=1 gives exactly the point your directional vector points to when it is considered a vector (with the start point being the origin (0,0,0)).
Now you need to check how your algorithm handles that direction. If somewhere it does ray = startpoint + direction, then it interprets it as point + vector, resulting in a shift of the starting point while keeping the orientation and direction of the vector.
If it does ray = startpoint - direction, then it interprets it as two points from which a directional vector is formed by subtracting.
To make a directional vector from two points you usually just need to subtract them. This gives a "pure direction" though, without a defined orientation (which can be +t or -t). So if you need this direction to be fixed, you may for example take the absolute value of your "vector sliding value" t in later computations (maybe not the best/fastest way of doing it).
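A minimal sketch of the "two points" workaround described above, using plain {x, y, z} objects and a row-major 3x4 matrix (rotation plus translation) stored as a flat 12-element array; the names and the layout are assumptions for illustration, not the engine's API from the question:

function transformPoint(m, p) {
  // Full point transform: rotation/scale part plus the translation column (w = 1)
  return {
    x: m[0] * p.x + m[1] * p.y + m[2]  * p.z + m[3],
    y: m[4] * p.x + m[5] * p.y + m[6]  * p.z + m[7],
    z: m[8] * p.x + m[9] * p.y + m[10] * p.z + m[11],
  };
}

function transformDirection(m, dir) {
  // Transform the origin and the "second point", then subtract: the translation
  // part cancels out and only the rotated direction remains.
  const o = transformPoint(m, { x: 0, y: 0, z: 0 });
  const p = transformPoint(m, dir);
  return { x: p.x - o.x, y: p.y - o.y, z: p.z - o.z };
}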

SVG or Canvas bevel & emboss to find center-line of text

The Photoshop bevel & emboss effect makes it easy to find the central 'ridge' which coincides with the center-line of text characters. This is done by increasing the appropriate effect settings to max out the bevel, thereby creating such a ridge.
This Photoshop example was processed to further accentuate the center ridge.
Is it possible to achieve the same effect with an SVG filter or Canvas technique in the browser?
Once this effect is in place, I could obtain the coordinates of the center line which I want.
Alternatively, is there an existing algorithm to get this center line via mathematical means from a raster image or vector shape?
SVG filters are a powerful feature that can be like photoshop in the browser. You can achieve the desired result by chaining a handful of filter primitives together.
<filter id="filterData">
  <feGaussianBlur stdDeviation="5" />
  <feDiffuseLighting surfaceScale="500">
    <feDistantLight azimuth="90" elevation="90" />
  </feDiffuseLighting>
  <feComposite result="composite" operator="in" in2="SourceGraphic" />
</filter>
The first primitive blurs the text. Then a lighting primitive uses the result of the blur as a bump map to give the text depth. You will have to play with the surfaceScale attribute depending on the thickness of the text. The composite primitive will clip the final result to the area of the unfiltered text, the 'SourceGraphic'.
Codepen example: https://codepen.io/lahaymd/pen/EdNXam
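For reference, applying the filter is then just a matter of pointing the target element at it; a minimal sketch (the font, sizes and text are arbitrary):

<svg width="500" height="150">
  <!-- the #filterData definition from above goes here -->
  <text x="20" y="110" font-size="100" font-weight="bold"
        filter="url(#filterData)">Ink</text>
</svg>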
Somehow this one tickled my fancy, although I am not sure this is an efficient way to get a result.
What is the center line? I define it as the set of all points inside the contour that fulfill the following condition: there must be at least one straight line through the point such that, along that line, the distance to the nearest contour is a local maximum exactly at that point. In practice, testing a horizontal and a vertical line is enough.
I tried to implement that using two functions from the SVGGeometryElement interface: .getPointAtLength() and .isPointInFill(). The second one has so far only been implemented in Chrome, so that is the only browser this will work with.
The <text> element does not implement the SVGGeometryElement interface, so it must be converted to a <path>. That is something that cannot be done in a browser; you'll need an appropriate graphics program for that.
Finding, for 1000 * 500 points, which of the roughly 5000 points along the contour of the two letters is the nearest one is a lot of computation. Therefore this contains a crude mechanism to only test those contour points that are in the vicinity. Nonetheless, give it a few seconds to complete. If you compute only one letter at that size and halve the canvas size, the execution time will drop to approximately a quarter.
const width = 1000;
const height = 500;
const letter = document.querySelector('path');
const svg = document.querySelector('svg');
const canvas = document.querySelector('canvas');
const ctx = canvas.getContext('2d');
ctx.fillStyle = 'white';

function isInside(x, y) {
  const point = svg.createSVGPoint();
  point.x = x;
  point.y = y;
  return letter.isPointInFill(point);
}

// a 21 * 11 array of arrays
const fields = new Array(21).fill(0).map(() => {
  return new Array(11).fill(0).map(() => []);
});

// a list of points along the contour
const length = Math.floor(letter.getTotalLength());
Array.from(new Array(length), (x, i) => {
  return letter.getPointAtLength(i);
}).forEach(point => {
  // find out if a contour point is inside a 100 * 100 rectangle
  let rx1 = Math.round(point.x / 100) * 2;
  let ry1 = Math.round(point.y / 100) * 2;
  // or a 100 * 100 rectangle that is offset by 50
  let rx2 = Math.round((point.x + 50) / 100) * 2 - 1;
  let ry2 = Math.round((point.y + 50) / 100) * 2 - 1;
  // push the point into all four lists for the rectangles it is part of
  fields[rx1][ry1].push(point);
  fields[rx1][ry2].push(point);
  fields[rx2][ry1].push(point);
  fields[rx2][ry2].push(point);
});

const data = new Float32Array(width * height);
for (let y = 0; y < height; y++) {
  for (let x = 0; x < width; x++) {
    // only handle points inside the contour
    if (isInside(x, y)) {
      // find out which 50 * 50 rectangle the inside point is part of
      const rx = Math.round(x / 50);
      const ry = Math.round(y / 50);
      // find the nearest contour point from the list for the
      // appropriate 100 * 100 rectangle
      const d = fields[rx][ry].reduce((min, point) => {
        const dist = Math.hypot(point.x - x, point.y - y);
        return Math.min(min, dist);
      }, 100);
      // store that distance value
      data[y * width + x] = d;
    }
  }
}

data.forEach((v, i, a) => {
  // find out if the distance to the nearest contour point
  // is a local maximum, vertically or horizontally
  const vert = a[i - width] < v && a[i + width] < v;
  const hor = a[i - 1] < v && a[i + 1] < v;
  if (vert || hor) {
    // color that point as part of the center line
    ctx.fillRect(i % width, Math.floor(i / width), 1, 1);
  }
});
<svg width="1000" height="500" style="position:absolute">
<path id="letter" d="M 374.512,316.992 H 220.703 L 193.75,379.687 Q 183.789,402.832 183.789,414.258 183.789,423.34 192.285,430.371 201.074,437.109 229.785,439.16 V 450 H 104.688 V 439.16 Q 129.59,434.766 136.914,427.734 151.855,413.672 170.02,370.605 L 309.766,43.6523 H 320.02 L 458.301,374.121 Q 475,413.965 488.477,425.977 502.246,437.695 526.562,439.16 V 450 H 369.824 V 439.16 Q 393.555,437.988 401.758,431.25 410.254,424.512 410.254,414.844 410.254,401.953 398.535,374.121 Z M 366.309,295.312 298.926,134.766 229.785,295.312 Z M 810.742,247.266 Q 852.051,256.055 872.559,275.391 900.977,302.344 900.977,341.309 900.977,370.898 882.227,398.145 863.477,425.098 830.664,437.695 798.145,450 731.055,450 H 543.555 V 439.16 H 558.496 Q 583.398,439.16 594.238,423.34 600.977,413.086 600.977,379.687 V 123.047 Q 600.977,86.1328 592.48,76.4648 581.055,63.5742 558.496,63.5742 H 543.555 V 52.7344 H 715.234 Q 763.281,52.7344 792.285,59.7656 836.23,70.3125 859.375,97.2656 882.52,123.926 882.52,158.789 882.52,188.672 864.355,212.402 846.191,235.84 810.742,247.266 Z M 657.227,231.445 Q 668.066,233.496 681.836,234.668 695.898,235.547 712.598,235.547 755.371,235.547 776.758,226.465 798.437,217.09 809.863,198.047 821.289,179.004 821.289,156.445 821.289,121.582 792.871,96.9727 764.453,72.3633 709.961,72.3633 680.664,72.3633 657.227,78.8086 Z M 657.227,421.289 Q 691.211,429.199 724.316,429.199 777.344,429.199 805.176,405.469 833.008,381.445 833.008,346.289 833.008,323.145 820.41,301.758 807.812,280.371 779.395,268.066 750.977,255.762 709.082,255.762 690.918,255.762 678.027,256.348 665.137,256.934 657.227,258.398 Z"/>
</svg>
<canvas width="1000" height="500" style="position:absolute"></canvas>

Combining two Matrix Transformations under the same Transformation with SVG

My current task is to combine objects with similar matrices under the same transformation matrix. The two matrices will always have the first four values of their transform equal. I am having difficulty calculating the x="???" and y="???" for the second tspan. Any help towards the proper equation would be greatly appreciated.
Input
<svg>
  <text transform="matrix(0 1 1 0 100 100)"><tspan x=0 y=0>foo</tspan></text>
  <text transform="matrix(0 1 1 0 110 110)"><tspan x=0 y=0>bar</tspan></text>
</svg>
Output
<svg>
  <text transform="matrix(0 1 1 0 100 100)">
    <tspan x="0" y="0">foo</tspan>
    <tspan x="???" y="???">bar</tspan>
  </text>
</svg>
EDIT 1
I guess my question is more along the lines of: given a point (x, y), how do I apply an existing matrix transformation to that point so that its position does not move, even though the element will now be nested inside another element?
EDIT 2
I have got this code to work for matrices with 0s in the (a, d) or (b, c) positions. I still have not got slanted/skewed matrices working. Any thoughts on this?
var aX = floatX[0];
var bX = floatX[1];
var cX = floatX[2];
var dX = floatX[3];
var eX = floatX[4];
var fX = floatX[5];
var aY = floatY[0];
var bY = floatY[1];
var cY = floatY[2];
var dY = floatY[3];
var eY = floatY[4];
var fY = floatY[5];
var xX = (eX * aX) + (fX * bX);
var xY = (eX * cX) + (fX * dX);
var yX = (eY * aY) + (fY * bY);
var yY = (eY * cY) + (fY * dY);
var c1 = cX - aX;
var c2 = dX + bX;
return new float[] { (yX - xX) / (c1 * c2), (yY - xY) / (c1 * c2) };
One thought that may work, if my logic isn't flawed, is to find the transform from one element to the other, which can then be used to transform the point 0,0 (as that's the original x,y) to a new location.
Once we know what the difference in transforms is (assuming the first four figures in the matrix are the same, as mentioned in the question; it won't work otherwise), we can figure out what the difference in x,y is.
First, there's a bit of code, as some browsers have removed this feature.
SVGElement.prototype.getTransformToElement = SVGElement.prototype.getTransformToElement || function(elem) {
  return elem.getScreenCTM().inverse().multiply(this.getScreenCTM());
};
This is an svg method that some browsers support, but including as a polyfill in case yours doesn't (like Chrome). It finds the transform from one element to another.
We can then use this, to find the transform from the first to the second text element.
var text1 = document.querySelector('#myText1')
var text2 = document.querySelector('#myText2')
var transform = text2.getTransformToElement( text1 )
Or if you don't want the polyfill, this 'may' work (matrices aren't a strong point of mine!). getCTM() gets the current transformation matrix of an element.
var transform = text1.getCTM().inverse().multiply( text2.getCTM() )
Now we know what the transform between them was. We also know the original x,y was 0,0. So we can create an svg point 0,0 and then transform it with that matrix we've just figured, to find the new x,y.
var pt = document.querySelector('svg').createSVGPoint();
pt.x = 0; pt.y = 0;
var npt = pt.matrixTransform( transform );
Then just a delayed example to show it being moved. Set the tspan with the new x,y we've just figured from the previous transform.
setTimeout( function() {
  alert('new x,y is ' + npt.x + ',' + npt.y);
  tspan2.setAttribute('x', npt.x);
  tspan2.setAttribute('y', npt.y);
}, 2000);
jsfiddle with polyfill
jsfiddle without polyfill
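Pulling those steps together, a consolidated sketch of the whole flow using the getCTM() variant (the element ids are assumptions, and this only holds when the two matrices share their first four values, as stated in the question):

// Move #myText2's tspan into #myText1 while preserving its rendered position
const text1 = document.querySelector('#myText1');
const text2 = document.querySelector('#myText2');
const tspan2 = text2.querySelector('tspan');

// Matrix that maps text2's user space into text1's user space
const toText1 = text1.getCTM().inverse().multiply(text2.getCTM());

// The tspan sat at (0,0) in text2's space; map that point into text1's space
const pt = text1.ownerSVGElement.createSVGPoint();   // defaults to (0,0)
const npt = pt.matrixTransform(toText1);

tspan2.setAttribute('x', npt.x);
tspan2.setAttribute('y', npt.y);
text1.appendChild(tspan2);
text2.remove();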

How is the getBBox() SVGRect calculated?

I have a g element that contains one or more path elements. As I mentioned in another question, I scale and translate the g element by computing a transform attribute so that it fits on a grid in another part of the canvas.
The calculation is done using the difference between two rectangles, the getBBox() from the g element and the rectangle around the grid.
Here is the question -- after I do the transform, I update the contents of the g element and call getBBox() again, without removing the transform. The resulting rectangle appears to be calculated without considering the transform. I would have expected it to reflect the change. Is this behavior consistent with the SVG specification? How do I get the bounding box of the transformed rectangle?
This, BTW, is in an HTML 5 document running in Firefox 4, if that makes any difference.
Update: This behavior seems pretty clearly in violation of the specification. From the text at the W3C:
SVGRect getBBox()
Returns the tight bounding box in current user space (i.e., after application of the ‘transform’ attribute, if any) on the geometry of all contained graphics elements, exclusive of stroking, clipping, masking and filter effects). Note that getBBox must return the actual bounding box at the time the method was called, even in case the element has not yet been rendered.
Am I reading this correctly? If so, this seems to be an error in the SVG implementation Firefox uses; I haven't had a chance to try any other. I would file a bug report if someone could point me to where.
People often get confused by the behavioral difference of getBBox and getBoundingClientRect.
getBBox is an SVG element's native method, roughly equivalent to finding the offsetWidth/clientWidth of an HTML DOM element. The width and height are never going to change, even when the element is rotated. It cannot be used for HTML DOM elements.
getBoundingClientRect is common to both HTML and SVG elements. The bounding rectangle's width and height will change when the element is rotated or when more elements are grouped.
The behaviour you see is correct, and consistent with the spec.
The transform gets applied, then the bbox is calculated in "current user units", i.e. the current user space. So if you want to see the result of a transform on the element you'd need to look at the bbox of a parent node or similar.
It's a bit confusing, but explained a lot better in the SVG Tiny 1.2 spec for SVGLocatable
That contains a number of examples that clarify what it's supposed to do.
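One way to read that "parent node" suggestion in practice: wrap the transformed element in a plain <g> and measure the wrapper, since the child's transform has already been applied by the time the parent computes its bbox. A minimal sketch (the element id is an assumption):

const el = document.querySelector('#myTransformedRect');
const wrapper = document.createElementNS('http://www.w3.org/2000/svg', 'g');
el.parentNode.insertBefore(wrapper, el);
wrapper.appendChild(el);
// The wrapper's bbox now reflects el's transform attribute
console.log(wrapper.getBBox());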
There are at least 2 easy but somewhat hacky ways to do what you ask... if there are nicer (less hacky) ways, I haven't found them yet.
EASY HACKY #1:
a) set up a rect that matches the "untransformed" bbox that group.getBBox() is returning
b) apply the group's "unapplied transform" to that rect
c) rect.getBBox() should now return the bbox you're looking for
EASY HACKY #2: (only tested in Chrome)
a) use element.getBoundingClientRect(), which returns enough info for you to construct the bbox you're looking for
Apparently getBBox() doesn't take the transformations into consideration.
I can point you here; unfortunately I wasn't able to make it work: http://tech.groups.yahoo.com/group/svg-developers/message/22891
SVG groups have a nasty practice of not accumulating all the transformations made. I have my own way to cope with this issue: I use custom attributes to store the current transformation data, which I then include in any further transformation. Use XML-compatible attributes like alttext, value, name... or just x and y for storing the accumulated value as an attribute.
Example:
<g id="group" x="20" y="100" transform="translate(20, 100)">
  <g id="subgroup" alttext="45" transform="rotate(45)">
    <line ...etc...
Therefore, when I'm making transformations I take those handmade attribute values, and when writing back, I write both the transform and the same value into the attributes I made just for keeping the accumulated values.
Example for rotation:
function symbRot(evt) {
  evt.target.ondblclick = function () {
    stopBlur();
    var ptx = symbG.parentNode.lastChild.getAttribute("cx");
    var pty = symbG.parentNode.lastChild.getAttribute("cy");
    var currRot = symbG.getAttributeNS(null, "alttext");
    var rotAng;
    if (currRot == 0) {
      rotAng = 90;
    } else if (currRot == 90) {
      rotAng = 180;
    } else if (currRot == 180) {
      rotAng = 270;
    } else if (currRot == 270) {
      rotAng = 0;
    }
    symbG.setAttributeNS(null, "transform", "rotate(" + rotAng + "," + ptx + ", " + pty + ")");
    symbG.setAttributeNS(null, "alttext", rotAng);
  };
}
The following code takes into account the transformations (matrix or otherwise) from parents, itself, as well as children. So, it will work on a <g> element for example.
You will normally want to pass the parent <svg> as the third argument, toElement, so that the computed bounding box is returned in the coordinate space of the <svg> (which is generally the coordinate space we care about).
/**
 * @param {SVGElement} element - Element to get the bounding box for
 * @param {boolean} [withoutTransforms=false] - If true, transforms will not be calculated
 * @param {SVGElement} [toElement] - Element to calculate the bounding box relative to
 * @returns {SVGRect} Coordinates and dimensions of the real bounding box
 */
function getBBox(element, withoutTransforms, toElement) {
  var svg = element.ownerSVGElement;
  if (!svg) {
    return { x: 0, y: 0, cx: 0, cy: 0, width: 0, height: 0 };
  }
  var r = element.getBBox();
  if (withoutTransforms) {
    return {
      x: r.x,
      y: r.y,
      width: r.width,
      height: r.height,
      cx: r.x + r.width / 2,
      cy: r.y + r.height / 2
    };
  }
  var p = svg.createSVGPoint();
  var matrix = (toElement || svg).getScreenCTM().inverse().multiply(element.getScreenCTM());
  p.x = r.x;
  p.y = r.y;
  var a = p.matrixTransform(matrix);
  p.x = r.x + r.width;
  p.y = r.y;
  var b = p.matrixTransform(matrix);
  p.x = r.x + r.width;
  p.y = r.y + r.height;
  var c = p.matrixTransform(matrix);
  p.x = r.x;
  p.y = r.y + r.height;
  var d = p.matrixTransform(matrix);
  var minX = Math.min(a.x, b.x, c.x, d.x);
  var maxX = Math.max(a.x, b.x, c.x, d.x);
  var minY = Math.min(a.y, b.y, c.y, d.y);
  var maxY = Math.max(a.y, b.y, c.y, d.y);
  var width = maxX - minX;
  var height = maxY - minY;
  return {
    x: minX,
    y: minY,
    width: width,
    height: height,
    cx: minX + width / 2,
    cy: minY + height / 2
  };
}
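For example, to get a <g> element's bounding box expressed in the outer <svg>'s coordinate system (the selectors are assumptions):

var svg = document.querySelector('svg');
var group = document.querySelector('#myGroup');
var box = getBBox(group, false, svg);
console.log(box.x, box.y, box.width, box.height, box.cx, box.cy);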
I made a helper function, which returns various metrics of an SVG element (including the bbox of a transformed element).
The code is here:
SVGElement.prototype.getTransformToElement =
  SVGElement.prototype.getTransformToElement || function(elem) {
    return elem.getScreenCTM().inverse().multiply(this.getScreenCTM());
  };

function get_metrics(el) {
  function pointToLineDist(A, B, P) {
    var nL = Math.sqrt((B.x - A.x) * (B.x - A.x) + (B.y - A.y) * (B.y - A.y));
    return Math.abs((P.x - A.x) * (B.y - A.y) - (P.y - A.y) * (B.x - A.x)) / nL;
  }

  function dist(point1, point2) {
    var xs = 0,
        ys = 0;
    xs = point2.x - point1.x;
    xs = xs * xs;
    ys = point2.y - point1.y;
    ys = ys * ys;
    return Math.sqrt(xs + ys);
  }

  var b = el.getBBox(),
      objDOM = el,
      svgDOM = objDOM.ownerSVGElement;
  // Get the local to global matrix
  var matrix = svgDOM.getTransformToElement(objDOM).inverse(),
      oldp = [[b.x, b.y], [b.x + b.width, b.y], [b.x + b.width, b.y + b.height], [b.x, b.y + b.height]],
      pt, newp = [],
      obj = {},
      i, pos = Number.POSITIVE_INFINITY,
      neg = Number.NEGATIVE_INFINITY,
      minX = pos,
      minY = pos,
      maxX = neg,
      maxY = neg;
  for (i = 0; i < 4; i++) {
    pt = svgDOM.createSVGPoint();
    pt.x = oldp[i][0];
    pt.y = oldp[i][1];
    newp[i] = pt.matrixTransform(matrix);
    if (newp[i].x < minX) minX = newp[i].x;
    if (newp[i].y < minY) minY = newp[i].y;
    if (newp[i].x > maxX) maxX = newp[i].x;
    if (newp[i].y > maxY) maxY = newp[i].y;
  }
  // The next values refer to the transformed object itself, not its bbox
  // newp[0] - newp[3] are the transformed object's corner
  // points in clockwise order starting from the top left corner
  obj.newp = newp; // array of corner points
  obj.width = pointToLineDist(newp[1], newp[2], newp[0]) || 0;
  obj.height = pointToLineDist(newp[2], newp[3], newp[0]) || 0;
  obj.toplen = dist(newp[0], newp[1]);
  obj.rightlen = dist(newp[1], newp[2]);
  obj.bottomlen = dist(newp[2], newp[3]);
  obj.leftlen = dist(newp[3], newp[0]);
  // The next values refer to the transformed object's bounding box
  obj.BBx = minX;
  obj.BBy = minY;
  obj.BBx2 = maxX;
  obj.BBy2 = maxY;
  obj.BBwidth = maxX - minX;
  obj.BBheight = maxY - minY;
  return obj;
}
A fully functional example is here:
http://jsbin.com/acowaq/1
