Calculating point to line distance in GLSL - geometry

I am trying to calculate a point-to-line distance in GLSL - precisely in turbo.js.
This is part of a more general problem in which I try to find the closest points on a GeoJSON multiline with respect to a set of GeoJSON points - the number of calculations for a 500-point set against a 1000-segment line ends up being 500k point-to-segment distance calculations.
This is way too much to handle in the browser (even in workers), so parallelism helps a lot.
The trick is that AFAIK I can only use a vec4 as an input, which means I can only do calculations on pairs of points.
So far I've progressed to calculating distance and bearing of all pairs - but can't make the last leg to calculating point-to-line distance.
So the question is - given 3 points a, b and c, and knowing:
- their position in lon and lat
- their pairwise bearing and distance
is it possible to calculate the distance from a to the line defined by b and c using transforms that use vec2, vec3 or vec4 as input arguments?
As a sub-problem - I know how to calculate the distance if the foot of the triangle's height from a doesn't fall on the segment (b, c), because then it's min(distance(a, b), distance(a, c)).
But then, how do I tell whether it intersects?
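(For reference, in a locally planar approximation - treating lon/lat as plain x/y, which only holds over short distances - the standard test projects a onto the segment:

$$t = \frac{(a - b)\cdot(c - b)}{\lVert c - b\rVert^{2}}$$

The height's foot falls on the segment exactly when $0 \le t \le 1$, in which case the distance is $\lVert a - (b + t\,(c - b))\rVert$; otherwise it's the min(distance(a, b), distance(a, c)) case above. For true geodesic distances on lon/lat you'd want a cross-track distance formula instead.)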

I'm not totally sure I understand your question.
It sounds like, for each of 500 input points, you want to know which of 1000 line segments is closest.
If that's what you're asking, then put all the points in floating point textures (another word for a texture is a 2D array). Draw a -1 to +1 quad that's the size of the number of results (500 results, so 50x10 or 25x20, etc.). Pass in the resolution of the textures. Use gl_FragCoord to compute an index to get the input, A, and loop over all the lines. Read the results via readPixels by encoding the index of the closest pair as a color.
precision highp float;

uniform sampler2D aValues;
uniform vec2 aDimensions;  // the size of the aValues texture in pixels (texels)
uniform sampler2D bValues;
uniform vec2 bDimensions;  // the size of the bValues texture in pixels (texels)
uniform sampler2D cValues;
uniform vec2 cDimensions;  // the size of the cValues texture in pixels (texels)
uniform vec2 outputDimensions;  // the size of the thing we're drawing to (canvas)

// this code, given a sampler2D, the size of the texture, and an index
// computes a UV coordinate to pull one RGBA value out of a texture
// as though the texture was a 1D array.
vec3 getPoint(in sampler2D tex, in vec2 dimensions, in float index) {
  vec2 uv = (vec2(
      floor(mod(index, dimensions.x)),
      floor(index / dimensions.x)) + 0.5) / dimensions;
  return texture2D(tex, uv).xyz;
}

// from https://stackoverflow.com/a/6853926/128511
float distanceFromPointToLine(in vec3 a, in vec3 b, in vec3 c) {
  vec3 ba = a - b;
  vec3 bc = c - b;
  float d = dot(ba, bc);
  float len = length(bc);
  float param = 0.0;
  if (len != 0.0) {
    param = clamp(d / (len * len), 0.0, 1.0);
  }
  vec3 r = b + bc * param;
  return distance(a, r);
}

void main() {
  // gl_FragCoord is the coordinate of the pixel that is being set by the fragment shader.
  // It is the center of the pixel so the bottom left corner pixel will be (0.5, 0.5).
  // the pixel to the left of that is (1.5, 0.5), the pixel above that is (0.5, 1.5), etc...
  // so we can compute back into a linear index
  float ndx = floor(gl_FragCoord.y) * outputDimensions.x + floor(gl_FragCoord.x);

  // find the closest points
  float minDist = 10000000.0;
  float minIndex = -1.0;
  vec3 a = getPoint(aValues, aDimensions, ndx);
  for (int i = 0; i < ${bPoints.length / 4}; ++i) {
    vec3 b = getPoint(bValues, bDimensions, float(i));
    vec3 c = getPoint(cValues, cDimensions, float(i));
    float dist = distanceFromPointToLine(a, b, c);
    if (dist < minDist) {
      minDist = dist;
      minIndex = float(i);
    }
  }

  // convert to 8bit color. The canvas defaults to RGBA 8bits per channel
  // so take our integer index (minIndex) and convert to float values that
  // will end up as the same 32bit index when read via readPixels as
  // 32bit values.
  gl_FragColor = vec4(
      mod(minIndex, 256.0),
      mod(floor(minIndex / 256.0), 256.0),
      mod(floor(minIndex / (256.0 * 256.0)), 256.0),
      floor(minIndex / (256.0 * 256.0 * 256.0))) / 255.0;
}
I'm only guessing, but in general this is probably better solved with some spatial structure, so you don't have to check every line against every point. Still, something like the code above should work and be very parallel - each result will be computed by a separate GPU core.
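For completeness, here's a minimal sketch of the kind of spatial structure I mean - a uniform grid that buckets segments by cell so each point only has to test nearby segments. This is plain JS, separate from the working sample below; the cell size and the segment format are assumptions:

// Bucket each segment [b, c] (where b and c are [x, y]) into every grid cell
// its bounding box touches.
function buildGrid(segments, cellSize) {
  const grid = new Map();
  const key = (cx, cy) => `${cx},${cy}`;
  for (const [b, c] of segments) {
    const minX = Math.floor(Math.min(b[0], c[0]) / cellSize);
    const maxX = Math.floor(Math.max(b[0], c[0]) / cellSize);
    const minY = Math.floor(Math.min(b[1], c[1]) / cellSize);
    const maxY = Math.floor(Math.max(b[1], c[1]) / cellSize);
    for (let cy = minY; cy <= maxY; ++cy) {
      for (let cx = minX; cx <= maxX; ++cx) {
        const k = key(cx, cy);
        if (!grid.has(k)) grid.set(k, []);
        grid.get(k).push([b, c]);
      }
    }
  }
  return {grid, cellSize, key};
}

// Naive query: gather segments from the 3x3 neighborhood of the point's cell.
// A fully correct nearest-segment search has to keep expanding rings of cells
// until the best distance found so far is smaller than the distance to the
// nearest unsearched ring; this sketch skips that refinement.
function nearbySegments({grid, cellSize, key}, p) {
  const cx = Math.floor(p[0] / cellSize);
  const cy = Math.floor(p[1] / cellSize);
  const out = [];
  for (let dy = -1; dy <= 1; ++dy) {
    for (let dx = -1; dx <= 1; ++dx) {
      out.push(...(grid.get(key(cx + dx, cy + dy)) || []));
    }
  }
  return out;
}

With roughly uniform data this drops the work from points x segments to points x (a few segments per cell).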
const v3 = twgl.v3;

// note: I'm using twgl to make the code smaller.
// This is not a lesson in WebGL. You should already know what it means
// to setup buffers and attributes and set uniforms and create textures.
// What's important is the technique, not the minutia of WebGL. If you
// don't know how to do those things you need a much bigger tutorial
// on WebGL like https://webglfundamentals.org

function main() {
  const gl = document.createElement('canvas').getContext('webgl');
  const ext = gl.getExtension('OES_texture_float');
  if (!ext) {
    alert('need OES_texture_float');
    return;
  }

  const r = max => Math.random() * max;
  const hsl = (h, s, l) => `hsl(${h * 360},${s * 100 | 0}%,${l * 100 | 0}%)`;

  function createPoints(numPoints) {
    const points = [];
    for (let i = 0; i < numPoints; ++i) {
      points.push(r(300), r(150), 0, 0); // RGBA
    }
    return points;
  }

  function distanceFromPointToLineSquared(a, b, c) {
    const ba = v3.subtract(a, b);
    const bc = v3.subtract(c, b);
    const dot = v3.dot(ba, bc);
    const lenSq = v3.lengthSq(bc);
    let param = 0;
    if (lenSq !== 0) {
      param = Math.min(1, Math.max(0, dot / lenSq));
    }
    const r = v3.add(b, v3.mulScalar(bc, param));
    return v3.distanceSq(a, r);
  }

  const aPoints = createPoints(6);
  const bPoints = createPoints(15);
  const cPoints = createPoints(15);

  // do it in JS to check
  {
    // compute closest lines to points
    const closest = [];
    for (let i = 0; i < aPoints.length; i += 4) {
      const a = aPoints.slice(i, i + 3);
      let minDistSq = Number.MAX_VALUE;
      let minIndex = -1;
      for (let j = 0; j < bPoints.length; j += 4) {
        const b = bPoints.slice(j, j + 3);
        const c = cPoints.slice(j, j + 3);
        const distSq = distanceFromPointToLineSquared(a, b, c);
        if (distSq < minDistSq) {
          minDistSq = distSq;
          minIndex = j / 4;
        }
      }
      closest.push(minIndex);
    }
    drawResults(document.querySelector('#js'), closest);
  }
  const vs = `
  attribute vec4 position;
  void main() {
    gl_Position = position;
  }
  `;

  const fs = `
  precision highp float;

  uniform sampler2D aValues;
  uniform vec2 aDimensions;  // the size of the aValues texture in pixels (texels)
  uniform sampler2D bValues;
  uniform vec2 bDimensions;  // the size of the bValues texture in pixels (texels)
  uniform sampler2D cValues;
  uniform vec2 cDimensions;  // the size of the cValues texture in pixels (texels)
  uniform vec2 outputDimensions;  // the size of the thing we're drawing to (canvas)

  // this code, given a sampler2D, the size of the texture, and an index
  // computes a UV coordinate to pull one RGBA value out of a texture
  // as though the texture was a 1D array.
  vec3 getPoint(in sampler2D tex, in vec2 dimensions, in float index) {
    vec2 uv = (vec2(
        floor(mod(index, dimensions.x)),
        floor(index / dimensions.x)) + 0.5) / dimensions;
    return texture2D(tex, uv).xyz;
  }

  // from https://stackoverflow.com/a/6853926/128511
  float distanceFromPointToLine(in vec3 a, in vec3 b, in vec3 c) {
    vec3 ba = a - b;
    vec3 bc = c - b;
    float d = dot(ba, bc);
    float len = length(bc);
    float param = 0.0;
    if (len != 0.0) {
      param = clamp(d / (len * len), 0.0, 1.0);
    }
    vec3 r = b + bc * param;
    return distance(a, r);
  }

  void main() {
    // gl_FragCoord is the coordinate of the pixel that is being set by the fragment shader.
    // It is the center of the pixel so the bottom left corner pixel will be (0.5, 0.5).
    // the pixel to the left of that is (1.5, 0.5), the pixel above that is (0.5, 1.5), etc...
    // so we can compute back into a linear index
    float ndx = floor(gl_FragCoord.y) * outputDimensions.x + floor(gl_FragCoord.x);

    // find the closest points
    float minDist = 10000000.0;
    float minIndex = -1.0;
    vec3 a = getPoint(aValues, aDimensions, ndx);
    for (int i = 0; i < ${bPoints.length / 4}; ++i) {
      vec3 b = getPoint(bValues, bDimensions, float(i));
      vec3 c = getPoint(cValues, cDimensions, float(i));
      float dist = distanceFromPointToLine(a, b, c);
      if (dist < minDist) {
        minDist = dist;
        minIndex = float(i);
      }
    }

    // convert to 8bit color. The canvas defaults to RGBA 8bits per channel
    // so take our integer index (minIndex) and convert to float values that
    // will end up as the same 32bit index when read via readPixels as
    // 32bit values.
    gl_FragColor = vec4(
        mod(minIndex, 256.0),
        mod(floor(minIndex / 256.0), 256.0),
        mod(floor(minIndex / (256.0 * 256.0)), 256.0),
        floor(minIndex / (256.0 * 256.0 * 256.0))) / 255.0;
  }
  `;
  // compile shader, link program, lookup locations
  const programInfo = twgl.createProgramInfo(gl, [vs, fs]);

  // calls gl.createBuffer, gl.bindBuffer, gl.bufferData for a -1 to +1 quad
  const bufferInfo = twgl.primitives.createXYQuadBufferInfo(gl);

  // make an RGBA float texture for each set of points
  // calls gl.createTexture, gl.bindTexture, gl.texImage2D, gl.texParameteri
  const aTex = twgl.createTexture(gl, {
    src: aPoints,
    width: aPoints.length / 4,
    type: gl.FLOAT,
    minMag: gl.NEAREST,
  });
  const bTex = twgl.createTexture(gl, {
    src: bPoints,
    width: bPoints.length / 4,
    type: gl.FLOAT,
    minMag: gl.NEAREST,
  });
  const cTex = twgl.createTexture(gl, {
    src: cPoints,
    width: cPoints.length / 4,
    type: gl.FLOAT,
    minMag: gl.NEAREST,
  });

  const numOutputs = aPoints.length / 4;
  gl.canvas.width = numOutputs;
  gl.canvas.height = 1;
  gl.viewport(0, 0, numOutputs, 1);

  gl.useProgram(programInfo.program);

  // calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
  twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);

  // calls gl.activeTexture, gl.bindTexture, gl.uniform
  twgl.setUniforms(programInfo, {
    aValues: aTex,
    aDimensions: [aPoints.length / 4, 1],
    bValues: bTex,
    bDimensions: [bPoints.length / 4, 1],
    cValues: cTex,
    cDimensions: [cPoints.length / 4, 1],
    outputDimensions: [aPoints.length / 4, 1],
  });
  // draw the quad
  gl.drawElements(gl.TRIANGLES, 6, gl.UNSIGNED_SHORT, 0);

  // get result
  const pixels = new Uint8Array(numOutputs * 4);
  const results = new Uint32Array(pixels.buffer);
  gl.readPixels(0, 0, numOutputs, 1, gl.RGBA, gl.UNSIGNED_BYTE, pixels);
  drawResults(document.querySelector('#glsl'), results);

  function drawResults(canvas, closest) {
    const ctx = canvas.getContext('2d');

    // draw the lines
    ctx.beginPath();
    for (let j = 0; j < bPoints.length; j += 4) {
      const b = bPoints.slice(j, j + 2);
      const c = cPoints.slice(j, j + 2);
      ctx.moveTo(...b);
      ctx.lineTo(...c);
    }
    ctx.strokeStyle = '#888';
    ctx.stroke();

    // draw the points and closest lines
    for (let i = 0; i < aPoints.length; i += 4) {
      const a = aPoints.slice(i, i + 2);
      const ndx = closest[i / 4] * 4;
      const b = bPoints.slice(ndx, ndx + 2);
      const c = cPoints.slice(ndx, ndx + 2);
      const color = hsl(i / aPoints.length, 1, 0.4);
      ctx.fillStyle = color;
      ctx.strokeStyle = color;
      ctx.fillRect(a[0] - 2, a[1] - 2, 5, 5);
      ctx.beginPath();
      ctx.moveTo(...b);
      ctx.lineTo(...c);
      ctx.stroke();
    }
  }
}
main();
canvas { border: 1px solid black; margin: 5px; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<div>glsl</div>
<canvas id="glsl"></canvas>
<div>js</div>
<canvas id="js"></canvas>
If you use WebGL2 then you can use texelFetch so getPoint becomes
vec3 getPoint(in sampler2D tex, in int index) {
  ivec2 size = textureSize(tex, 0);
  ivec2 uv = ivec2(index % size.x, index / size.x);
  return texelFetch(tex, uv, 0).xyz;
}
and you don't need to pass in the size of the input textures, only the output size. Also, you could make your output an R32UI texture and write unsigned integer indices, so there's no need to encode the result as a color.
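A rough sketch of what that integer-output path could look like (untested; treat the exact setup as an assumption to verify):

// WebGL2 only: render the index into an R32UI texture instead of encoding it
// as an 8-bit color. The GLSL ES 3.00 fragment shader would declare
// `out uint result;` and write `result = uint(minIndex);` directly.
const tex = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, tex);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.R32UI, numOutputs, 1, 0,
              gl.RED_INTEGER, gl.UNSIGNED_INT, null);
const fb = gl.createFramebuffer();
gl.bindFramebuffer(gl.FRAMEBUFFER, fb);
gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0,
                        gl.TEXTURE_2D, tex, 0);
// ... draw as before, then read the indices back as unsigned integers.
// Reading an integer attachment uses RGBA_INTEGER/UNSIGNED_INT, 4 values per pixel:
const results = new Uint32Array(numOutputs * 4);
gl.readPixels(0, 0, numOutputs, 1, gl.RGBA_INTEGER, gl.UNSIGNED_INT, results);
// the closest-segment index for output n is results[n * 4]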
note: The code assumes you are using fewer than 2048 values for each of a, b and c, so much of the code assumes 1-dimensional textures. If you need more than 2048 you'll need to adjust the code to make rectangular textures of a size that fits your data; for example, if you had 9000 values then a 9x1000 texture would work. If you have 8999 values then you still need a 9x1000 texture, just padded, since textures are 2D arrays.
Also note that calling readPixels is considered slow. For example, if you just wanted to draw the results as above, instead of rendering to the canvas and reading the values out via readPixels you could render the result to a texture, then pass the texture into another shader.
addendum
This is probably the wrong place for this but as a terse explanation of GLSL for stuff like this you can think of GLSL as a fancy version of Array.prototype.map. When you use map you don't choose what is being written to directly. It happens indirectly.
const a = [1, 2, 3, 4, 5];
const b = a.map((v, index) => { return v * 2 + index; });
The { return v * 2 + index } part is analogous to a shader. In JavaScript the function inside map returns a value; in GLSL ES 1.0 the shader sets gl_FragColor as the output. In the JavaScript, index is the index of the array being written to (and happens to be the index of the input array as well). In GLSL, gl_FragCoord serves the same role.
Otherwise, the output of the vertex shader determines which pixels (which array elements of a 2D array) will get written to, so that makes it a more selective version of map. In the code above we're drawing a -1 to +1 quad, effectively saying "map over all pixels".
In fact here's a version of the above code, no GLSL, just JavaScript, but the JavaScript re-structured to look more like GLSL.
const v3 = twgl.v3;

function main() {
  const r = max => Math.random() * max;
  const hsl = (h, s, l) => `hsl(${h * 360},${s * 100 | 0}%,${l * 100 | 0}%)`;

  function createPoints(numPoints) {
    const points = [];
    for (let i = 0; i < numPoints; ++i) {
      points.push(r(300), r(150), 0, 0); // RGBA
    }
    return points;
  }

  function distanceFromPointToLineSquared(a, b, c) {
    const ba = v3.subtract(a, b);
    const bc = v3.subtract(c, b);
    const dot = v3.dot(ba, bc);
    const lenSq = v3.lengthSq(bc);
    let param = 0;
    if (lenSq !== 0) {
      param = Math.min(1, Math.max(0, dot / lenSq));
    }
    const r = v3.add(b, v3.mulScalar(bc, param));
    return v3.distanceSq(a, r);
  }

  const aPoints = createPoints(6);
  const bPoints = createPoints(15);
  const cPoints = createPoints(15);

  const gl_FragCoord = {};
  let gl_FragColor;
  const aValues = aPoints;
  const aDimensions = {}; // N/A
  const bValues = bPoints;
  const bDimensions = {}; // N/A
  const cValues = cPoints;
  const cDimensions = {}; // N/A
  const outputDimensions = {x: aPoints.length / 4, y: 1 };

  function getPoint(sampler, dimension, ndx) {
    return sampler.slice(ndx * 4, ndx * 4 + 3);
  }
  function javaScriptFragmentShader() {
    // gl_FragCoord is the coordinate of the pixel that is being set by the fragment shader.
    // It is the center of the pixel so the bottom left corner pixel will be (0.5, 0.5).
    // the pixel to the left of that is (1.5, 0.5), the pixel above that is (0.5, 1.5), etc...
    // so we can compute back into a linear index
    const ndx = Math.floor(gl_FragCoord.y) * outputDimensions.x + Math.floor(gl_FragCoord.x);

    // find the closest points
    let minDist = 10000000.0;
    let minIndex = -1.0;
    const a = getPoint(aValues, aDimensions, ndx);
    for (let i = 0; i < bPoints.length / 4; ++i) {
      const b = getPoint(bValues, bDimensions, i);
      const c = getPoint(cValues, cDimensions, i);
      const dist = distanceFromPointToLineSquared(a, b, c);
      if (dist < minDist) {
        minDist = dist;
        minIndex = i;
      }
    }

    // convert to 8bit color. The canvas defaults to RGBA 8bits per channel
    // so take our integer index (minIndex) and convert to float values that
    // will end up as the same 32bit index when read via readPixels as
    // 32bit values.
    gl_FragColor = [
      minIndex % 256.0,
      Math.floor(minIndex / 256.0) % 256.0,
      Math.floor(minIndex / (256.0 * 256.0)) % 256.0,
      Math.floor(minIndex / (256.0 * 256.0 * 256.0)),
    ].map(v => v / 255.0);
  }
  // do it in JS to check
  {
    // compute closest lines to points
    const closest = [];
    const width = aPoints.length / 4;
    const height = 1;
    // WebGL drawing each pixel
    for (let y = 0; y < height; ++y) {
      for (let x = 0; x < width; ++x) {
        gl_FragCoord.x = x + 0.5; // because pixels represent a rectangle one unit wide in pixel space
        gl_FragCoord.y = y + 0.5; // so the center of each pixel is in the middle of that rectangle
        javaScriptFragmentShader();
        const index = gl_FragColor[0] * 255 +
                      gl_FragColor[1] * 255 * 256 +
                      gl_FragColor[2] * 255 * 256 * 256 +
                      gl_FragColor[3] * 255 * 256 * 256 * 256;
        closest.push(index);
      }
    }
    drawResults(document.querySelector('#js'), closest);
  }
  function drawResults(canvas, closest) {
    const ctx = canvas.getContext('2d');

    // draw the lines
    ctx.beginPath();
    for (let j = 0; j < bPoints.length; j += 4) {
      const b = bPoints.slice(j, j + 2);
      const c = cPoints.slice(j, j + 2);
      ctx.moveTo(...b);
      ctx.lineTo(...c);
    }
    ctx.strokeStyle = '#888';
    ctx.stroke();

    // draw the points and closest lines
    for (let i = 0; i < aPoints.length; i += 4) {
      const a = aPoints.slice(i, i + 2);
      const ndx = closest[i / 4] * 4;
      const b = bPoints.slice(ndx, ndx + 2);
      const c = cPoints.slice(ndx, ndx + 2);
      const color = hsl(i / aPoints.length, 1, 0.4);
      ctx.fillStyle = color;
      ctx.strokeStyle = color;
      ctx.fillRect(a[0] - 2, a[1] - 2, 5, 5);
      ctx.beginPath();
      ctx.moveTo(...b);
      ctx.lineTo(...c);
      ctx.stroke();
    }
  }
}
main();
canvas { border: 1px solid black; margin: 5px; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas id="js"></canvas>

Related

Raytracer renders objects too large

I am following this course to learn computer graphics and write my first ray tracer.
I already have some visible results, but they seem to be too large.
The overall algorithm the course outlines is this:
Image Raytrace(Camera cam, Scene scene, int width, int height)
{
    Image image = new Image(width, height);
    for (int i = 0; i < height; i++)
        for (int j = 0; j < width; j++) {
            Ray ray = RayThruPixel(cam, i, j);
            Intersection hit = Intersect(ray, scene);
            image[i][j] = FindColor(hit);
        }
    return image;
}
I perform all calculations in camera space (where the camera is at (0, 0, 0)). Thus RayThruPixel returns a ray in camera coordinates, Intersect returns an intersection point also in camera coordinates, and the image pixel array is a direct mapping from the intersection results.
The below image is the rendering of a sphere at (0, 0, -40000) world coordinates and radius 0.15, and camera at (0, 0, 2) world coordinates looking towards (0, 0, 0) world coordinates. I would normally expect the sphere to be a lot smaller given its small radius and far away Z coordinate.
The same thing happens with rendering triangles too. In the below image I have 2 triangles that form a square, but it's way too zoomed in. The triangles have coordinates between -1 and 1, and the camera is looking from world coordinates (0, 0, 4).
This is what the square is expected to look like:
Here is the code snippet I use to determine the collision with the sphere. I'm not sure if I should divide the radius by the z coordinate here - without it, the circle is even larger:
Sphere* sphere = dynamic_cast<Sphere*>(object);
float t;
vec3 p0 = ray->origin;
vec3 p1 = ray->direction;
float a = glm::dot(p1, p1);
vec3 center2 = vec3(modelview * object->transform * glm::vec4(sphere->center, 1.0f)); // camera coords
float b = 2 * glm::dot(p1, (p0 - center2));
float radius = sphere->radius / center2.z;
float c = glm::dot((p0 - center2), (p0 - center2)) - radius * radius;
float D = b * b - 4 * a * c;
if (D > 0) {
    // two roots
    float sqrtD = glm::sqrt(D);
    float root1 = (-b + sqrtD) / (2 * a);
    float root2 = (-b - sqrtD) / (2 * a);
    if (root1 > 0 && root2 > 0) {
        t = glm::min(root1, root2);
        found = true;
    }
    else if (root2 < 0 && root1 >= 0) {
        t = root1;
        found = true;
    }
    else {
        // should not happen, implies that both roots are negative
    }
}
else if (D == 0) {
    // one root
    float root = -b / (2 * a);
    t = root;
    found = true;
}
else if (D < 0) {
    // no roots
    // continue;
}
if (found) {
    hitVector = p0 + p1 * t;
    hitNormal = glm::normalize(result->hitVector - center2);
}
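For reference, the quadratic above comes from substituting the ray $p(t) = p_0 + t\,p_1$ into the sphere equation $\lVert p - C\rVert^2 = r^2$:

$$a = p_1\cdot p_1, \qquad b = 2\,p_1\cdot(p_0 - C), \qquad c = (p_0 - C)\cdot(p_0 - C) - r^2, \qquad D = b^2 - 4ac$$

Note that $r$ here is the radius measured in the same space as $p_0$ and $C$; nothing in this derivation divides it by a z coordinate, which suggests the radius should be used unmodified.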
Here I generate the ray going through the relevant pixel:
Ray* RayThruPixel(Camera* camera, int x, int y) {
    const vec3 a = eye - center;
    const vec3 b = up;
    const vec3 w = glm::normalize(a);
    const vec3 u = glm::normalize(glm::cross(b, w));
    const vec3 v = glm::cross(w, u);
    const float aspect = ((float)width) / height;
    float fovyrad = glm::radians(camera->fovy);
    const float fovx = 2 * atan(tan(fovyrad * 0.5) * aspect);
    const float alpha = tan(fovx * 0.5) * (x - (width * 0.5)) / (width * 0.5);
    const float beta = tan(fovyrad * 0.5) * ((height * 0.5) - y) / (height * 0.5);
    return new Ray(/* origin= */ vec3(modelview * vec4(eye, 1.0f)),
                   /* direction= */ glm::normalize(vec3(modelview * glm::normalize(vec4(alpha * u + beta * v - w, 1.0f)))));
}
And intersection with a triangle:
Triangle* triangle = dynamic_cast<Triangle*>(object);
// vertices in camera coords
vec3 vertex1 = vec3(modelview * object->transform * vec4(*vertices[triangle->index1], 1.0f));
vec3 vertex2 = vec3(modelview * object->transform * vec4(*vertices[triangle->index2], 1.0f));
vec3 vertex3 = vec3(modelview * object->transform * vec4(*vertices[triangle->index3], 1.0f));
vec3 N = glm::normalize(glm::cross(vertex2 - vertex1, vertex3 - vertex1));
float D = -glm::dot(N, vertex1);
float m = glm::dot(N, ray->direction);
if (m == 0) {
    // no intersection because ray parallel to plane
}
else {
    float t = -(glm::dot(N, ray->origin) + D) / m;
    if (t < 0) {
        // no intersection because ray goes away from triangle plane
    }
    vec3 Phit = ray->origin + t * ray->direction;
    vec3 edge1 = vertex2 - vertex1;
    vec3 edge2 = vertex3 - vertex2;
    vec3 edge3 = vertex1 - vertex3;
    vec3 c1 = Phit - vertex1;
    vec3 c2 = Phit - vertex2;
    vec3 c3 = Phit - vertex3;
    if (glm::dot(N, glm::cross(edge1, c1)) > 0
        && glm::dot(N, glm::cross(edge2, c2)) > 0
        && glm::dot(N, glm::cross(edge3, c3)) > 0) {
        found = true;
        hitVector = Phit;
        hitNormal = N;
    }
}
Given that the output image is a circle, and that the same problem happens with triangles as well, my guess is the problem isn't from the intersection logic itself, but rather something wrong with the coordinate spaces or transformations. Could calculating everything in camera space be causing this?
I eventually figured it out by myself. I first noticed the problem was here:
return new Ray(/* origin= */ vec3(modelview * vec4(eye, 1.0f)),
/* direction= */ glm::normalize(vec3( modelview *
glm::normalize(vec4(alpha * u + beta * v - w, 1.0f)))));
When I removed the direction vector transformation (leaving it at just glm::normalize(alpha * u + beta * v - w)) I noticed the problem disappeared - the square was rendered correctly. I was prepared to accept it as an answer, although I wasn't completely sure why.
Then I noticed that after doing transformations on the object, the camera wasn't positioned properly, which makes sense - we're not pointing the rays in the correct direction.
I realized that my entire approach of doing the calculations in camera space was wrong. If I still wanted to use this approach, the rays would have to be transformed, but in a different way that would involve some complex math I wasn't ready to deal with.
I instead changed my approach to do transformations and intersections in world space and only use camera space at the lighting stage. We have to use camera space at some point, since we want to actually look in the direction of the object we are rendering.
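For the record, the rule that bit me: in homogeneous coordinates a point carries $w = 1$ while a direction carries $w = 0$, so a direction never picks up the translation part of a 4x4 transform:

$$\begin{pmatrix} M & t \\ 0 & 1 \end{pmatrix}\begin{pmatrix} p \\ 1 \end{pmatrix} = \begin{pmatrix} Mp + t \\ 1 \end{pmatrix}, \qquad \begin{pmatrix} M & t \\ 0 & 1 \end{pmatrix}\begin{pmatrix} d \\ 0 \end{pmatrix} = \begin{pmatrix} Md \\ 0 \end{pmatrix}$$

Transforming the ray direction with vec4(..., 1.0f), as my original code did, treats it as a point and adds the camera translation to it, which is part of why the camera-space version misbehaved.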

Upscaling using color interpolation for lighting?

I'm writing a lighting system for 2D games using a rather common method of 2D radiosity. The idea is to generate a JFA voronoi of the game scene (black, alpha = 1.0 for occluders and color, alpha = 1.0 for emitters) and generate an SDF from the JFA. Next you raymarch every pixel on screen for N rays with M max steps on the SDF with random angle offsets for each pixel. You then sample the emitter/occluder surface at the end point of each ray, step back into empty space and sample again for light emitted in the nearest empty space. This gives you a nice result as seen below:
That isn't the problem, it works great. The problem is efficiency. The idea behind fixing this is to render the GI at 1/N sample size (width/N, height/N) and then upscale the GI using interpolation, as I've done below:
I've accomplished the upscaling using weighted color interpolation, but it produces nasty artifacts near occluders:
Here's the full shader:
The uniforms passed are the GI downsampled texture (in_GIField), the scene (emitters/occluders only) texture (gm_BaseTexture), the signed distance field (in_SDField), the resolution (in_Screen) and the downsample ratio (in_Sample).
/*
  UPSCALING SHADER:
  Find the nearest 4 bounding samples to the current pixel (xyDelta & xyShift).
  Calculate all of the samples' weights based on whether they're marchable or source pixels.
  Finally, perform a composite weighted interpolation for the current pixel to the nearest 4 samples.
*/
varying vec2 in_Coord;

uniform float in_Sample;
uniform vec2 in_Screen;
uniform sampler2D in_GIField;
uniform sampler2D in_SDField;

#define TPI 9.4247779607693797153879301498385
#define PI 3.1415926535897932384626433832795
#define TAU 6.2831853071795864769252867665590
#define EPSILON 0.001 // floating point precision check
#define dot(f) dot(f,f) // shorthand dot of a single float

float ATAN2(float yy, float xx) { return mod(atan(yy, xx), TAU); }
float DIRECT(vec2 v1, vec2 v2) { vec2 v3 = v2 - v1; return ATAN2(-v3.y, v3.x); }
float DIFFERENCE(float src, float dst) { return mod(dst - src + TPI, TAU) - PI; }
float V2_F16(vec2 v) { return v.x + (v.y / 255.0); }
float VMAX(vec3 v) { return max(v.r, max(v.g, v.b)); }
vec2 SAMPLEXY(vec2 xycoord) { return (floor(xycoord / in_Sample) * in_Sample) + (in_Sample * 0.5); }
vec3 TONEMAP(vec3 color, float dist) { return color * (1.0 / (1.0 + dot(dist / min(in_Screen.x, in_Screen.y)))); }

float TESTMARCH(vec2 pix, vec2 end) {
  float aspect = in_Screen.x / in_Screen.y,
        dst = distance(pix, end);
  vec2 dir = normalize((end * in_Screen) - (pix * in_Screen)) / in_Screen;
  for (float i = 0.0; i < in_Sample; i += 1.0) {
    vec2 test = vec2(pix.x * aspect, pix.y) + (dir * (i / in_Screen));
    test.x /= aspect;
    vec4 sourceCol = texture2D(gm_BaseTexture, test);
    float source = max(sourceCol.r, max(sourceCol.g, sourceCol.b));
    if (source < EPSILON && sourceCol.a > 0.0) return 0.0;
  }
  return 1.0;
}

vec3 WCOMPOSITE(vec3 colors[4], float weights[4], vec2 uv) {
  // (uv * A * B) + (B * (1.0 - A)) //0, 2, 1, 3
  float weightA = (uv.y * weights[0] * weights[2]) + (weights[2] * (1.0 - weights[0])),
        weightB = (uv.y * weights[1] * weights[3]) + (weights[3] * (1.0 - weights[1]));
  vec3 colorA = mix(colors[0], colors[2], weightA),
       colorB = mix(colors[1], colors[3], weightB);
  return mix(colorA, colorB, uv.x);
}

void main() {
  vec2 xyCoord = in_Coord * in_Screen;
  vec2 xyLight = SAMPLEXY(xyCoord);
  vec2 xyDelta = sign(sign(xyCoord - xyLight) - 1.0);

  vec2 xyShift[4];
  xyShift[0] = vec2(0., 0.) + xyDelta;
  xyShift[1] = vec2(1., 0.) + xyDelta;
  xyShift[2] = vec2(0., 1.) + xyDelta;
  xyShift[3] = vec2(1., 1.) + xyDelta;

  vec2 xyField[4]; vec3 xyColor[4]; float notSource[4]; float xyWghts[4];
  for (int i = 0; i < 4; i++) {
    xyField[i] = (xyLight + (xyShift[i] * in_Sample)) * (1.0 / in_Screen);
    xyColor[i] = texture2D(in_GIField, xyField[i]).rgb;
    notSource[i] = 1.0 - sign(texture2D(gm_BaseTexture, xyField[i]).a);
    xyWghts[i] = TESTMARCH(in_Coord, xyField[i]) * sign(VMAX(xyColor[i])) * notSource[i];
  }

  vec2 uvCoord = mod(xyCoord - xyLight, in_Sample) * (1.0 / in_Sample);
  vec3 xyFinal = WCOMPOSITE(xyColor, xyWghts, uvCoord);
  vec4 xySource = texture2D(gm_BaseTexture, in_Coord);
  float isSource = sign(xySource.a);
  gl_FragColor = vec4((isSource * xySource.rgb) + ((1.0 - isSource) * xyFinal), 1.0);
}
EDIT: This DOES produce the intended result in empty space, but ends up with nasty artifacting near emitters and occluders. I tried to solve this in the for-loop in the main function by weighting out the emitter/occluder colors (source pixels in the scene texture), but this isn't working.
See shader code attached (Shadertoy). I noticed that the weighting function will actually produce some colors with a weight of 0 (as expected, as originally written). I currently don't have a solution for how to remove colors from the interpolation process entirely.
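One idea I haven't tried yet (so treat this as a sketch, not a verified fix): rather than a fixed bilinear blend, renormalize by the total weight so that zero-weight samples drop out of the interpolation entirely:

$$C = \frac{\sum_{i=0}^{3} w_i\, b_i(uv)\, c_i}{\sum_{i=0}^{3} w_i\, b_i(uv)}$$

where $b_i(uv)$ are the ordinary bilinear basis weights, $w_i$ are the validity weights computed in the loop above, and $c_i$ are the sample colors, with a fallback to the nearest valid sample when the denominator is near zero.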
Full Source Code
Full Color Shader Code

How can I understand the following SVG code?

I have code from a web site, and it looks like it should be simple, but too simple for SVG. How can I determine if this is truly SVG, and what it does? I am especially interested in what looks like nested `&` characters and dots (`.`), followed by the split and map calls.
Snippet:
// the shape of the dragon, converted from a SVG image
'! ((&(&*$($,&.)/-.0,4%3"7$;(#/EAA<?:<9;;88573729/7,6(8&;'.split("").map(function(a, i) {
  shape[i] = a.charCodeAt(0) - 32;
});
Full code:
//7 Dragons
//Rauri
// full source for entry into js1k dragons: http://js1k.com/2014-dragons/demo/1837
// thanks to simon for grunt help and sean for inspiration help
// js1k shim
var a = document.getElementsByTagName('canvas')[0];
var b = document.body;
var d = function(e){ return function(){ e.parentNode.removeChild(e); }; }(a);
// unprefix some popular vendor prefixed things (but stick to their original name)
var AudioContext =
    window.AudioContext ||
    window.webkitAudioContext;
var requestAnimationFrame =
    window.requestAnimationFrame ||
    window.mozRequestAnimationFrame ||
    window.webkitRequestAnimationFrame ||
    window.msRequestAnimationFrame ||
    function(f){ setTimeout(f, 1000/30); };
// stretch canvas to screen size (once, wont onresize!)
a.style.width = (a.width = innerWidth - 0) + 'px';
a.style.height = (a.height = innerHeight - 0) + 'px';
var c = a.getContext('2d');
// end shim
var sw = a.width,
    sh = a.height,
    M = Math,
    Mc = M.cos,
    Ms = M.sin,
    ran = M.random,
    pfloat = 0,
    pi = M.PI,
    dragons = [],
    shape = [],

    loop = function() {
      a.width = sw; // clear screen
      for (j = 0; j < 7; j++) {
        if (!dragons[j]) dragons[j] = dragon(j); // create dragons initially
        dragons[j]();
      }
      pfloat++;
      requestAnimationFrame(loop);
    },
    dragon = function(index) {
      var scale = 0.1 + index * index / 49,
          gx = ran() * sw / scale,
          gy = sh / scale,
          lim = 300, // this gets inlined, no good!
          speed = 3 + ran() * 5,
          direction = pi, //0, //ran() * pi * 2, //ran(0,TAU),
          direction1 = direction,
          spine = [];
      return function() {
        // check if dragon flies off screen
        if (gx < -lim || gx > sw / scale + lim || gy < -lim || gy > sh / scale + lim) {
          // flip them around
          var dx = sw / scale / 2 - gx,
              dy = sh / scale / 2 - gy;
          direction = direction1 = M.atan(dx/dy) + (dy < 0 ? pi : 0);
        } else {
          direction1 += ran() * .1 - .05;
          direction -= (direction - direction1) * .1;
        }
        // move the dragon forwards
        gx += Ms(direction) * speed;
        gy += Mc(direction) * speed;
        // calculate a spine - a chain of points
        // the first point in the array follows a floating position: gx,gy
        // the rest of the chain of points following each other in turn
        for (i = 0; i < 70; i++) {
          if (i) {
            if (!pfloat) spine[i] = {x: gx, y: gy}
            var p = spine[i - 1],
                dx = spine[i].x - p.x,
                dy = spine[i].y - p.y,
                d = M.sqrt(dx * dx + dy * dy),
                perpendicular = M.atan(dy/dx) + pi / 2 + (dx < 0 ? pi : 0);
            // make each point chase the previous, but never get too close
            if (d > 4) {
              var mod = .5;
            } else if (d > 2) {
              mod = (d - 2) / 4;
            } else {
              mod = 0;
            }
            spine[i].x -= dx * mod;
            spine[i].y -= dy * mod;
            // perpendicular is used to map the coordinates on to the spine
            spine[i].px = Mc(perpendicular);
            spine[i].py = Ms(perpendicular);
            if (i == 20) { // average point in the middle of the wings so the wings remain symmetrical
              var wingPerpendicular = perpendicular;
            }
          } else {
            // i is 0 - first point in spine
            spine[i] = {x: gx, y: gy, px: 0, py: 0};
          }
        }
        // map the dragon to the spine
        // the x co-ordinates of each point of the dragon shape are honoured
        // the y co-ordinates of each point of the dragon are mapped to the spine
        c.moveTo(spine[0].x, spine[0].y)
        for (i = 0; i < 154; i += 2) { // shape.length * 2 - it's symmetrical, so draw up one side and back down the other
          if (i < 77) { // shape.length
            // draw the one half from nose to tail
            var index = i; // even index is x, odd (index + 1) is y of each coordinate
            var L = 1;
          } else {
            // draw the other half from tail back to nose
            index = 152 - i;
            L = -1;
          }
          var x = shape[index];
          var spineNode = spine[shape[index + 1]]; // get the equivalent spine position from the dragon shape
          if (index >= 56) { // draw tail
            var wobbleIndex = 56 - index; // tail wobbles more towards the end
            var wobble = Ms(wobbleIndex / 3 + pfloat * 0.1) * wobbleIndex * L;
            x = 20 - index / 4 + wobble;
            // override the node for the correct tail position
            spineNode = spine[index * 2 - 83];
          } else if (index > 13) { // draw "flappy wings"
            // 4 is hinge point
            x = 4 + (x - 4) * (Ms((-x / 2 + pfloat) / 25 * speed / 4) + 2) * 2; // feed x into sin to make wings "bend"
            // override the perpendicular lines for the wings
            spineNode.px = Mc(wingPerpendicular);
            spineNode.py = Ms(wingPerpendicular);
          }
          c.lineTo(
            (spineNode.x + x * L * spineNode.px) * scale,
            (spineNode.y + x * L * spineNode.py) * scale
          );
        }
        c.fill();
      }
    }
// the shape of the dragon, converted from a SVG image
'! ((&(&*$($,&.)/-.0,4%3"7$;(#/EAA<?:<9;;88573729/7,6(8&;'.split("").map(function(a, i) {
  shape[i] = a.charCodeAt(0) - 32;
});
loop();
While the context this is used in is <canvas>, the origin may well be an SVG <polyline>.
In a first step, the letters are mapped to numbers. A bit of obfuscation, but nothing too serious: get the character code representing the letter and write it to an array.
const shape = [];
'! ((&(&*$($,&.)/-.0,4%3"7$;(#/EAA<?:<9;;88573729/7,6(8&;'.split("").map(function(a, i) {
  shape[i] = a.charCodeAt(0) - 32;
});
results in an array
[1,0,8,8,6,8,6,10,4,8,4,12,6,14,9,15,13,14,16,12,20,5,19,2,23,4,27,8,32,15,37,33,33,28,31,26,28,25,27,27,24,24,21,23,19,23,18,25,15,23,12,22,8,24,6,27]
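As a spot check, the character code of '!' is 33, of ' ' is 32 and of '(' is 40, so:

'!'.charCodeAt(0) - 32; // 1
' '.charCodeAt(0) - 32; // 0
'('.charCodeAt(0) - 32; // 8

which matches the first entries [1, 0, 8, ...] of the array.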
Now just write this array to a points attribute of a polyline, joining the numbers with a space character:
const outline = document.querySelector('#outline');
const shape = [];
'! ((&(&*$($,&.)/-.0,4%3"7$;(#/EAA<?:<9;;88573729/7,6(8&;'.split("").map(function(a, i) {
  shape[i] = a.charCodeAt(0) - 32;
});
outline.setAttribute('points', shape.join(' '));
#outline {
  stroke: black;
  stroke-width: 0.5;
  fill: none;
}
<svg viewBox="0 0 77 77" width="300" height="300">
<polyline id="outline" />
</svg>
and you get the basic outline of (half) a dragon. The rest is repetition and transformation to make things a bit more complex.

Simulate virtual camera which preserves color information

I have a virtual scanner that generates a 2.5-D view of a point cloud (i.e. a 2D projection of a 3D point cloud) depending on camera position. I'm using vtkCamera.GetProjectionTransformMatrix() to get the transformation matrix from world/global to camera coordinates.
However, if the input point cloud has color information for points I would like to preserve it.
Here are the relevant lines:
boost::shared_ptr<pcl::visualization::PCLVisualizer> vis; // camera location, viewpoint and up direction for vis were already defined before
vtkSmartPointer<vtkRendererCollection> rens = vis->getRendererCollection();
vtkSmartPointer<vtkRenderWindow> win = vis->getRenderWindow();
win->SetSize(xres, yres); // xres and yres are predefined resolutions
win->Render();

float dwidth = 2.0f / float(xres),
      dheight = 2.0f / float(yres);
float *depth = new float[xres * yres];
win->GetZbufferData(0, 0, xres - 1, yres - 1, &(depth[0]));

vtkRenderer *ren = rens->GetFirstRenderer();
vtkCamera *camera = ren->GetActiveCamera();
vtkSmartPointer<vtkMatrix4x4> projection_transform = camera->GetProjectionTransformMatrix(ren->GetTiledAspectRatio(), 0, 1);

Eigen::Matrix4f mat1;
for (int i = 0; i < 4; ++i)
    for (int j = 0; j < 4; ++j)
        mat1(i, j) = static_cast<float>(projection_transform->Element[i][j]);
mat1 = mat1.inverse().eval();
Now, mat1 is used to transform coordinates to camera-view:
pcl::PointCloud<pcl::PointXYZ>::Ptr &cloud;
int ptr = 0;
for (int y = 0; y < yres; ++y)
{
    for (int x = 0; x < xres; ++x, ++ptr)
    {
        pcl::PointXYZ &pt = (*cloud)[ptr];
        if (depth[ptr] == 1.0)
        {
            pt.x = pt.y = pt.z = std::numeric_limits<float>::quiet_NaN();
            continue;
        }
        Eigen::Vector4f world_coords(dwidth * float(x) - 1.0f,
                                     dheight * float(y) - 1.0f,
                                     depth[ptr],
                                     1.0f);
        world_coords = mat1 * world_coords;
        float w3 = 1.0f / world_coords[3];
        world_coords[0] *= w3;
        world_coords[1] *= w3;
        world_coords[2] *= w3;
        pt.x = static_cast<float>(world_coords[0]);
        pt.y = static_cast<float>(world_coords[1]);
        pt.z = static_cast<float>(world_coords[2]);
    }
}
I want the virtual scanner to return pcl::PointXYZRGB point cloud with color information.
Any help on how to implement this from someone experienced in VTK would save some of my time.
It's possible that I missed a relevant question already asked here - in that case, please point me to it. Thanks.
If I understand correctly that you want to get the color in which the point was rendered into the win RenderWindow, you should be able to get the data from the rendering buffer by calling
float* pixels = win->GetRGBAPixelData(0, 0, xres - 1, yres - 1, 0/1);
This should give you each pixel of the rendering buffer as an array in the format [R0, G0, B0, A0, R1, G1, B1, A1, R2, ...]. The last parameter, which I wrote as 0/1, is whether the data should be taken from the front or back OpenGL buffer. I presume by default double buffering should be on, so then you want to read from the back buffer (use '1'), but I am not sure.
Once you have that, you can get the color in your second loop for all pixels that belong to points (depth[ptr] != 1.0) as:
// pcl::PointXYZRGB stores r/g/b as 8-bit members (lowercase), while
// GetRGBAPixelData returns floats in [0, 1], so convert when copying:
pt.r = static_cast<uint8_t>(pixels[4 * ptr] * 255.0f);
pt.g = static_cast<uint8_t>(pixels[4 * ptr + 1] * 255.0f);
pt.b = static_cast<uint8_t>(pixels[4 * ptr + 2] * 255.0f);
You should call win->ReleaseRGBAPixelData(pixels) once you're done with it.

Graphic algorithm Unions, intersect, subtract

I need a good source for reading up on how to create an algorithm that takes two polylines (paths comprised of many line segments) and performs a union, subtraction, or intersection between them. This is tied to a custom API, so I need to understand the underlying algorithm.
Plus any sources in a VB dialect would be doubly helpful.
This catalogue of implementations of intersection algorithms from the Stony Brook Algorithm Repository might be useful. The repository is managed by Steven Skiena,
author of a very well respected book on algorithms: The Algorithm Design Manual.
That's his own Amazon exec link by the way :)
Several routines for you here. Hope you find them useful :-)
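For orientation before diving in, the first routine below solves the ray/segment intersection parametrically: a point on the segment is $p_0 + \lambda\,(p_1 - p_0)$ with $\lambda \in (0, 1]$, and a point on the ray is $s + \mu\,(\sin\theta,\ \cos\theta)$ with $\mu \ge 0$. Writing $\Delta s = s - p_0$ and $\Delta = p_1 - p_0$, setting the two equal and eliminating $\mu$ gives

$$\lambda = \frac{\Delta s_y \tan\theta - \Delta s_x}{\Delta_y \tan\theta - \Delta_x}$$

which is what the code computes; the $|\cdot| < 10^{-7}$ check on the denominator screens out the parallel case, and $\mu$ is then recovered from whichever of $\sin\theta$ or $\cos\theta$ is larger in magnitude, for numerical stability.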
// routine to calculate the square of either the shortest distance or largest distance
// from the CPoint to the intersection point of a ray fired at an angle flAngle
// radians at an array of line segments.
// This routine returns TRUE if an intersection has been found, in which case flD
// is valid and holds the square of the distance, and returns FALSE if no valid
// intersection was found.
// If an intersection was found, then intersectionPoint is set to the point found.
bool CalcIntersection(const CPoint &cPoint,
                      const float flAngle,
                      const int nVertexTotal,
                      const CPoint *pVertexList,
                      const BOOL bMin,
                      float &flD,
                      CPoint &intersectionPoint)
{
    float d, dsx, dsy, dx, dy, lambda, mu, px, py;
    int p0x, p0y, p1x, p1y;

    // get source position
    const float flSx = (float)cPoint.x;
    const float flSy = -(float)cPoint.y;

    // calc trig functions
    const float flTan = tanf(flAngle);
    const float flSin = sinf(flAngle);
    const float flCos = cosf(flAngle);
    const bool bUseSin = fabsf(flSin) > fabsf(flCos);

    // initialise distance
    flD = (bMin ? FLT_MAX : 0.0f);

    // for each line segment in protective feature
    for(int i = 0; i < nVertexTotal; i++)
    {
        // get coordinates of line (negate the y value so the y-axis is upwards)
        p0x = pVertexList[i].x;
        p0y = -pVertexList[i].y;
        p1x = pVertexList[i + 1].x;
        p1y = -pVertexList[i + 1].y;

        // calc. deltas
        dsx = (float)(cPoint.x - p0x);
        dsy = (float)(-cPoint.y - p0y);
        dx = (float)(p1x - p0x);
        dy = (float)(p1y - p0y);

        // calc. denominator
        d = dy * flTan - dx;

        // if line & ray are parallel
        if(fabsf(d) < 1.0e-7f)
            continue;

        // calc. intersection point parameter
        lambda = (dsy * flTan - dsx) / d;

        // if intersection is not valid
        if((lambda <= 0.0f) || (lambda > 1.0f))
            continue;

        // if sine is bigger than cosine
        if(bUseSin){
            mu = ((float)p0x + lambda * dx - flSx) / flSin;
        } else {
            mu = ((float)p0y + lambda * dy - flSy) / flCos;
        }

        // if intersection is valid
        if(mu >= 0.0f){
            // calc. intersection point
            px = (float)p0x + lambda * dx;
            py = (float)p0y + lambda * dy;

            // calc. distance between intersection point & source point
            dx = px - flSx;
            dy = py - flSy;
            d = dx * dx + dy * dy;

            // compare with relevant value
            if(bMin){
                if(d < flD)
                {
                    flD = d;
                    intersectionPoint.x = RoundValue(px);
                    intersectionPoint.y = -RoundValue(py);
                }
            } else {
                if(d > flD)
                {
                    flD = d;
                    intersectionPoint.x = RoundValue(px);
                    intersectionPoint.y = -RoundValue(py);
                }
            }
        }
    }

    // return
    return(bMin ? (flD != FLT_MAX) : (flD != 0.0f));
}
// Routine to calculate the square of the distance from the CPoint to the
// intersection point of a ray fired at an angle flAngle radians at a line.
// This routine returns TRUE if an intersection has been found in which case flD
// is valid and holds the square of the distance.
// Returns FALSE if no valid intersection was found.
// If an intersection was found, then intersectionPoint is set to the point found.
bool CalcIntersection(const CPoint &cPoint,
                      const float flAngle,
                      const CPoint &PointA,
                      const CPoint &PointB,
                      const bool bExtendLine,
                      float &flD,
                      CPoint &intersectionPoint)
{
    // get source position
    const float flSx = (float)cPoint.x;
    const float flSy = -(float)cPoint.y;

    // calc trig functions
    float flTan = tanf(flAngle);
    float flSin = sinf(flAngle);
    float flCos = cosf(flAngle);
    const bool bUseSin = fabsf(flSin) > fabsf(flCos);

    // get coordinates of line (negate the y value so the y-axis is upwards)
    const int p0x = PointA.x;
    const int p0y = -PointA.y;
    const int p1x = PointB.x;
    const int p1y = -PointB.y;

    // calc. deltas
    const float dsx = (float)(cPoint.x - p0x);
    const float dsy = (float)(-cPoint.y - p0y);
    float dx = (float)(p1x - p0x);
    float dy = (float)(p1y - p0y);

    // Calc. denominator
    const float d = dy * flTan - dx;

    // If line & ray are parallel
    if(fabsf(d) < 1.0e-7f)
        return false;

    // calc. intersection point parameter
    const float lambda = (dsy * flTan - dsx) / d;

    // If extending line to meet point, don't check for ray missing line
    if(!bExtendLine)
    {
        // If intersection is not valid
        if((lambda <= 0.0f) || (lambda > 1.0f))
            return false; // Ray missed line
    }

    // If sine is bigger than cosine
    float mu;
    if(bUseSin){
        mu = ((float)p0x + lambda * dx - flSx) / flSin;
    } else {
        mu = ((float)p0y + lambda * dy - flSy) / flCos;
    }

    // if intersection is valid
    if(mu >= 0.0f)
    {
        // calc. intersection point
        const float px = (float)p0x + lambda * dx;
        const float py = (float)p0y + lambda * dy;

        // calc. distance between intersection point & source point
        dx = px - flSx;
        dy = py - flSy;
        flD = (dx * dx) + (dy * dy);
        intersectionPoint.x = RoundValue(px);
        intersectionPoint.y = -RoundValue(py);
        return true;
    }
    return false;
}
// Fillet (with a radius of 0) two lines. From point source fired at angle (radians) to line Line1A, Line1B.
// Modifies line end point Line1B. If the ray does not intersect the line, then it is rotated by 90 degrees
// and tried again until the fillet is complete.
void Fillet(const CPoint &source, const float fThetaRadians, const CPoint &Line1A, CPoint &Line1B)
{
    if(Line1A == Line1B)
        return; // No line

    float dist;
    if(CalcIntersection(source, fThetaRadians, Line1A, Line1B, true, dist, Line1B))
        return;
    if(CalcIntersection(source, CalcBaseFloat(TWO_PI, fThetaRadians + PI * 0.5f), Line1A, Line1B, true, dist, Line1B))
        return;
    if(CalcIntersection(source, CalcBaseFloat(TWO_PI, fThetaRadians + PI), Line1A, Line1B, true, dist, Line1B))
        return;
    if(!CalcIntersection(source, CalcBaseFloat(TWO_PI, fThetaRadians + PI * 1.5f), Line1A, Line1B, true, dist, Line1B))
        ASSERT(FALSE); // Could not find intersection?
}
// routine to determine if an array of line segments crosses gridSquare
// x and y give the float coordinates of the corners
BOOL CrossGridSquare(int nV, const CPoint *pV,
                     const CRect &extent, const CRect &gridSquare)
{
    // test extents
    if( (extent.right < gridSquare.left) ||
        (extent.left > gridSquare.right) ||
        (extent.top > gridSquare.bottom) ||
        (extent.bottom < gridSquare.top))
    {
        return FALSE;
    }

    float a, b, c, dx, dy, s, x[4], y[4];
    int max_x, max_y, min_x, min_y, p0x, p0y, p1x, p1y, sign, sign_old;

    // construct array of vertices for grid square
    x[0] = (float)gridSquare.left;
    y[0] = (float)gridSquare.top;
    x[1] = (float)(gridSquare.right);
    y[1] = y[0];
    x[2] = x[1];
    y[2] = (float)(gridSquare.bottom);
    x[3] = x[0];
    y[3] = y[2];

    // for each line segment
    for(int i = 0; i < nV; i++)
    {
        // get end-points
        p0x = pV[i].x;
        p0y = pV[i].y;
        p1x = pV[i + 1].x;
        p1y = pV[i + 1].y;

        // determine line extent
        if(p0x > p1x){
            min_x = p1x;
            max_x = p0x;
        } else {
            min_x = p0x;
            max_x = p1x;
        }
        if(p0y > p1y){
            min_y = p1y;
            max_y = p0y;
        } else {
            min_y = p0y;
            max_y = p1y;
        }

        // test to see if grid square is outside of line segment extent
        if( (max_x < gridSquare.left) ||
            (min_x > gridSquare.right) ||
            (max_y < gridSquare.top) ||
            (min_y > gridSquare.bottom))
        {
            continue;
        }

        // calc. line equation
        dx = (float)(p1x - p0x);
        dy = (float)(p1y - p0y);
        a = dy;
        b = -dx;
        c = -dy * (float)p0x + dx * (float)p0y;

        // evaluate line eqn. at first grid square vertex
        s = a * x[0] + b * y[0] + c;
        if(s < 0.0f){
            sign_old = -1;
        } else if(s > 1.0f){
            sign_old = 1;
        } else {
            sign_old = 0;
        }

        // evaluate line eqn. at other grid square vertices
        for (int j = 1; j < 4; j++)
        {
            s = a * x[j] + b * y[j] + c;
            if(s < 0.0f){
                sign = -1;
            } else if(s > 1.0f){
                sign = 1;
            } else {
                sign = 0;
            }
            // if there has been a change in sign
            if(sign != sign_old)
                return TRUE;
        }
    }
    return FALSE;
}
// calculate the square of the shortest distance from point s
// to the line segment between p0 and p1
// t is the point on the line from which the minimum distance
// is measured
float CalcShortestDistanceSqr(const CPoint &s,
                              const CPoint &p0,
                              const CPoint &p1,
                              CPoint &t)
{
    // if point is at a vertex
    if((s == p0) || (s == p1))
        return(0.0F);

    // calc. deltas
    int dx = p1.x - p0.x;
    int dy = p1.y - p0.y;
    int dsx = s.x - p0.x;
    int dsy = s.y - p0.y;

    // if both deltas are zero
    if((dx == 0) && (dy == 0))
    {
        // shortest distance is the distance to either vertex
        float l = (float)(dsx * dsx + dsy * dsy);
        t = p0;
        return(l);
    }

    // calc. point, p, on line that is closest to sourcePosition
    // p = p0 + l * (p1 - p0)
    float l = (float)(dsx * dx + dsy * dy) / (float)(dx * dx + dy * dy);

    // if intersection is beyond p0
    if(l <= 0.0F){
        // shortest distance is to p0
        l = (float)(dsx * dsx + dsy * dsy);
        t = p0;
    // else if intersection is beyond p1
    } else if(l >= 1.0F){
        // shortest distance is to p1
        dsx = s.x - p1.x;
        dsy = s.y - p1.y;
        l = (float)(dsx * dsx + dsy * dsy);
        t = p1;
    // if intersection is between line end points
    } else {
        // calc. perpendicular distance
        float ldx = (float)dsx - l * (float)dx;
        float ldy = (float)dsy - l * (float)dy;
        t.x = p0.x + RoundValue(l * (float)dx);
        t.y = p0.y + RoundValue(l * (float)dy);
        l = ldx * ldx + ldy * ldy;
    }
    return(l);
}
// Calculates the bounding rectangle around a set of points
// Returns TRUE if the rectangle is not empty (has area), FALSE otherwise
// Opposite of CreateRectPoints()
BOOL CalcBoundingRectangle(const CPoint *pVertexList, const int nVertexTotal, CRect &rect)
{
    rect.SetRectEmpty();
    if(nVertexTotal < 2)
    {
        ASSERT(FALSE); // Must have at least 2 points
        return FALSE;
    }

    // First point, set rectangle (no area at this point)
    rect.left = rect.right = pVertexList[0].x;
    rect.top = rect.bottom = pVertexList[0].y;

    // Grow rectangle by looking at the other points
    for(int n = 1; n < nVertexTotal; n++)
    {
        if(rect.left > pVertexList[n].x) // Take minimum
            rect.left = pVertexList[n].x;
        if(rect.right < pVertexList[n].x) // Take maximum
            rect.right = pVertexList[n].x;
        if(rect.top > pVertexList[n].y) // Take minimum
            rect.top = pVertexList[n].y;
        if(rect.bottom < pVertexList[n].y) // Take maximum
            rect.bottom = pVertexList[n].y;
    }
    rect.NormalizeRect(); // Normalise rectangle
    return !(rect.IsRectEmpty());
}
