The algorithm scales down the height and width of the image so that it looks like a center-cropped image. I want to keep the original image height and width. This code is from the GPUImage library. Please help.
private void adjustImageScaling() {
    float outputWidth = mOutputWidth;
    float outputHeight = mOutputHeight;
    if (mRotation == Rotation.ROTATION_270 || mRotation == Rotation.ROTATION_90) {
        outputWidth = mOutputHeight;
        outputHeight = mOutputWidth;
    }

    float ratio1 = outputWidth / mImageWidth;
    float ratio2 = outputHeight / mImageHeight;
    float ratioMax = Math.max(ratio1, ratio2);
    int imageWidthNew = Math.round(mImageWidth * ratioMax);
    int imageHeightNew = Math.round(mImageHeight * ratioMax);

    float ratioWidth = imageWidthNew / outputWidth;
    float ratioHeight = imageHeightNew / outputHeight;

    float[] cube = CUBE;
    float[] textureCords = TextureRotationUtil.getRotation(mRotation, mFlipHorizontal, mFlipVertical);
    if (mScaleType == GPUImage.ScaleType.CENTER_CROP) {
        float distHorizontal = (1 - 1 / ratioWidth) / 2;
        float distVertical = (1 - 1 / ratioHeight) / 2;
        textureCords = new float[]{
                addDistance(textureCords[0], distHorizontal), addDistance(textureCords[1], distVertical),
                addDistance(textureCords[2], distHorizontal), addDistance(textureCords[3], distVertical),
                addDistance(textureCords[4], distHorizontal), addDistance(textureCords[5], distVertical),
                addDistance(textureCords[6], distHorizontal), addDistance(textureCords[7], distVertical),
        };
    } else {
        cube = new float[]{
                CUBE[0] / ratioHeight, CUBE[1] / ratioWidth,
                CUBE[2] / ratioHeight, CUBE[3] / ratioWidth,
                CUBE[4] / ratioHeight, CUBE[5] / ratioWidth,
                CUBE[6] / ratioHeight, CUBE[7] / ratioWidth,
        };
    }

    mGLCubeBuffer.clear();
    mGLCubeBuffer.put(cube).position(0);
    mGLTextureBuffer.clear();
    mGLTextureBuffer.put(textureCords).position(0);
}
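If the goal is to show the whole image at its original proportions rather than the center-cropped version, note that the else branch above (the non-CENTER_CROP path) shrinks the vertex cube instead of shifting the texture coordinates, so nothing gets cut off. A minimal sketch, assuming your version of the library exposes setScaleType and the CENTER_INSIDE scale type (the helper class and method names below are made up):

import android.graphics.Bitmap;
import jp.co.cyberagent.android.gpuimage.GPUImage;

// Hypothetical helper: show the full bitmap without cropping and report its size.
final class UncroppedHelper {
    static void showUncropped(GPUImage gpuImage, Bitmap bitmap) {
        // CENTER_INSIDE letter-boxes instead of cropping, so the whole image stays visible.
        gpuImage.setScaleType(GPUImage.ScaleType.CENTER_INSIDE);
        gpuImage.setImage(bitmap);
        // The original dimensions are simply those of the source bitmap;
        // adjustImageScaling() never overwrites mImageWidth/mImageHeight.
        int originalWidth = bitmap.getWidth();
        int originalHeight = bitmap.getHeight();
    }
}

Also note that adjustImageScaling() only recomputes the OpenGL vertex and texture coordinates; the original image size is still available in mImageWidth and mImageHeight.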
Here is my result, but the shadow is very hard.
for (SceneLight* light : scene->lights)
{
    Vector3D dir_to_light;
    float dist_to_light;
    float pdf;

    // Delta lights (point/directional) only need one sample; area lights get ns_area_light.
    int num_light_samples = light->is_delta_light() ? 1 : ns_area_light;
    double scale = 1.0 / num_light_samples;

    for (int i = 0; i < num_light_samples; i++) {
        // Sample a direction (and distance) towards the light, together with its pdf.
        Spectrum light_L = light->sample_L(hit_p, &dir_to_light, &dist_to_light, &pdf);
        Vector3D w_in = w2o * dir_to_light;
        double cos_theta = std::max(0.0, w_in[2]);
        Spectrum f = isect.bsdf->f(w_out, w_in);

        // Shadow ray: offset the origin and shorten the range to avoid self-intersection.
        Ray shadow_ray(hit_p + EPS_D * dir_to_light, dir_to_light,
                       dist_to_light - (EPS_D * dir_to_light).norm(), 0);
        if (!bvh->intersect(shadow_ray))
        {
            L_out += (f * light_L * (cos_theta * scale / pdf));
        }
    }
}
Above is my code and the render result. The shadow looks very hard. If I want to make the shadow softer, what can I do? I am writing a path tracer.
Thanks.
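Hard shadow edges are expected as long as the light is a delta light (a point or directional light): is_delta_light() forces num_light_samples to 1, and every shadow ray from a given hit point either reaches the light or it doesn't, so a pixel ends up fully lit or fully dark. To get a penumbra you need an area light whose surface is sampled at a different jittered point for each of the ns_area_light samples; pixels near the shadow boundary then see the light partially blocked, and averaging the samples produces a soft edge (a bigger light gives a softer shadow). The sketch below illustrates the idea outside your renderer's API, in Java, with made-up names:

import java.util.Random;

// Minimal sketch (not your renderer's API): estimate visibility of a rectangular
// area light by shooting shadow rays to many jittered points on its surface.
// Averaging partially occluded samples is what creates the soft penumbra;
// a single point light can only give a hard edge.
final class AreaLightSample {
    static final Random RNG = new Random();

    // Returns the fraction of shadow rays that reach the light
    // (0 = fully shadowed, 1 = fully lit). 'occluder' stands in for a BVH test.
    static double softShadowFactor(double[] hitPoint,
                                   double[] lightCorner, double[] edgeU, double[] edgeV,
                                   int samples, Occluder occluder) {
        int unblocked = 0;
        for (int i = 0; i < samples; i++) {
            double u = RNG.nextDouble(), v = RNG.nextDouble();
            double[] p = new double[3];
            for (int k = 0; k < 3; k++)
                p[k] = lightCorner[k] + u * edgeU[k] + v * edgeV[k]; // jittered point on the light
            if (!occluder.isBlocked(hitPoint, p)) unblocked++;
        }
        return (double) unblocked / samples;
    }

    interface Occluder { boolean isBlocked(double[] from, double[] to); }
}

In your code the equivalent knobs are: use an area light in the scene, raise ns_area_light, and make sure sample_L returns a different point on the light (with the matching pdf) on each call rather than always the light's center.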
I am trying to combine the Vuforia SDK and jMonkeyEngine. So far, the cube is placed on the target (ImageTarget). But when I move the camera, the cube moves a little bit too. I want the cube to remain at the center of the target (like the teapot in VuforiaSamples ImageTarget). Do you have an idea how I can solve this problem?
I think this is the relevant code:
public void initForegroundCamera()
{
foregroundCamera = new Camera(settings.getWidth(), settings.getHeight());
foregroundCamera.setLocation(new Vector3f(0.0f, 0.0f, 0.0f));
// Get perspective transformation
CameraCalibration cameraCalibration = CameraDevice.getInstance().getCameraCalibration();
VideoBackgroundConfig config = Renderer.getInstance().getVideoBackgroundConfig();
float viewportWidth = config.getSize().getData()[0];
float viewportHeight = config.getSize().getData()[1];
float cameraWidth = cameraCalibration.getSize().getData()[0];
float cameraHeight = cameraCalibration.getSize().getData()[1];
float screenWidth = settings.getWidth();
float screenHeight = settings.getHeight();
Vec2F size = new Vec2F(cameraWidth, cameraHeight);
Vec2F focalLength = cameraCalibration.getFocalLength();
float fovRadians = 2 * (float) Math.atan(0.5f * (size.getData()[1] / focalLength.getData()[1]));
float fovDegrees = fovRadians * 180.0f / (float) Math.PI;
float aspectRatio = (size.getData()[0] / size.getData()[1]);
// Adjust for screen / camera size distortion
float viewportDistort = 1.0f;
if (viewportWidth != screenWidth)
{
viewportDistort = viewportWidth / screenWidth;
fovDegrees = fovDegrees * viewportDistort;
aspectRatio = aspectRatio / viewportDistort;
Log.v(TAG, "viewportDistort: " + viewportDistort + " fovDegreed: " + fovDegrees + " aspectRatio: " + aspectRatio);
}
if (viewportHeight != screenHeight)
{
viewportDistort = viewportHeight / screenHeight;
fovDegrees = fovDegrees / viewportDistort;
aspectRatio = aspectRatio * viewportDistort;
Log.v(TAG, "viewportDistort: " + viewportDistort + " fovDegreed: " + fovDegrees + " aspectRatio: " + aspectRatio);
}
setCameraPerspectiveFromVuforia(fovDegrees, aspectRatio);
setCameraViewportFromVuforia(viewportWidth, viewportHeight, cameraWidth, cameraHeight);
ViewPort foregroundViewPort = renderManager.createMainView("ForegroundView", foregroundCamera);
foregroundViewPort.attachScene(rootNode);
foregroundViewPort.setClearFlags(false, true, false);
foregroundViewPort.setBackgroundColor(ColorRGBA.Blue);
sceneInitialized = true;
}
private void ProcessTrackable(TrackableResult result, int i)
{
// Show the 3D object corresponding on the found trackable
Spatial model = rootNode.getChild(0);
model.setCullHint(CullHint.Dynamic);
Matrix44F modelViewMatrix_Vuforia = Tool.convertPose2GLMatrix(result.getPose());
Matrix44F inverseMatrix_Vuforia = MathHelpers.Matrix44FInverse(modelViewMatrix_Vuforia);
Matrix44F inverseTransposedMatrix_Vuforia = MathHelpers.Matrix44FTranspose(inverseMatrix_Vuforia);
float[] modelViewMatrix = inverseTransposedMatrix_Vuforia.getData();
// Get camera position
float cam_x = modelViewMatrix[12];
float cam_y = modelViewMatrix[13];
float cam_z = modelViewMatrix[14];
// Get camera rotation
float cam_right_x = modelViewMatrix[0];
float cam_right_y = modelViewMatrix[1];
float cam_right_z = modelViewMatrix[2];
float cam_up_x = modelViewMatrix[4];
float cam_up_y = modelViewMatrix[5];
float cam_up_z = modelViewMatrix[6];
float cam_dir_x = modelViewMatrix[8];
float cam_dir_y = modelViewMatrix[9];
float cam_dir_z = modelViewMatrix[10];
setCameraPoseFromVuforia(cam_x, cam_y, cam_z);
setCameraOrientationFromVuforia(cam_right_x, cam_right_y, cam_right_z, cam_up_x, cam_up_y, cam_up_z, cam_dir_x, cam_dir_y, cam_dir_z);
}
// we modify the left axis of the JME camera to match the coordinate system used by Vuforia
private void setCameraPerspectiveFromVuforia(float fovY, float aspectRatio)
{
foregroundCamera.setFrustumPerspective(fovY, aspectRatio, 1.0f, 1000.0f);
foregroundCamera.update();
}
private void setCameraPoseFromVuforia(float camX, float camY, float camZ)
{
foregroundCamera.setLocation(new Vector3f(camX, camY, camZ));
foregroundCamera.update();
}
private void setCameraOrientationFromVuforia(float camRightX, float camRightY, float camRightZ, float camUpX, float camUpY, float camUpZ, float camDirX, float camDirY, float camDirZ)
{
foregroundCamera.setAxes(new Vector3f(-camRightX, -camRightY, -camRightZ), new Vector3f(-camUpX, -camUpY, -camUpZ), new Vector3f( camDirX, camDirY, camDirZ));
foregroundCamera.update();
}
I have also implemented Vuforia with jMonkeyEngine. I must admit that the shaky movement of the 3D models is noticeable even when I hold my phone still, whereas using the OpenGL rendering engine with Vuforia alone doesn't produce such results.
The reason might be that what is rendered on screen is a Quad whose texture is the camera output, and updating that texture every frame also takes a while. Moreover, while running my app the phone gets very hot, so I guess it is a heavy load on the processor as well.
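One thing worth trying against the shakiness is to low-pass filter the pose you get from Vuforia before applying it to the jME camera, so single-frame tracking noise does not translate directly into model jitter. A minimal sketch of an exponential moving average on the camera position (the class is hypothetical; the same idea applies to the rotation axes, ideally via a quaternion slerp):

// Hypothetical smoothing helper: blend each new Vuforia position with the
// previous smoothed one. alpha near 1.0 follows the tracker closely (more
// jitter); alpha near 0.0 lags behind but is much steadier.
final class PoseSmoother {
    private final float alpha;
    private float x, y, z;
    private boolean hasPrevious = false;

    PoseSmoother(float alpha) { this.alpha = alpha; }

    float[] smooth(float camX, float camY, float camZ) {
        if (!hasPrevious) {
            x = camX; y = camY; z = camZ;
            hasPrevious = true;
        } else {
            x += alpha * (camX - x);
            y += alpha * (camY - y);
            z += alpha * (camZ - z);
        }
        return new float[]{x, y, z};
    }
}

You would call smooth(cam_x, cam_y, cam_z) inside ProcessTrackable and pass the filtered values to setCameraPoseFromVuforia; an alpha somewhere between 0.2 and 0.5 is a reasonable range to start experimenting with.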
I'm building a fractal application and need to generate a smooth color scheme, and I found a nice algorithm at Smooth spectrum for Mandelbrot Set rendering.
But that required me to call Color.HSBtoRGB and that method is not available in WinRT / Windows Store apps.
Is there some other built-in method to do this conversion?
Other tips on how to convert HSB to RGB?
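As far as I know there is no built-in HSB/HSV to RGB conversion in the WinRT profile, but the textbook conversion is only a few lines and easy to port. A sketch of the standard formula (written in Java here; the arithmetic carries over to C# unchanged):

// Textbook HSV -> RGB conversion; h in [0, 360), s and v in [0, 1].
// Returns {r, g, b}, each in [0, 1].
static double[] hsvToRgb(double h, double s, double v) {
    double c = v * s;                                   // chroma
    double hp = (h % 360.0) / 60.0;                     // hue sector in [0, 6)
    double x = c * (1.0 - Math.abs(hp % 2.0 - 1.0));
    double r, g, b;
    if      (hp < 1) { r = c; g = x; b = 0; }
    else if (hp < 2) { r = x; g = c; b = 0; }
    else if (hp < 3) { r = 0; g = c; b = x; }
    else if (hp < 4) { r = 0; g = x; b = c; }
    else if (hp < 5) { r = x; g = 0; b = c; }
    else             { r = c; g = 0; b = x; }
    double m = v - c;                                   // match the value (brightness)
    return new double[]{r + m, g + m, b + m};
}

Scale the three results by 255 and cast to bytes if you need the same output format as the hsb2rgb method below.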
I ended up using the HSB to RGB conversion algorithm found at http://www.adafruit.com/blog/2012/03/14/constant-brightness-hsb-to-rgb-algorithm/; I adapted the initial (long) version. Perhaps this can be further optimized, but for my purpose it was perfect!
As the hsb2rgb method is in C and I needed C#, I'm sharing my version here:
private byte[] hsb2rgb(int index, byte sat, byte bright)
{
int r_temp, g_temp, b_temp;
byte index_mod;
byte inverse_sat = (byte)(sat ^ 255);
index = index % 768;
index_mod = (byte)(index % 256);
if (index < 256)
{
r_temp = index_mod ^ 255;
g_temp = index_mod;
b_temp = 0;
}
else if (index < 512)
{
r_temp = 0;
g_temp = index_mod ^ 255;
b_temp = index_mod;
}
else if ( index < 768)
{
r_temp = index_mod;
g_temp = 0;
b_temp = index_mod ^ 255;
}
else
{
r_temp = 0;
g_temp = 0;
b_temp = 0;
}
r_temp = ((r_temp * sat) / 255) + inverse_sat;
g_temp = ((g_temp * sat) / 255) + inverse_sat;
b_temp = ((b_temp * sat) / 255) + inverse_sat;
r_temp = (r_temp * bright) / 255;
g_temp = (g_temp * bright) / 255;
b_temp = (b_temp * bright) / 255;
byte[] color = new byte[3];
color[0] = (byte)r_temp;
color[1] = (byte)g_temp;
color[2] = (byte)b_temp;
return color;
}
To call it based on the code linked in the original post I needed to make some minor modifications:
private byte[] SmoothColors1(int maxIterationCount, ref Complex z, int iteration)
{
double smoothcolor = iteration + 1 - Math.Log(Math.Log(z.Magnitude)) / Math.Log(2);
byte[] color = hsb2rgb((int)(10 * smoothcolor), (byte)(255 * 0.6f), (byte)(255 * 1.0f));
if (iteration >= maxIterationCount)
{
// Make sure the core is black
color[0] = 0;
color[1] = 0;
color[2] = 0;
}
return color;
}
What I'm doing is testing the level of intersection between a circle and a rectangle. I would like to find out whether the rectangle is completely inside the circle, partially intersecting it, or not intersecting it at all.
I've attached the code that I came up with today; it simply checks the distances from the center of the circle to the corners of the rectangle to determine the level of intersection.
What I'm wondering is there a more efficient way of doing this?
EDIT:
Here is my updated, working code. fullIntersect is my own; I found the partial-intersection snippet on Circle-Rectangle collision detection (intersection). I'm going to leave this open, as I'm still curious whether there is a better way of doing this.
public boolean fullIntersect(float circleX, float circleY, float radius)
{
    float radsq = radius * radius;
    double xsq = Math.pow(circleX - xPosition, 2);
    double xpwsq = Math.pow(circleX - (xPosition + width), 2);
    double ysq = Math.pow(circleY - yPosition, 2);
    double yphsq = Math.pow(circleY - (yPosition + height), 2);
    if(xsq + ysq > radsq || xsq + yphsq > radsq || xpwsq + yphsq > radsq || xpwsq + ysq > radsq)
        return false;
    return true;
    /* this is what the one if statement does
    double disBotLeft = xsq + ysq;
    double disTopLeft = xsq + yphsq;
    double disTopRight = xpwsq + yphsq;
    double disBotRight = xpwsq + ysq;
    if(disBotRight > radsq) return false;
    if(disBotLeft > radsq) return false;
    if(disTopLeft > radsq) return false;
    if(disTopRight > radsq) return false;
    return true;
    */
}

public int intersects(float circleX, float circleY, float radius)
{
    if(!enabled) return 0;
    double wo2 = width / 2.0d;
    double ho2 = height / 2.0d;
    double circleDistanceX = Math.abs(circleX - xPosition - wo2);
    double circleDistanceY = Math.abs(circleY - yPosition - ho2);
    if (circleDistanceX > (wo2 + radius)) { return 0; }
    if (circleDistanceY > (ho2 + radius)) { return 0; }
    if(fullIntersect(circleX, circleY, radius)) { return 2; }
    if (circleDistanceX <= (wo2)) { return 1; }
    if (circleDistanceY <= (ho2)) { return 1; }
    double cornerDistance_sq = Math.pow(circleDistanceX - wo2, 2) +
                               Math.pow(circleDistanceY - ho2, 2);
    return cornerDistance_sq <= (radius * radius) ? 1 : 0;
}
I think your code does not consider these intersections:
I'll delete this answer as soon as you enhance your code/question.
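For what it's worth, a common alternative that handles all three cases with a handful of comparisons is: clamp the circle centre to the rectangle to get the closest point (that decides overlap vs. no overlap), and test the farthest corner against the radius (if the farthest corner is inside the circle, every corner is, so the whole rectangle is). A sketch with illustrative names, using the same 0/1/2 return convention as intersects above:

// 0 = no intersection, 1 = partial intersection, 2 = rectangle fully inside circle.
static int classify(float rx, float ry, float rw, float rh,
                    float cx, float cy, float radius) {
    float r2 = radius * radius;

    // Closest point on the rectangle to the circle centre.
    float nx = Math.max(rx, Math.min(cx, rx + rw));
    float ny = Math.max(ry, Math.min(cy, ry + rh));
    float dx = cx - nx, dy = cy - ny;
    if (dx * dx + dy * dy > r2) return 0;          // no overlap at all

    // Farthest corner from the circle centre.
    float fx = (cx < rx + rw * 0.5f) ? rx + rw : rx;
    float fy = (cy < ry + rh * 0.5f) ? ry + rh : ry;
    float gx = cx - fx, gy = cy - fy;
    return (gx * gx + gy * gy <= r2) ? 2 : 1;      // fully inside vs. partial
}

This avoids the Math.pow calls and does not need a separate fullIntersect pass.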
I need a good source for reading up on how to create an algorithm that takes two polylines (a path comprised of many lines) and performs a union, subtraction, or intersection between them. This is tied to a custom API, so I need to understand the underlying algorithm.
Plus any sources in a VB dialect would be doubly helpful.
This catalogue of implementations of intersection algorithms from the Stony Brook Algorithm Repository might be useful. The repository is managed by Steven Skiena,
author of a very well respected book on algorithms: The Algorithm Design Manual.
That's his own Amazon exec link by the way :)
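Whatever reference you end up using, the primitive that polyline union, subtraction and intersection routines lean on over and over is segment-segment intersection, so it is worth having a solid version of that first. A minimal parametric sketch (in Java; the translation to a VB dialect is mechanical):

// Returns the intersection point of segments A-B and C-D, or null if the segments
// are parallel/collinear or the intersection falls outside either segment.
static double[] segmentIntersection(double ax, double ay, double bx, double by,
                                    double cx, double cy, double dx, double dy) {
    double rX = bx - ax, rY = by - ay;                    // direction of A-B
    double sX = dx - cx, sY = dy - cy;                    // direction of C-D
    double denom = rX * sY - rY * sX;                     // 2-D cross product
    if (Math.abs(denom) < 1e-12) return null;             // parallel or collinear
    double t = ((cx - ax) * sY - (cy - ay) * sX) / denom; // position along A-B
    double u = ((cx - ax) * rY - (cy - ay) * rX) / denom; // position along C-D
    if (t < 0 || t > 1 || u < 0 || u > 1) return null;    // outside one of the segments
    return new double[]{ax + t * rX, ay + t * rY};
}

A clipping/boolean algorithm such as Weiler-Atherton or Greiner-Hormann then walks both paths, splits them at these intersection points, and stitches the pieces back together according to the requested operation.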
Several routines for you here. Hope you find them useful :-)
// routine to calculate the square of either the shortest distance or largest distance
// from the CPoint to the intersection point of a ray fired at an angle flAngle
// radians at an array of line segments
// this routine returns TRUE if an intersection has been found in which case flD
// is valid and holds the square of the distance.
// and returns FALSE if no valid intersection was found
// If an intersection was found, then intersectionPoint is set to the point found
bool CalcIntersection(const CPoint &cPoint,
const float flAngle,
const int nVertexTotal,
const CPoint *pVertexList,
const BOOL bMin,
float &flD,
CPoint &intersectionPoint)
{
float d, dsx, dsy, dx, dy, lambda, mu, px, py;
int p0x, p0y, p1x, p1y;
// get source position
const float flSx = (float)cPoint.x;
const float flSy = -(float)cPoint.y;
// calc trig functions
const float flTan = tanf(flAngle);
const float flSin = sinf(flAngle);
const float flCos = cosf(flAngle);
const bool bUseSin = fabsf(flSin) > fabsf(flCos);
// initialise distance
flD = (bMin ? FLT_MAX : 0.0f);
// for each line segment in protective feature
for(int i = 0; i < nVertexTotal; i++)
{
// get coordinates of line (negate the y value so the y-axis is upwards)
p0x = pVertexList[i].x;
p0y = -pVertexList[i].y;
p1x = pVertexList[i + 1].x;
p1y = -pVertexList[i + 1].y;
// calc. deltas
dsx = (float)(cPoint.x - p0x);
dsy = (float)(-cPoint.y - p0y);
dx = (float)(p1x - p0x);
dy = (float)(p1y - p0y);
// calc. denominator
d = dy * flTan - dx;
// if line & ray are parallel
if(fabsf(d) < 1.0e-7f)
continue;
// calc. intersection point parameter
lambda = (dsy * flTan - dsx) / d;
// if intersection is not valid
if((lambda <= 0.0f) || (lambda > 1.0f))
continue;
// if sine is bigger than cosine
if(bUseSin){
mu = ((float)p0x + lambda * dx - flSx) / flSin;
} else {
mu = ((float)p0y + lambda * dy - flSy) / flCos;
}
// if intersection is valid
if(mu >= 0.0f){
// calc. intersection point
px = (float)p0x + lambda * dx;
py = (float)p0y + lambda * dy;
// calc. distance between intersection point & source point
dx = px - flSx;
dy = py - flSy;
d = dx * dx + dy * dy;
// compare with relevant value
if(bMin){
if(d < flD)
{
flD = d;
intersectionPoint.x = RoundValue(px);
intersectionPoint.y = -RoundValue(py);
}
} else {
if(d > flD)
{
flD = d;
intersectionPoint.x = RoundValue(px);
intersectionPoint.y = -RoundValue(py);
}
}
}
}
// return
return(bMin ? (flD != FLT_MAX) : (flD != 0.0f));
}
// Routine to calculate the square of the distance from the CPoint to the
// intersection point of a ray fired at an angle flAngle radians at a line.
// This routine returns TRUE if an intersection has been found in which case flD
// is valid and holds the square of the distance.
// Returns FALSE if no valid intersection was found.
// If an intersection was found, then intersectionPoint is set to the point found.
bool CalcIntersection(const CPoint &cPoint,
const float flAngle,
const CPoint &PointA,
const CPoint &PointB,
const bool bExtendLine,
float &flD,
CPoint &intersectionPoint)
{
// get source position
const float flSx = (float)cPoint.x;
const float flSy = -(float)cPoint.y;
// calc trig functions
float flTan = tanf(flAngle);
float flSin = sinf(flAngle);
float flCos = cosf(flAngle);
const bool bUseSin = fabsf(flSin) > fabsf(flCos);
// get coordinates of line (negate the y value so the y-axis is upwards)
const int p0x = PointA.x;
const int p0y = -PointA.y;
const int p1x = PointB.x;
const int p1y = -PointB.y;
// calc. deltas
const float dsx = (float)(cPoint.x - p0x);
const float dsy = (float)(-cPoint.y - p0y);
float dx = (float)(p1x - p0x);
float dy = (float)(p1y - p0y);
// Calc. denominator
const float d = dy * flTan - dx;
// If line & ray are parallel
if(fabsf(d) < 1.0e-7f)
return false;
// calc. intersection point parameter
const float lambda = (dsy * flTan - dsx) / d;
// If extending line to meet point, don't check for ray missing line
if(!bExtendLine)
{
// If intersection is not valid
if((lambda <= 0.0f) || (lambda > 1.0f))
return false; // Ray missed line
}
// If sine is bigger than cosine
float mu;
if(bUseSin){
mu = ((float)p0x + lambda * dx - flSx) / flSin;
} else {
mu = ((float)p0y + lambda * dy - flSy) / flCos;
}
// if intersection is valid
if(mu >= 0.0f)
{
// calc. intersection point
const float px = (float)p0x + lambda * dx;
const float py = (float)p0y + lambda * dy;
// calc. distance between intersection point & source point
dx = px - flSx;
dy = py - flSy;
flD = (dx * dx) + (dy * dy);
intersectionPoint.x = RoundValue(px);
intersectionPoint.y = -RoundValue(py);
return true;
}
return false;
}
// Fillet (with a radius of 0) two lines. From point source fired at angle (radians) to line Line1A, Line1B.
// Modifies line end point Line1B. If the ray does not intersect the line, it is rotated by 90 degrees
// and tried again until the fillet is complete.
void Fillet(const CPoint &source, const float fThetaRadians, const CPoint &Line1A, CPoint &Line1B)
{
if(Line1A == Line1B)
return; // No line
float dist;
if(CalcIntersection(source, fThetaRadians, Line1A, Line1B, true, dist, Line1B))
return;
if(CalcIntersection(source, CalcBaseFloat(TWO_PI, fThetaRadians + PI * 0.5f), Line1A, Line1B, true, dist, Line1B))
return;
if(CalcIntersection(source, CalcBaseFloat(TWO_PI, fThetaRadians + PI), Line1A, Line1B, true, dist, Line1B))
return;
if(!CalcIntersection(source, CalcBaseFloat(TWO_PI, fThetaRadians + PI * 1.5f), Line1A, Line1B, true, dist, Line1B))
ASSERT(FALSE); // Could not find intersection?
}
// routine to determine if an array of line segments cross gridSquare
// x and y give the float coordinates of the corners
BOOL CrossGridSquare(int nV, const CPoint *pV,
const CRect &extent, const CRect &gridSquare)
{
// test extents
if( (extent.right < gridSquare.left) ||
(extent.left > gridSquare.right) ||
(extent.top > gridSquare.bottom) ||
(extent.bottom < gridSquare.top))
{
return FALSE;
}
float a, b, c, dx, dy, s, x[4], y[4];
int max_x, max_y, min_x, min_y, p0x, p0y, p1x, p1y, sign, sign_old;
// construct array of vertices for grid square
x[0] = (float)gridSquare.left;
y[0] = (float)gridSquare.top;
x[1] = (float)(gridSquare.right);
y[1] = y[0];
x[2] = x[1];
y[2] = (float)(gridSquare.bottom);
x[3] = x[0];
y[3] = y[2];
// for each line segment
for(int i = 0; i < nV; i++)
{
// get end-points
p0x = pV[i].x;
p0y = pV[i].y;
p1x = pV[i + 1].x;
p1y = pV[i + 1].y;
// determine line extent
if(p0x > p1x){
min_x = p1x;
max_x = p0x;
} else {
min_x = p0x;
max_x = p1x;
}
if(p0y > p1y){
min_y = p1y;
max_y = p0y;
} else {
min_y = p0y;
max_y = p1y;
}
// test to see if grid square is outside of line segment extent
if( (max_x < gridSquare.left) ||
(min_x > gridSquare.right) ||
(max_y < gridSquare.top) ||
(min_y > gridSquare.bottom))
{
continue;
}
// calc. line equation
dx = (float)(p1x - p0x);
dy = (float)(p1y - p0y);
a = dy;
b = -dx;
c = -dy * (float)p0x + dx * (float)p0y;
// evaluate line eqn. at first grid square vertex
s = a * x[0] + b * y[0] + c;
if(s < 0.0f){
sign_old = -1;
} else if(s > 1.0f){
sign_old = 1;
} else {
sign_old = 0;
}
// evaluate line eqn. at other grid square vertices
for (int j = 1; j < 4; j++)
{
s = a * x[j] + b * y[j] + c;
if(s < 0.0f){
sign = -1;
} else if(s > 1.0f){
sign = 1;
} else {
sign = 0;
}
// if there has been a change in sign
if(sign != sign_old)
return TRUE;
}
}
return FALSE;
}
// calculate the square of the shortest distance from point s
// and the line segment between p0 and p1
// t is the point on the line from which the minimum distance
// is measured
float CalcShortestDistanceSqr(const CPoint &s,
const CPoint &p0,
const CPoint &p1,
CPoint &t)
{
// if point is at a vertex
if((s == p0) || (s == p1))
return(0.0F);
// calc. deltas
int dx = p1.x - p0.x;
int dy = p1.y - p0.y;
int dsx = s.x - p0.x;
int dsy = s.y - p0.y;
// if both deltas are zero
if((dx == 0) && (dy == 0))
{
// shortest distance is the distance to either vertex
float l = (float)(dsx * dsx + dsy * dsy);
t = p0;
return(l);
}
// calc. point, p, on line that is closest to sourcePosition
// p = p0 + l * (p1 - p0)
float l = (float)(dsx * dx + dsy * dy) / (float)(dx * dx + dy * dy);
// if intersection is beyond p0
if(l <= 0.0F){
// shortest distance is to p0
l = (float)(dsx * dsx + dsy * dsy);
t = p0;
// else if intersection is beyond p1
} else if(l >= 1.0F){
// shortest distance is to p1
dsx = s.x - p1.x;
dsy = s.y - p1.y;
l = (float)(dsx * dsx + dsy * dsy);
t = p1;
// if intersection is between line end points
} else {
// calc. perpendicular distance
float ldx = (float)dsx - l * (float)dx;
float ldy = (float)dsy - l * (float)dy;
t.x = p0.x + RoundValue(l * (float)dx);
t.y = p0.y + RoundValue(l * (float)dy);
l = ldx * ldx + ldy * ldy;
}
return(l);
}
// Calculates the bounding rectangle around a set of points
// Returns TRUE if the rectangle is not empty (has area), FALSE otherwise
// Opposite of CreateRectPoints()
BOOL CalcBoundingRectangle(const CPoint *pVertexList, const int nVertexTotal, CRect &rect)
{
rect.SetRectEmpty();
if(nVertexTotal < 2)
{
ASSERT(FALSE); // Must have at least 2 points
return FALSE;
}
// First point, set rectangle (no area at this point)
rect.left = rect.right = pVertexList[0].x;
rect.top = rect.bottom = pVertexList[0].y;
// Increase the rectangle by looking at the other points
for(int n = 1; n < nVertexTotal; n++)
{
if(rect.left > pVertexList[n].x) // Take minimum
rect.left = pVertexList[n].x;
if(rect.right < pVertexList[n].x) // Take maximum
rect.right = pVertexList[n].x;
if(rect.top > pVertexList[n].y) // Take minimum
rect.top = pVertexList[n].y;
if(rect.bottom < pVertexList[n].y) // Take maximum
rect.bottom = pVertexList[n].y;
}
rect.NormalizeRect(); // Normalise rectangle
return !(rect.IsRectEmpty());
}