How do I get the vertices of a BoxCollider in world space in Unity3D, with correct rotation and scale? Just doing a local-to-world transform on the object does not work.
I tried obj.transform.localToWorldMatrix.MultiplyPoint3x4(extents); and similar calls for the other corners.
Since you can easily get the BoxCollider's centre and extents, you can compute each local vertex position by adding or subtracting the extents from the centre, then transform it into world space with Transform.TransformPoint().
The following script prints the global position of each vertex of a BoxCollider attached to the same GameObject as the script, on every Update() call.
using UnityEngine;
using System.Collections;

public class DebugPrintColliderVertices : MonoBehaviour
{
    const int NUM_VERTICES = 8;
    private BoxCollider boxCollider;

    void Awake()
    {
        boxCollider = GetComponent<BoxCollider>();
        if (boxCollider == null)
        {
            Debug.Log("Collider not found on " + this.gameObject.name + ". Ending script.");
            this.enabled = false;
            return;
        }
    }

    void Update()
    {
        Vector3 colliderCentre = boxCollider.center;
        // BoxCollider exposes its full size; the extents are half of that
        Vector3 colliderExtents = boxCollider.size * 0.5f;

        for (int i = 0; i != NUM_VERTICES; ++i)
        {
            // flip each axis of the extents depending on which corner this is
            Vector3 extents = colliderExtents;
            extents.Scale(new Vector3((i & 1) == 0 ? 1 : -1, (i & 2) == 0 ? 1 : -1, (i & 4) == 0 ? 1 : -1));

            Vector3 vertexPosLocal = colliderCentre + extents;
            Vector3 vertexPosGlobal = boxCollider.transform.TransformPoint(vertexPosLocal);

            // display the Vector3 to six decimal places
            Debug.Log("Vertex " + i + " # " + vertexPosGlobal.ToString("F6"));
        }
    }
}
Note that the BoxCollider's size is expressed in the local space of the GameObject it's attached to, so it is effectively multiplied by that transform's scale. Transform.TransformPoint accounts for translation, rotation and scale all at once, which is why the script above needs no extra calculation; if you also want the box's overall world-space size, it can be derived from the transform's lossyScale.
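For instance, a minimal sketch of that world-space size calculation, using the same boxCollider field as in the script above:

// component-wise product of the collider's local size and the transform's accumulated world scale
Vector3 worldSize = Vector3.Scale(boxCollider.size, boxCollider.transform.lossyScale);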
Related
I'm trying to get the most dominant color of an image (perfect case: getting the 5 most dominant colors, sorted by most used). Is there a way to do that in Processing? I already tried some code I found, but with it I'm only getting the average color:
color extractColorFromImage(PImage img)
{
img.loadPixels();
int r = 0, g = 0, b = 0;
for (int i=0; i<img.pixels.length; i++)
{
color c = img.pixels[i];
r += c>>16&0xFF;
g += c>>8&0xFF;
b += c&0xFF;
}
r /= img.pixels.length; g /= img.pixels.length; b /= img.pixels.length;
return color(r, g, b);
}
So that's not really what I need. I've already read that I could do it with HSV, k-means and so on. Is there any way to do it in Processing?
Example: here I want to get red as the dominant color, but with the code above I get a dark orange. Red-Blue Picture
What about this?
Load the image into a bitmap and examine every pixel, counting how many times each colour occurs.
using System.Collections.Generic;
using System.Drawing;
using System.Linq;

static Dictionary<Color, int> CalcImageColors(Bitmap image)
{
    var frequency = new Dictionary<Color, int>();
    for (var h = 0; h < image.Height; h++)
    {
        for (var w = 0; w < image.Width; w++)
        {
            var pixel = image.GetPixel(w, h);
            if (frequency.ContainsKey(pixel))
            {
                frequency[pixel]++;
            }
            else
            {
                frequency.Add(pixel, 1);
            }
        }
    }
    // most frequent colour first
    return frequency.OrderByDescending(x => x.Value).ToDictionary(x => x.Key, x => x.Value);
}
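A quick usage sketch (the file name is just a placeholder): since the returned dictionary is ordered by frequency, taking its first five entries yields the five dominant colours asked about above:

// hypothetical input file; Take(5) yields the five most frequent colours
var colorCounts = CalcImageColors(new Bitmap("image.png"));
foreach (var entry in colorCounts.Take(5))
{
    Console.WriteLine(entry.Key + ": " + entry.Value + " pixels");
}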
The RGB colour space may not always work as you'd expect in terms of mixing/averaging colours. You should convert to a perceptual colour space like CIE LAB. To do that you first convert from RGB to CIE XYZ, then from CIE XYZ to CIE LAB. For more info check out these pages on CIE XYZ and CIE LAB.
In terms of Processing, here's a prototype using the RGB <> CIE XYZ <> CIE LAB colour conversion code from this answer, with small tweaks to make it compile in the Processing IDE (which is antsy about the static keyword):
void setup(){
PImage src = loadImage("http://i.stack.imgur.com/0H1OM.png");
size(src.width*4,src.height);
noStroke();
//display original image
image(src,0,0);
//display RGB average color
fill(extractColorFromImage(src));
rect(src.width,0,src.width,src.height);
//display (perceptual)Lab average color
fill(extractAverageColorFromImage(src));
rect(src.width*2,0,src.width,src.height);
//display the most dominant colour
fill(extractDominantColorFromImage(src));
rect(src.width*3,0,src.width,src.height);
}
color extractDominantColorFromImage(PImage img){
//create a hashmap - the key is the colour, the value associated is the number of pixels per colour
HashMap<Integer,Integer> colorCounter = new HashMap<Integer,Integer>();
int numPixels = img.pixels.length;
for (int i=0; i<numPixels; i++){
int colorKey = img.pixels[i];
//if the colour has already been added to the hashmap, increment the count
if(colorCounter.containsKey(colorKey)){
colorCounter.put(colorKey,colorCounter.get(colorKey)+1);
}else{//otherwise count it as 1
colorCounter.put(colorKey,1);
}
}
//find the most dominant colour - note you can implement this to return more than one if you need to
int max = 0;//what's the highest density of pixels per one colour
int dominantColor = 0;//which colour is it
//for each key (colour) in the keyset
for(int colorKey : colorCounter.keySet()){
//get the pixel count per colour
int count = colorCounter.get(colorKey);
//if this one is the highest, updated the max value and keep track of the colour
if(count > max){
max = count;
dominantColor = colorKey;
}
}
//return the winner (colour with most pixels associated)
return dominantColor;
}
color extractColorFromImage(PImage img)
{
img.loadPixels();
int r = 0, g = 0, b = 0;
for (int i=0; i<img.pixels.length; i++)
{
color c = img.pixels[i];
r += c>>16&0xFF;
g += c>>8&0xFF;
b += c&0xFF;
}
r /= img.pixels.length; g /= img.pixels.length; b /= img.pixels.length;
return color(r, g, b);
}
color extractAverageColorFromImage(PImage img){
float[] average = new float[3];
CIELab lab = new CIELab();
int numPixels = img.pixels.length;
for (int i=0; i<numPixels; i++){
color rgb = img.pixels[i];
float[] labValues = lab.fromRGB(new float[]{red(rgb)/255f,green(rgb)/255f,blue(rgb)/255f}); //ColorSpace.fromRGB expects components in 0..1
average[0] += labValues[0];
average[1] += labValues[1];
average[2] += labValues[2];
}
average[0] /= numPixels;
average[1] /= numPixels;
average[2] /= numPixels;
float[] rgb = lab.toRGB(average);
return color(rgb[0] * 255,rgb[1] * 255,rgb[2] * 255);
}
//from https://stackoverflow.com/questions/4593469/java-how-to-convert-rgb-color-to-cie-lab
import java.awt.color.ColorSpace;
public class CIELab extends ColorSpace {
@Override
public float[] fromCIEXYZ(float[] colorvalue) {
double l = f(colorvalue[1]);
double L = 116.0 * l - 16.0;
double a = 500.0 * (f(colorvalue[0]) - l);
double b = 200.0 * (l - f(colorvalue[2]));
return new float[] {(float) L, (float) a, (float) b};
}
@Override
public float[] fromRGB(float[] rgbvalue) {
float[] xyz = CIEXYZ.fromRGB(rgbvalue);
return fromCIEXYZ(xyz);
}
@Override
public float getMaxValue(int component) {
return 128f;
}
@Override
public float getMinValue(int component) {
return (component == 0)? 0f: -128f;
}
@Override
public String getName(int idx) {
return String.valueOf("Lab".charAt(idx));
}
@Override
public float[] toCIEXYZ(float[] colorvalue) {
double i = (colorvalue[0] + 16.0) * (1.0 / 116.0);
double X = fInv(i + colorvalue[1] * (1.0 / 500.0));
double Y = fInv(i);
double Z = fInv(i - colorvalue[2] * (1.0 / 200.0));
return new float[] {(float) X, (float) Y, (float) Z};
}
@Override
public float[] toRGB(float[] colorvalue) {
float[] xyz = toCIEXYZ(colorvalue);
return CIEXYZ.toRGB(xyz);
}
CIELab() {
super(ColorSpace.TYPE_Lab, 3);
}
private double f(double x) {
if (x > 216.0 / 24389.0) {
return Math.cbrt(x);
} else {
return (841.0 / 108.0) * x + N;
}
}
private double fInv(double x) {
if (x > 6.0 / 29.0) {
return x*x*x;
} else {
return (108.0 / 841.0) * (x - N);
}
}
// private Object readResolve() {
// return getInstance();
// }
// private static class Holder {
// static final CIELab INSTANCE = new CIELab();
// }
// private static final long serialVersionUID = 5027741380892134289L;
private final ColorSpace CIEXYZ =
ColorSpace.getInstance(ColorSpace.CS_CIEXYZ);
private final double N = 4.0 / 29.0;
}
You can see a preview below with the original image, then (in this order):
the RGB average
the LAB average
the most dominant colour
Break your problem down into smaller steps.
Step 1: Can you iterate over the pixels in the image? Check out the get() function to help with that, or use the for loop from your code. But first, try just printing out the RGB value of each pixel.
Step 2: When you have that working, try keeping track of the count of each color you see. How you do this depends on exactly what you want: should (255, 0, 0) and (200, 0, 0) both count as red? One way is to use a HashMap<Integer, Integer> (a Processing color is just an int) that keeps track of the count of each color; a rough sketch of this idea appears after these steps.
Step 3: Given the counts of each color, now you can output the dominant color. How you do this depends on the data structure you used in step 2.
If you get stuck on a specific step, post an MCVE and we'll go from there. Good luck!
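For illustration, here is a hedged C# sketch of steps 2 and 3, assuming the pixels are packed ARGB ints (as Processing's color values are); the bucket size of 32 is an arbitrary choice for grouping near-identical shades:

using System.Collections.Generic;
using System.Linq;

static class DominantColorSketch
{
    // quantize each 8-bit channel into buckets of 32 so near-identical shades
    // (e.g. (255, 0, 0) and (250, 0, 0)) count as the same colour
    static int Quantize(int argb)
    {
        int r = (argb >> 16) & 0xFF, g = (argb >> 8) & 0xFF, b = argb & 0xFF;
        return ((r / 32 * 32) << 16) | ((g / 32 * 32) << 8) | (b / 32 * 32);
    }

    // count each quantized colour and return the most frequent one
    public static int Dominant(int[] pixels)
    {
        var counts = new Dictionary<int, int>();
        foreach (int p in pixels)
        {
            int key = Quantize(p);
            counts[key] = counts.TryGetValue(key, out int n) ? n + 1 : 1;
        }
        return counts.OrderByDescending(kv => kv.Value).First().Key;
    }
}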
You might want to look at this tutorial on finding dominant colors in an image; it's a more mathematical take on the problem. The idea is to use statistics on the image to figure out the main colors. Source code is available for OpenCV, so it should be possible to adapt it for use with Processing!
For context: I am going to analyze the breathing movement of parents during kangaroo mother care and I wish to respect their privacy by not recording them, but only the movement of stickers I placed on their chest and stomach.
So far, I'm able to track 2 colours based on webcam input through the code below. However, I would like to record only the tracked colours instead of the webcam feed as to preserve the privacy of the parent.
Does anybody know how to add a background colour, whilst still being able to track colour?
import processing.video.*;
Capture video;
final int TOLERANCE = 20;
float XRc = 0;// XY coordinate of the center of the first target
float YRc = 0;
float XRh = 0;// XY coordinate of the center of the second target
float YRh = 0;
int ii=0; //Mouse click counter
color trackColor; //The first color is the center of the robot
color trackColor2; //The second color is the head of the robot
void setup() {
size(640,480);
video = new Capture(this,640,480);
video.start();
trackColor = color(255,0,0);
trackColor2 = color(255,0,0);
smooth();
}
void draw() {
background(0);
if (video.available()) {
video.read();
}
video.loadPixels();
image(video,0,0);
float r2 = red(trackColor);
float g2 = green(trackColor);
float b2 = blue(trackColor);
float r3 = red(trackColor2);
float g3 = green(trackColor2);
float b3 = blue(trackColor2);
int somme_x = 0, somme_y = 0;
int compteur = 0;
int somme_x2 = 0, somme_y2 = 0;
int compteur2 = 0;
for(int x = 0; x < video.width; x++) {
for(int y = 0; y < video.height; y++) {
int currentLoc = x + y*video.width;
color currentColor = video.pixels[currentLoc];
float r1 = red(currentColor);
float g1 = green(currentColor);
float b1 = blue(currentColor);
if(dist(r1,g1,b1,r2,g2,b2) < TOLERANCE) {
somme_x += x;
somme_y += y;
compteur++;
}
else if(compteur > 0) {
XRc = somme_x / compteur;
YRc = somme_y / compteur;
}
if(dist(r1,g1,b1,r3,g3,b3) < TOLERANCE) {
somme_x2 += x;
somme_y2 += y;
compteur2++;
}
else if(compteur2 > 0) {
XRh = somme_x2 / compteur2;
YRh = somme_y2 / compteur2;
}
}
}
if(XRc != 0 || YRc != 0) { // Draw a circle at the first target
fill(trackColor);
strokeWeight(0.05);
stroke(0);
ellipse(XRc,YRc,20,20);
}
if(XRh != 0 || YRh != 0) {// Draw a circle at the second target
fill(trackColor2);
strokeWeight(0.05);
stroke(0);
ellipse(XRh,YRh,20,20);
}
}
void mousePressed() {
if (mousePressed && (mouseButton == RIGHT)) { // Save color where the mouse is clicked in trackColor variable
if(ii==0){
if (mouseY>480){mouseY=0;mouseX=0;}
int loc = mouseX + mouseY*video.width;
trackColor = video.pixels[loc];
ii=1;
}
else if(ii==1){
if (mouseY>480){mouseY=0;mouseX=0;}
int loc2 = mouseX + mouseY*video.width;
trackColor2 = video.pixels[loc2];
ii=2;
}
}
}
Try adding another background(0) call right before you draw the first circle (after the tracking loops have finished). It will cover the video frame, and the two tracking circles will then be drawn on top of it, so only the tracked positions are visible.
I'm a newbie in Kinect programming; I am working on ball tracking using Kinect and OpenCV. We all know that Kinect provides depth data, and with the code below:
DepthImagePoint righthandDepthPoint =
sensor.CoordinateMapper.MapSkeletonPointToDepthPoint
(
me.Joints[JointType.HandRight].Position,
DepthImageFormat.Resolution640x480Fps30
);
double rightdepthmeters = (righthandDepthPoint.Depth);
Using this, I am able to get the depth of the right hand with MapSkeletonPointToDepthPoint() by specifying the joint type.
Is it possible to get the depth of other objects by specifying a position in the image?
Given a coordinate, I want to get the depth of the object at that coordinate.
Depth data from the Kinect SDK can be extracted from the DepthImagePixel structure.
The example code below loops through the entire DepthImageFrame to examine each of the pixels. If you have a specific coordinate you wish to look at, remove the for loops and set x and y to specific values.
// global variables
private const DepthImageFormat DepthFormat = DepthImageFormat.Resolution320x240Fps30;
private const ColorImageFormat ColorFormat = ColorImageFormat.RgbResolution640x480Fps30;
private DepthImagePixel[] depthPixels;
private int depthWidth, depthHeight;
// defined in an initialization function
this.depthWidth = this.sensor.DepthStream.FrameWidth;
this.depthHeight = this.sensor.DepthStream.FrameHeight;
this.depthPixels = new DepthImagePixel[this.sensor.DepthStream.FramePixelDataLength];
private void SensorAllFramesReady(object sender, AllFramesReadyEventArgs e)
{
if (null == this.sensor)
return;
bool depthReceived = false;
using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
{
if (null != depthFrame)
{
// Copy the pixel data from the image to a temporary array
depthFrame.CopyDepthImagePixelDataTo(this.depthPixels);
depthReceived = true;
}
}
if (true == depthReceived)
{
// loop over each row and column of the depth
for (int y = 0; y < this.depthHeight; ++y)
{
for (int x = 0; x < this.depthWidth; ++x)
{
// calculate index into depth array
int depthIndex = x + (y * this.depthWidth);
// extract the given index
DepthImagePixel depthPixel = this.depthPixels[depthIndex];
Debug.WriteLine("Depth at [" + x + ", " + y + "] is: " + depthPixel.Depth);
}
}
}
}
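For the single-coordinate case mentioned above, a minimal sketch to use inside the same depthReceived branch; the coordinate values here are arbitrary examples:

// look up one pixel instead of looping over the whole frame
int x = 160, y = 120; // example coordinate inside the 320x240 depth frame
int depthIndex = x + (y * this.depthWidth);
short depthInMillimeters = this.depthPixels[depthIndex].Depth;
Debug.WriteLine("Depth at [" + x + ", " + y + "] is: " + depthInMillimeters + " mm");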
I'm learning DirectX, using the book "Sherrod A., Jones W. - Beginning DirectX 11 Game Programming - 2011" Now I'm exploring the 4th chapter about drawing text.
Please help me fix the function I'm using to draw a string on the screen. I've already loaded the font texture, and in the function I create sprites for the letters and define texture coordinates for them. This compiles correctly but doesn't draw anything. What's wrong?
bool DirectXSpriteGame :: DrawString(char* StringToDraw, float StartX, float StartY)
{
//VAR
HRESULT D3DResult; //The result of D3D functions
int i; //Counters
const int IndexA = static_cast<char>('A'); //ASCII index of letter A
const int IndexZ = static_cast<char>('Z'); //ASCII index of letter Z
int StringLenth = strlen(StringToDraw); //Length of the string to draw
float ScreenCharWidth = static_cast<float>(LETTER_WIDTH) / static_cast<float>(SCREEN_WIDTH); //Width of the single char on the screen(in %)
float ScreenCharHeight = static_cast<float>(LETTER_HEIGHT) / static_cast<float>(SCREEN_HEIGHT); //Height of the single char on the screen(in %)
float TexelCharWidth = 1.0f / static_cast<float>(LETTERS_NUM); //Width of the char texel(in the texture %)
float ThisStartX; //The start x of the current letter being drawn
float ThisStartY; //The start y of the current letter being drawn
float ThisEndX; //The end x of the current letter, drawing
float ThisEndY; //The end y of the current letter, drawing
int LetterNum; //Letter number in the loaded font
int ThisLetter; //The current letter
D3D11_MAPPED_SUBRESOURCE MapResource; //Map resource
VertexPos* ThisSprite; //Vertecies of the current sprite, drawing
//VAR
//Clamping string, if too long
if(StringLenth > LETTERS_NUM)
{
StringLenth = LETTERS_NUM;
}
//Mapping resource
D3DResult = _DeviceContext -> Map(_vertexBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &MapResource);
if(FAILED(D3DResult))
{
throw("Failed to map resource");
}
ThisSprite = (VertexPos*)MapResource.pData;
for(i = 0; i < StringLenth; i++)
{
//Creating geometry for the letter sprite
ThisStartX = StartX + ScreenCharWidth * static_cast<float>(i);
ThisStartY = StartY;
ThisEndX = ThisStartX + ScreenCharWidth;
ThisEndY = StartY + ScreenCharHeight;
ThisSprite[0].Position = XMFLOAT3(ThisEndX, ThisEndY, 1.0f);
ThisSprite[1].Position = XMFLOAT3(ThisEndX, ThisStartY, 1.0f);
ThisSprite[2].Position = XMFLOAT3(ThisStartX, ThisStartY, 1.0f);
ThisSprite[3].Position = XMFLOAT3(ThisStartX, ThisStartY, 1.0f);
ThisSprite[4].Position = XMFLOAT3(ThisStartX, ThisEndY, 1.0f);
ThisSprite[5].Position = XMFLOAT3(ThisEndX, ThisEndY, 1.0f);
ThisLetter = static_cast<char>(StringToDraw[i]);
//Defining the letter place(number) in the font
if(ThisLetter < IndexA || ThisLetter > IndexZ)
{
//Invalid character, the last character in the font, loaded
LetterNum = IndexZ - IndexA + 1;
}
else
{
LetterNum = ThisLetter - IndexA;
}
//Unwraping texture on the geometry
ThisStartX = TexelCharWidth * static_cast<float>(LetterNum);
ThisStartY = 0.0f;
ThisEndY = 1.0f;
ThisEndX = ThisStartX + TexelCharWidth;
ThisSprite[0].TextureCoords = XMFLOAT2(ThisEndX, ThisEndY);
ThisSprite[1].TextureCoords = XMFLOAT2(ThisEndX, ThisStartY);
ThisSprite[2].TextureCoords = XMFLOAT2(ThisStartX, ThisStartY);
ThisSprite[3].TextureCoords = XMFLOAT2(ThisStartX, ThisStartY);
ThisSprite[4].TextureCoords = XMFLOAT2(ThisStartX, ThisEndY);
ThisSprite[5].TextureCoords = XMFLOAT2(ThisEndX, ThisEndY);
ThisSprite += VERTEX_IN_RECT_NUM;
}
for(i = 0; i < StringLenth; i++, ThisSprite -= VERTEX_IN_RECT_NUM);
_DeviceContext -> Unmap(_vertexBuffer, 0);
_DeviceContext -> Draw(VERTEX_IN_RECT_NUM * StringLenth, 0);
return true;
}
Although the piece of code constructing the vertex array seems correct to me at first glance, it looks like you are trying to draw your vertices with a shader which has not been set yet!
It is difficult to answer precisely without seeing the whole code, but I can guess that you will need to do something like this:
1) Create the Vertex and Pixel Shaders by compiling them first from their respective buffers.
2) Create the Input Layout description, which describes the input buffers that will be read by the Input Assembler stage. It has to match your VertexPos structure and your shader's input structure.
3) Set the shader parameters.
4) Only now can you set the shader rendering state: set the InputLayout, as well as the Vertex and Pixel Shaders that will be used to render your triangles, with something like this:
_DeviceContext -> Unmap(_vertexBuffer, 0);
_DeviceContext->IASetInputLayout(myInputLayout);
_DeviceContext->VSSetShader(myVertexShader, NULL, 0); // Set Vertex shader
_DeviceContext->PSSetShader(myPixelShader, NULL, 0); // Set Pixel shader
_DeviceContext -> Draw(VERTEX_IN_RECT_NUM * StringLenth, 0);
This link should help you achieve what you want to do : http://www.rastertek.com/dx11tut12.html
Also, I recommend you set up an index buffer and use DrawIndexed to render your triangles, for performance reasons: it allows the graphics adapter to store vertices in a vertex cache, so recently used vertices can be fetched from the cache instead of being read from the vertex buffer.
More about this concern can be found on MSDN : http://msdn.microsoft.com/en-us/library/windows/desktop/bb147325(v=vs.85).aspx
Hope this helps!
P.S.: Also, don't forget to release the resources after using them by calling Release().
I'm trying to create a scrollView, but I don't know how to determine the maximum range of the scroll (or even better, the maximum scroll position).
This is what I have tried to do without any positive results:
void Update()
{
//get the amount of space that my text is taking
textSize = gs.CalcHeight(new GUIContent(text), (screenWidth/2));
if(Input.touchCount > 0 && Input.GetTouch(0).phase == TouchPhase.Moved && Input.GetTouch(0).tapCount==1)
{
var touch = Input.touches[0];
//check if touch is within the scrollView area
if(touch.position.x >= (screenWidth/2) && touch.position.x <= (screenWidth) && touch.position.y >= 0 && touch.position.y <= (screenHeight))
{
if(touch.phase == TouchPhase.Moved)
{
scrollPosition[1] += touch.deltaPosition.y;
if(scrollPosition[1] < 0)
{
scrollPosition[1] = 0;
}
//subtract 16 because the scrollbars reportedly take 16px
if(scrollPosition[1] > (textSize-scrollViewRect.yMax-16)) //scrollViewRect is a Rect with the size of my scrollView
{
scrollPosition[1] = textSize-scrollViewRect.yMax-16;
}
}
}
}
}
void OnGUI()
{
screenWidth = camera.pixelWidth;
screenHeight = camera.pixelHeight;
GUILayout.BeginArea(new Rect(screenWidth/2, 0, screenWidth/2, screenHeight));
GUILayout.BeginScrollView(scrollPosition/*, GUILayout.Width (screenWidth/2), GUILayout.Height (screenHeight/2)*/, "box");
GUILayout.Label (new GUIContent(text), gs);
// End the scrollview
GUILayout.EndScrollView ();
scrollViewRect = GUILayoutUtility.GetLastRect();
GUILayout.EndArea();
}
This was done assuming that the scrollView is on the right half of the screen.
Can you guys help me out on this?
Thanks in advance.
As shown in the API documentation for GUI.BeginScrollView, the method returns the updated scroll position, so you can assign its return value to a Vector2 to track where the box has been scrolled to.
I'll give an example:
using UnityEngine;
using System.Collections;
public class ScrollSize : MonoBehaviour {
private int screenBoxX = Screen.width;
private int screenBoxY = Screen.height;
private int scrollBoxX = Screen.width * 2;
private int scrollBoxY = Screen.height * 3;
public Vector2 scrollPosition = Vector2.zero;
void Awake()
{
Debug.Log ("box size is " + scrollBoxX + " " + scrollBoxY);
}
void OnGUI()
{
scrollPosition = GUI.BeginScrollView(new Rect(0, 0, screenBoxX, screenBoxY),
scrollPosition, new Rect(0, 0, scrollBoxX, scrollBoxY));
GUI.EndScrollView();
// showing 4 decimal places
Debug.Log ("Box scrolled # " + scrollPosition.ToString("F4"));
}
}
For the explanation of this example, let's assume that the screen resolution is 800 by 600.
Therefore the viewable scrollbox on the screen will be 800 wide and 600 high, with the internal area of the scrollbox being 1600 by 1800.
We create the scrollbox by assigning the return value of GUI.BeginScrollView() to the Vector2 scrollPosition. As the scrollbox is scrolled horizontally and vertically, the Vector2 updates with the scrolled values.
If the scrollbox is scrolled to the top-leftmost position, the value of scrollPosition will be 0, 0.
If the scrollbox is scrolled to the bottom-rightmost position, the value of scrollPosition will be 816, 1216: the internal area minus the viewable area, plus the thickness of the scroll bars.
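Put differently, the maximum scroll position can be computed directly. A small sketch, assuming 16-pixel scrollbars (the actual thickness depends on the GUI skin in use):

// internal content size minus the viewable size, plus the scrollbar thickness
Vector2 maxScroll = new Vector2(scrollBoxX - screenBoxX + 16,
                                scrollBoxY - screenBoxY + 16);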
I know this is 3 years later but maybe it will help others too...
I found that setting the scrollPosition to a very high number will force the scrollbar to the bottom:
scrollPosition.y = 999.9f;
Then you can read the position back like this:
scrollPosition = GUILayout.BeginScrollView(scrollPosition,
GUILayout.Width(370),
GUILayout.Height(135));
And it will be set to whatever the maximum is.
Hope this helps!