How to extract the detected region in OpenCV? - visual-c++

Hello everyone. I have shown my code for tracking objects below; it also displays the background subtraction result. I am using the frame differencing method. My problem is that I now have to extract the moving object from the color video file. I have done the segmentation, but for detection I want to extract the region on which I have drawn the bounding box. Can anybody help me with this? Thank you in advance.
// Frame-differencing tracker using the legacy OpenCV C API:
// running-average background model, thresholding, morphology, contour bounding boxes.
#include <opencv2/opencv.hpp> // in OpenCV 2.x this umbrella header also pulls in the legacy C API (IplImage, cvCaptureFromFile, ...)
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char* argv[])
{
    CvSize imgSize;
    //CvCapture *capture = cvCaptureFromFile("S:\\offline object detection database\\video1.avi");
    CvCapture *capture = cvCaptureFromFile("S:\\offline object detection database\\SINGLE PERSON Database\\Walk1.avi");
    if (!capture) {
        printf("Capture failure\n");
        return -1;
    }

    IplImage* frame = 0;
    frame = cvQueryFrame(capture);
    if (!frame)
        return -1;

    imgSize = cvGetSize(frame);
    IplImage* greyImage = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
    IplImage* colourImage;
    IplImage* movingAverage = cvCreateImage(imgSize, IPL_DEPTH_32F, 3);
    IplImage* difference;
    IplImage* temp;
    IplImage* motionHistory = cvCreateImage(imgSize, IPL_DEPTH_8U, 3); // currently unused

    CvRect bndRect = cvRect(0, 0, 0, 0);
    CvPoint pt1, pt2;
    CvFont font;

    int prevX = 0;
    int numPeople = 0;
    char wow[65];
    int avgX = 0;
    bool first = true;
    int closestToLeft = 0;
    int closestToRight = 320;

    for (;;)
    {
        colourImage = cvQueryFrame(capture);
        if (!colourImage)
        {
            break;
        }

        if (first)
        {
            // First frame: initialise the scratch images and the running average.
            difference = cvCloneImage(colourImage);
            temp = cvCloneImage(colourImage);
            cvConvertScale(colourImage, movingAverage, 1.0, 0.0);
            first = false;
        }
        else
        {
            // Update the running-average background model.
            cvRunningAvg(colourImage, movingAverage, 0.020, NULL);
        }

        // Difference between the current frame and the background model.
        cvConvertScale(movingAverage, temp, 1.0, 0.0);
        cvAbsDiff(colourImage, temp, difference);

        // Convert to greyscale, threshold, then clean up with smoothing and morphology.
        cvCvtColor(difference, greyImage, CV_RGB2GRAY);
        cvThreshold(greyImage, greyImage, 80, 250, CV_THRESH_BINARY);
        cvSmooth(greyImage, greyImage, CV_GAUSSIAN); // smoothtype 2 == CV_GAUSSIAN
        cvDilate(greyImage, greyImage, 0, 1);
        cvErode(greyImage, greyImage, 0, 1);
        cvShowImage("back", greyImage);

        // Find contours in the foreground mask and draw bounding boxes.
        CvMemStorage* storage = cvCreateMemStorage(0);
        CvSeq* contour = 0;
        cvFindContours(greyImage, storage, &contour, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);

        for ( ; contour != 0; contour = contour->h_next)
        {
            bndRect = cvBoundingRect(contour, 0);

            pt1.x = bndRect.x;
            pt1.y = bndRect.y;
            pt2.x = bndRect.x + bndRect.width;
            pt2.y = bndRect.y + bndRect.height;

            avgX = (pt1.x + pt2.x) / 2;

            if (avgX > 90 && avgX < 250)
            {
                if (closestToLeft >= 88 && closestToLeft <= 90)
                {
                    if (avgX > prevX)
                    {
                        numPeople++;
                        closestToLeft = 0;
                    }
                }
                else if (closestToRight >= 250 && closestToRight <= 252)
                {
                    if (avgX < prevX)
                    {
                        numPeople++;
                        closestToRight = 220;
                    }
                }
                cvRectangle(colourImage, pt1, pt2, CV_RGB(255, 0, 0), 1);
            }

            if (avgX > closestToLeft && avgX <= 90)
            {
                closestToLeft = avgX;
            }
            if (avgX < closestToRight && avgX >= 250)
            {
                closestToRight = avgX;
            }

            prevX = avgX;
        }

        cvReleaseMemStorage(&storage); // release contour storage so it does not leak every frame

        cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 0.8, 0.8, 0, 2);
        cvPutText(colourImage, _itoa(numPeople, wow, 10), cvPoint(60, 200), &font, cvScalar(0, 0, 300));
        cvShowImage("My Window", colourImage);
        cvShowImage("fore", greyImage);
        cvWaitKey(10);
    }

    cvReleaseImage(&temp);
    cvReleaseImage(&difference);
    cvReleaseImage(&greyImage);
    cvReleaseImage(&movingAverage);
    cvDestroyWindow("My Window");
    cvReleaseCapture(&capture);
    return 0;
}

In OpenCV's legacy C API you can mark a region of interest (ROI) on an image with the call below. Add this line to your code and, for most subsequent operations, the image is treated as if it contained only that region:
cvSetImageROI(colourImage, bndRect);
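If you want the boxed region as a separate image rather than just restricting later operations, you can copy the ROI out and then reset it. A minimal sketch in the legacy C API, assuming bndRect is the box from cvBoundingRect and colourImage is the current frame (the window name is illustrative):
cvSetImageROI(colourImage, bndRect);                      // operate only inside the box
IplImage* region = cvCreateImage(cvSize(bndRect.width, bndRect.height),
                                 colourImage->depth, colourImage->nChannels);
cvCopy(colourImage, region, NULL);                        // copy just the ROI pixels
cvResetImageROI(colourImage);                             // restore the full frame
cvShowImage("extracted region", region);
// ... use region, then cvReleaseImage(&region) when finished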
In the OpenCV 2.0 API, your old image and "extracted region" image would be stored in separate Mat objects, but point to the same data:
Mat colourImage, extractedregion;
colourImage = imread("test.bmp");
extractedregion = colourImage(bndRect); // Creates only a header, no new image data
Many helpful OpenCV tutorials still use the legacy API, but you should prefer the new one.
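Because the two Mats share pixel data, anything written through the extracted header also shows up in the full image. A small illustrative sketch (the rectangle values are made up):
cv::Mat colourImage = cv::imread("test.bmp");
cv::Rect bndRect(10, 10, 100, 80);               // illustrative box
cv::Mat extractedregion = colourImage(bndRect);  // header only, pixels are shared
extractedregion.setTo(cv::Scalar(0, 0, 255));    // paints the same region red in colourImage as well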

I know how to do it with the new OpenCV interface, rather than the "legacy" interface you are using.
It would be like this:
cv::Mat frame_m(frame);
...
cv::Mat region_m = frame_m(cv::Rect(bndRect));
IplImage region = region_m; // use &region when an IplImage* is needed.
If you don't want to mix interfaces, it is time to learn the new one.
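Note that region_m is only a header into frame_m's pixel data; if you need an independent copy of the detected region, a short sketch using the C++ API would be:
cv::Mat region_copy = frame_m(cv::Rect(bndRect)).clone(); // deep copy of the boxed region
cv::imwrite("region.png", region_copy);                   // e.g. write it to disk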

Related

How to switch between 'scenes' using switch statement

So I'm creating an interactive story, and I want to switch between scenes by clicking buttons (like a standard old Pokemon game, except there isn't movement, just clicking).
I should probably use a switch statement, right? But I don't know how to implement the changing of scenes and the loading of scenes into the cases. I also don't know if I would need to use 'if' statements within the cases.
This might be confusing, because it is very confusing to me. I'd appreciate any help you guys can offer! I am desperate ;)
Code so far is below:
//PImages
PImage startScreen;
PImage[] waves = new PImage[3];
PImage[] scenes = new PImage[1];
int switchVariable;
//Objects
Button play;

void setup() {
  size(750, 600);
  background(#A3E9EA);
  //Initialising Objects
  play = new Button(50, 480, 330);
  //loading wave images
  waves[0] = loadImage("wave1.png");
  waves[1] = loadImage("wave2.png");
  waves[2] = loadImage("wave3.png");
  //loading start image
  startScreen = loadImage("start-screen.png");
  //loading scenes
  scenes[0] = loadImage("scene-one.png");
  //setting frame rate
  frameRate(6);
}

void draw() {
  background(#A3E9EA);
  frameCount++;
  println(frameCount);
  //drawing wave animation
  if (frameCount < 5) {
    image(waves[0], 0, 0);
  }
  else {
    image(waves[1], 0, 0);
  }
  if (frameCount < 15 & frameCount > 10) {
    background(#A3E9EA);
    image(waves[2], 0, 0);
    frameCount = 0;
  }
  //drawing start screen
  image(startScreen, 0, 0);
  //displaying play button
  if (play.visible) play.buttonDisplay();
}

void mousePressed() {
  if (play.visible) {
    float d = dist(play.x+110, play.y+22, mouseX, mouseY);
    if (d <= play.radius) {
      background(#A3E9EA);
      image(scenes[0], 0, 0);
    }
  }
}
Button Class:
class Button {
  float radius;
  float x;
  float y;
  PImage[] buttonImage = new PImage[2];
  boolean visible;

  Button(float _radius, float _x, float _y) {
    radius = _radius;
    visible = true;
    x = _x;
    y = _y;
    buttonImage[0] = loadImage("play-game-button.png");
  }

  void buttonDisplay() {
    image(buttonImage[0], x, y);
  }
}
The source code below shows one possible approach to your question. It does not rely on 'switch'; instead it uses a custom control with up and down buttons, similar to a Java stepper. The stepper value corresponds to an image in the scenes array, and background() is set accordingly. A separate Button class is required; its code follows the demo.
color BLUE = color(64, 124, 188);
color LTGRAY = color(185, 180, 180);
color YELLOW = color(245, 250, 13);
color RED = color(255, 0, 0);
color BLACK = color(0, 0, 0);
color WHITE = color(255, 255, 255);
color GREEN = color(0, 255, 0);
color ORANGE = color(247, 168, 7);

PFont font;
PImage[] scenes = new PImage[6];

// **** Up/Down Buttons init, min, max values **** //
final int _initValue = 2;
final int _maxValue = 5;
final int _minValue = 0;
int stepperValue = 0;

Button _up;
Button _dwn;
Button _quit;

final int _displayX = 160;
final int _displayY = 30;
final int _displayW = 100;
final int _displayH = 24;
final int _txtSize = 18;

void stepperValueDisplay(int value) {
  fill(WHITE); // background color
  noStroke();
  rect(_displayX, _displayY, _displayW, _displayH, 0);
  fill(BLACK); // text color
  textSize(_txtSize);
  textAlign(CENTER);
  String str = String.format("Scene %d", value);
  text(str, _displayX, _displayY, _displayW, _displayH);
}

void scene_0() {
  background(RED);
  save("scene0.png");
}

void scene_1() {
  background(GREEN);
  save("scene1.png");
}

void scene_2() {
  background(BLACK);
  save("scene2.png");
}

void scene_3() {
  background(YELLOW);
  save("scene3.png");
}

void scene_4() {
  background(LTGRAY);
  save("scene4.png");
}

void scene_5() {
  background(ORANGE);
  save("scene5.png");
}

void setup() {
  size(600, 600);
  background(BLUE);
  font = createFont("Menlo-Bold", 20);
  _dwn = new Button(_displayX - 40, _displayY, 40, 24, "--", LTGRAY, BLACK);
  _up = new Button(_displayX + _displayW, _displayY, 40, 24, "++", LTGRAY, BLACK);
  stepperValue = _initValue;
  _quit = new Button(width - 60, 20, 30, 24, "Q", LTGRAY, BLACK);
  scene_0();
  scene_1();
  scene_2();
  scene_3();
  scene_4();
  scene_5();
  scenes[0] = loadImage("scene0.png");
  scenes[1] = loadImage("scene1.png");
  scenes[2] = loadImage("scene2.png");
  scenes[3] = loadImage("scene3.png");
  scenes[4] = loadImage("scene4.png");
  scenes[5] = loadImage("scene5.png");
}

void draw() {
  background(scenes[stepperValue]);
  _up.display();
  _dwn.display();
  _quit.display();
  stepperValueDisplay(stepperValue);
}

void mousePressed() {
  if (mouseX > _quit.x && mouseX < _quit.x + _quit.w && mouseY > _quit.y && mouseY < _quit.y + _quit.h) {
    exit();
  }
  if (mouseX > _up.x && mouseX < _up.x + _up.w && mouseY > _up.y && mouseY < _up.y + _up.h) {
    stepperValue++;
    if (stepperValue > _maxValue) {
      stepperValue = _maxValue;
    }
    stepperValueDisplay(stepperValue);
  }
  if (mouseX > _dwn.x && mouseX < _dwn.x + _dwn.w && mouseY > _dwn.y && mouseY < _dwn.y + _dwn.h) {
    stepperValue--;
    if (stepperValue < _minValue) {
      stepperValue = _minValue;
    }
    stepperValueDisplay(stepperValue);
  }
}
Button Class:
int _btnTxtSize = 18;

class Button {
  float x, y, w, h;
  String title;
  color bkgrndColor;
  color txtColor;

  // Constructor
  Button(int xpos, int ypos, float wt, float ht, String titleStr, color background, color textColor) {
    x = xpos;
    y = ypos;
    w = wt;
    h = ht;
    title = titleStr;
    bkgrndColor = background;
    txtColor = textColor;
  }

  void display() {
    fill(bkgrndColor);
    noStroke();
    rect(x, y, w, h, 0);
    fill(txtColor);
    textSize(_btnTxtSize);
    textAlign(CENTER);
    text(title, x, y, w, h);
  }
}
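If you would rather answer the original question literally and drive the scenes with a switch statement, a minimal sketch using the variable names from the question could look like the following (the currentScene variable is added purely for illustration):
int currentScene = 0; // 0 = start screen, 1 = scene one, ...

void draw() {
  background(#A3E9EA);
  switch (currentScene) {
    case 0:
      image(startScreen, 0, 0);
      if (play.visible) play.buttonDisplay();
      break;
    case 1:
      image(scenes[0], 0, 0);
      break;
  }
}

void mousePressed() {
  // advance from the start screen to scene one when the play button is clicked
  if (currentScene == 0 && play.visible
      && dist(play.x + 110, play.y + 22, mouseX, mouseY) <= play.radius) {
    currentScene = 1;
  }
}
Because all drawing happens in draw(), the chosen scene persists across frames instead of being overwritten on the next frame.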

at::Tensor to UIImage

I have a PyTorch model and I am trying to run it on iOS. I have the following code:
at::Tensor tensor2 = torch::from_blob(imageBuffer2, {1, 1, 256, 256}, at::kFloat);
c10::InferenceMode guard;
auto output = _impl.forward({tensor1, tensor2});
torch::Tensor tensor_img = output.toTuple()->elements()[0].toTensor();
My question is: how can I convert tensor_img to a UIImage?
I found this function in the PyTorch documentation:
- (UIImage*)convertRGBBufferToUIImage:(unsigned char*)buffer
                            withWidth:(int)width
                           withHeight:(int)height {
    char* rgba = (char*)malloc(width * height * 4);
    for (int i = 0; i < width * height; ++i) {
        rgba[4 * i] = buffer[3 * i];
        rgba[4 * i + 1] = buffer[3 * i + 1];
        rgba[4 * i + 2] = buffer[3 * i + 2];
        rgba[4 * i + 3] = 255;
    }
    size_t bufferLength = width * height * 4;
    CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, rgba, bufferLength, NULL);
    size_t bitsPerComponent = 8;
    size_t bitsPerPixel = 32;
    size_t bytesPerRow = 4 * width;
    CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
    if (colorSpaceRef == NULL) {
        NSLog(@"Error allocating color space");
        CGDataProviderRelease(provider);
        return nil;
    }
    CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault | kCGImageAlphaPremultipliedLast;
    CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;
    CGImageRef iref = CGImageCreate(width,
                                    height,
                                    bitsPerComponent,
                                    bitsPerPixel,
                                    bytesPerRow,
                                    colorSpaceRef,
                                    bitmapInfo,
                                    provider,
                                    NULL,
                                    YES,
                                    renderingIntent);
    uint32_t* pixels = (uint32_t*)malloc(bufferLength);
    if (pixels == NULL) {
        NSLog(@"Error: Memory not allocated for bitmap");
        CGDataProviderRelease(provider);
        CGColorSpaceRelease(colorSpaceRef);
        CGImageRelease(iref);
        return nil;
    }
    CGContextRef context = CGBitmapContextCreate(pixels,
                                                 width,
                                                 height,
                                                 bitsPerComponent,
                                                 bytesPerRow,
                                                 colorSpaceRef,
                                                 bitmapInfo);
    if (context == NULL) {
        NSLog(@"Error context not created");
        free(pixels);
    }
    UIImage* image = nil;
    if (context) {
        CGContextDrawImage(context, CGRectMake(0.0f, 0.0f, width, height), iref);
        CGImageRef imageRef = CGBitmapContextCreateImage(context);
        if ([UIImage respondsToSelector:@selector(imageWithCGImage:scale:orientation:)]) {
            float scale = [[UIScreen mainScreen] scale];
            image = [UIImage imageWithCGImage:imageRef scale:scale orientation:UIImageOrientationUp];
        } else {
            image = [UIImage imageWithCGImage:imageRef];
        }
        CGImageRelease(imageRef);
        CGContextRelease(context);
    }
    CGColorSpaceRelease(colorSpaceRef);
    CGImageRelease(iref);
    CGDataProviderRelease(provider);
    if (pixels) {
        free(pixels);
    }
    return image;
}
@end
If I understand correctly, that function can convert an unsigned char* buffer to a UIImage. I think I need to convert my tensor_img to unsigned char*, but I don't understand how to do that.
The first snippet is the Torch bridge and the second is a UIImage helper that I call from Swift. Anyway, I resolved the issue, so we can close it. Code example:
for (int i = 0; i < 3 * width * height; i++) {
    [results addObject:@(floatBuffer[i])];
}
NSMutableData* data = [NSMutableData dataWithLength:sizeof(float) * 3 * width * height];
float* buffer = (float*)[data mutableBytes];
for (int j = 0; j < 3 * width * height; j++) {
    buffer[j] = [results[j] floatValue];
}
return buffer;
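For completeness, the step the question originally asks about, turning the float tensor into an interleaved uint8 RGB buffer for convertRGBBufferToUIImage, could look roughly like this. This is only a sketch: it assumes tensor_img is a 1x3xHxW float tensor with values in [0, 1], that it runs inside the class that defines the helper method, and the variable names are illustrative.
// Convert a 1x3xHxW float tensor (values in [0, 1]) into an H x W x 3 uint8 buffer.
torch::Tensor rgb = tensor_img.squeeze(0)      // 3 x H x W
                        .permute({1, 2, 0})    // H x W x 3 (interleaved RGB)
                        .contiguous()
                        .mul(255)
                        .clamp(0, 255)
                        .to(torch::kByte);
int height = (int)rgb.size(0);
int width = (int)rgb.size(1);
// rgb must stay alive while the buffer is in use.
unsigned char* buffer = rgb.data_ptr<unsigned char>();
UIImage* image = [self convertRGBBufferToUIImage:buffer withWidth:width withHeight:height];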

PGraphics set different colormode not working

I have the following code:
import processing.video.*;
import oscP5.*;
import netP5.*;

//sending the data to wekinator
int numPixelsOrig, numPixels, boxWidth = 64, boxHeight = 48, numHoriz = 640/boxWidth, numVert = 480/boxHeight;
color[] downPix = new color[numHoriz * numVert];
PGraphics buffer;
Capture video;
OscP5 oscSend;

//recieving data from the wekinatorino
ArrayList<Blob> blobs = new ArrayList<Blob>();
int amt = 5;
int cons1 = 200, cons2 = 150;
float xspeed, yspeed, radius;
float p1, p2, p3, p4, p5, p6;
OscP5 oscRecieve;
NetAddress dest;

void setup() {
  colorMode(RGB, 100);
  size(600, 600, P2D);
  buffer = createGraphics(600, 600, P3D);
  buffer.beginDraw();
  buffer.colorMode(HSB, 100);
  buffer.endDraw();
  String[] cameras = Capture.list();
  if (cameras == null) {
    video = new Capture(this, 640, 480);
  }
  if (cameras.length == 0) {
    exit();
  } else {
    video = new Capture(this, 640, 480);
    video.start();
    numPixelsOrig = video.width * video.height;
  }
  oscSend = new OscP5(this, 9000);
  oscRecieve = new OscP5(this, 12000);
  dest = new NetAddress("127.0.0.1", 6448);
}

void draw() {
  //println(blobs.size());
  if (video.available() == true) {
    video.read();
    video.loadPixels();
    int boxNum = 0;
    int tot = boxWidth*boxHeight;
    for (int x = 0; x < 640; x += boxWidth) {
      for (int y = 0; y < 480; y += boxHeight) {
        float red = 0, green = 0, blue = 0;
        for (int i = 0; i < boxWidth; i++) {
          for (int j = 0; j < boxHeight; j++) {
            int index = (x + i) + (y + j) * 640;
            red += red(video.pixels[index]);
            green += green(video.pixels[index]);
            blue += blue(video.pixels[index]);
          }
        }
        downPix[boxNum] = color(red/tot, green/tot, blue/tot);
        fill(downPix[boxNum]);
        int index = x + 640*y;
        red += red(video.pixels[index]);
        green += green(video.pixels[index]);
        blue += blue(video.pixels[index]);
        noStroke();
        rect(x, y, boxWidth, boxHeight);
        boxNum++;
      }
    }
    if (frameCount % 2 == 0)
      sendOsc(downPix);
  }
  if (blobs.size() < amt) {
    blobs.add(new Blob(new PVector(random(100 + (width - 200)), random(100 + (height - 200))), new PVector(-3, 3), random(100, 300)));
  }
  for (int i = blobs.size() - 1; i >= 0; i--) {
    if (blobs.size() > amt) {
      blobs.remove(i);
    }
  }
  buffer.beginDraw();
  buffer.loadPixels();
  for (int x = 0; x < buffer.width; x++) {
    for (int y = 0; y < buffer.height; y++) {
      int index = x + y * buffer.width;
      float sum = 0;
      for (Blob b : blobs) {
        float d = dist(x, y, b.pos.x, b.pos.y);
        sum += 10 * b.r / d;
      }
      buffer.pixels[index] = color(sum, 255, 255); //constrain(sum, cons1, cons2)
    }
  }
  buffer.updatePixels();
  buffer.endDraw();
  for (Blob b : blobs) {
    b.update();
  }
  //if () {
  //  xspeed = map(p1, 0, 1, -5, 5);
  //  yspeed = map(p2, 0, 1, -5, 5);
  //  radius = map(p3, 0, 1, 100, 300);
  //  cons1 = int(map(p4, 0, 1, 0, 255));
  //  cons2 = int(map(p5, 0, 1, 0, 255));
  //  amt = int(map(p6, 0, 1, 1, 6));
  //  for (Blob b : blobs) {
  //    b.updateAlgorithm(xspeed, yspeed, radius);
  //  }
  //}
  image(buffer, 0, 0);
}

void sendOsc(int[] px) {
  //println(px);
  OscMessage msg = new OscMessage("/wek/inputs");
  for (int i = 0; i < px.length; i++) {
    msg.add(float(px[i]));
  }
  oscSend.send(msg, dest);
}

void oscEvent(OscMessage theOscMessage) {
  if (theOscMessage.checkAddrPattern("/wek/outputs")==true) {
    if (theOscMessage.checkTypetag("fff")) {
      p1 = theOscMessage.get(0).floatValue();
      p2 = theOscMessage.get(1).floatValue();
      p3 = theOscMessage.get(2).floatValue();
      p4 = theOscMessage.get(2).floatValue();
      p5 = theOscMessage.get(2).floatValue();
      p6 = theOscMessage.get(2).floatValue();
    } else {
    }
  }
}

void mousePressed() {
  xspeed = random(-5, 5);
  yspeed = random(-5, 5);
  radius = random(100, 300);
  cons1 = int(random(255));
  cons2 = int(random(255));
  amt = int(random(6));
  for (Blob b : blobs) {
    b.updateAlgorithm(xspeed, yspeed, radius);
  }
}

class Blob {
  PVector pos;
  PVector vel;
  float r;

  Blob(PVector pos, PVector vel, float r) {
    this.pos = pos.copy();
    this.vel = vel.copy();
    this.r = r;
  }

  void update() {
    pos.add(vel);
    if (pos.x > width || pos.x < 0) {
      vel.x *= -1;
    }
    if (pos.y > height || pos.y < 0) {
      vel.y *= -1;
    }
  }

  void updateAlgorithm(float vx, float vy, float nr) {
    vel.x = vx;
    vel.y = vy;
    r = nr;
  }
}
Then I create some graphics in the buffer element, but the graphics aren't using my HSB color mode, with the result that I only see blue and white.
So how do I correct my code, or change the colorMode of a PGraphics element to HSB?
According to the PGraphics reference:
The beginDraw() and endDraw() methods (see above example) are
necessary to set up the buffer and to finalize it
therefore you should try this:
buffer = createGraphics(600, 600, P3D);
buffer.beginDraw();
buffer.colorMode(HSB, 100);
buffer.endDraw();
Here's a full test sketch to run and compare:
PGraphics buffer;

void setup() {
  colorMode(RGB, 100);
  size(600, 600, P2D);
  //draw test gradient in RGB buffer
  noStroke();
  for (int i = 0; i < 10; i++) {
    fill(i * 10, 100, 100);
    rect(0, i * 60, width, 60);
  }
  buffer = createGraphics(600, 600, P3D);
  buffer.beginDraw();
  buffer.colorMode(HSB, 100);
  buffer.endDraw();
  //draw test gradient in HSB buffer
  buffer.beginDraw();
  buffer.noStroke();
  for (int i = 0; i < 10; i++) {
    buffer.fill(i * 10, 100, 100);
    buffer.rect(0, i * 60, width, 60);
  }
  buffer.endDraw();
  //finally render the buffer on screen, offset to the right for comparison
  image(buffer, 300, 0);
}

Exporting video from processing 3

I am using Processing 3.2.2 for audio visualization. I have the code to play the visualizer, but I don't know how to save it as a video (.mp4) file.
Here's what I wrote:
import ddf.minim.*;
import ddf.minim.analysis.*;

Minim minim;
AudioPlayer player;
AudioMetaData meta;
BeatDetect beat;
int r = 200;
float rad = 70;

void setup()
{
  size(500, 500);
  //size(600, 400);
  minim = new Minim(this);
  player = minim.loadFile("son_final.mp3");
  meta = player.getMetaData();
  beat = new BeatDetect();
  player.play();
  //player.play();
  background(-1);
  noCursor();
}

void draw()
{
  float t = map(mouseX, 0, width, 0, 1);
  beat.detect(player.mix);
  fill(#1A1F18, 50);
  noStroke();
  rect(0, 0, width, height);
  translate(width/2, height/2);
  noFill();
  fill(-2, 10);
  if (beat.isOnset()) rad = rad*0.9;
  else rad = 70;
  ellipse(0, 0, 2*rad, 2*rad);
  stroke(-1, 50);
  int bsize = player.bufferSize();
  for (int i = 0; i < bsize - 1; i+=5)
  {
    float x = (r)*cos(i*2*PI/bsize);
    float y = (r)*sin(i*2*PI/bsize);
    float x2 = (r + player.left.get(i)*100)*cos(i*2*PI/bsize);
    float y2 = (r + player.left.get(i)*100)*sin(i*2*PI/bsize);
    line(x, y, x2, y2);
  }
  beginShape();
  noFill();
  stroke(-1, 50);
  for (int i = 0; i < bsize; i+=30)
  {
    float x2 = (r + player.left.get(i)*100)*cos(i*2*PI/bsize);
    float y2 = (r + player.left.get(i)*100)*sin(i*2*PI/bsize);
    vertex(x2, y2);
    pushStyle();
    stroke(-5);
    strokeWeight(2);
    point(x2, y2);
    popStyle();
  }
  endShape();
  if (flag) showMeta();
}

void showMeta() {
  int time = meta.length();
  textSize(50);
  textAlign(CENTER);
  text( (int)(time/1000-millis()/1000)/60 + ":" + (time/1000-millis()/1000)%60, -7, 21);
}

boolean flag = false;

void mousePressed() {
  if (dist(mouseX, mouseY, width/2, height/2) < 150) flag = !flag;
}

void keyPressed() {
  if (key == 'e') exit();
}
I know I can import the MovieMaker library, but I don't know how to use it in this code. Please suggest some changes.
Thank you.
You can use the Video Export library for that. More info here: http://funprogramming.org/VideoExport-for-Processing/
Then all you'd do is create an instance of VideoExport and then call videoExport.saveFrame() at the end of your draw() function.
import com.hamoid.*;

VideoExport videoExport;

void setup() {
  size(600, 600);
  videoExport = new VideoExport(this, "basic.mp4");
}

void draw() {
  background(#224488);
  rect(frameCount * frameCount % width, 0, 40, height);
  videoExport.saveFrame();
}
There are a ton of resources online; try searching for "Processing video export". Then try something out and post an MCVE (not your entire project) showing exactly where you're stuck.
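Applied to the sketch in the question, the integration would roughly look like this (a sketch, not tested against your project; the output file name is illustrative, and endMovie() finalizes the file before exiting):
import com.hamoid.*;

VideoExport videoExport;

void setup() {
  // ... existing Minim setup ...
  videoExport = new VideoExport(this, "visualizer.mp4");
}

void draw() {
  // ... existing drawing code ...
  videoExport.saveFrame();   // record the frame that was just drawn
}

void keyPressed() {
  if (key == 'e') {
    videoExport.endMovie();  // finalize the mp4 before exiting
    exit();
  }
}
Note that this records the visuals only; the audio played by Minim is not captured automatically.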

thumbnail, cut resize and upload to DB

I use this code to create thumbnails and then store both the original and the thumbnail in a DB. It creates thumbnails that are always a fixed size; if the original image is wider than it is high, it is cropped and then resized to the fixed size.
The code is working, but I would really appreciate some help modifying it to do the following (I have tried but didn't succeed):
Make high-quality thumbnails.
Cut the height if the image is much taller than it is wide (if the width is 200px and the height is 1000px, what will happen?).
Accept png and tiff.
This is the code so far:
public void imgUpload()
{
    if (ImgUpload.PostedFile != null)
    {
        System.Drawing.Image image_file = System.Drawing.Image.FromStream(ImgUpload.PostedFile.InputStream);
        string fileName = Server.HtmlEncode(ImgUpload.FileName);
        string extension = System.IO.Path.GetExtension(fileName);
        bool sizeError = false;

        if (image_file.Width < 200)
            sizeError = true;
        if (image_file.Height < 250)
            sizeError = true;

        if ((extension.ToUpper() == ".JPG") && !sizeError)
        {
            //**** Resize image section ****
            int image_height = image_file.Height;
            int image_width = image_file.Width;
            int original_width = image_width;
            int original_height = image_height;
            int max_height = 250;
            int max_width = 200;
            Rectangle rect;

            if (image_width > image_height)
            {
                image_width = (image_width * max_height) / image_height;
                image_height = max_height;
                rect = new Rectangle(((image_width - max_width) / 2), 0, max_width, max_height);
            }
            else
            {
                image_height = (image_height * max_width) / image_width;
                image_width = max_width;
                rect = new Rectangle(0, ((image_height - max_height) / 2), max_width, max_height);
            }

            Bitmap bitmap_file = new Bitmap(image_file, image_width, image_height);
            Bitmap new_bitmap_file = bitmap_file.Clone(rect, bitmap_file.PixelFormat);
            System.IO.MemoryStream stream = new System.IO.MemoryStream();
            new_bitmap_file.Save(stream, System.Drawing.Imaging.ImageFormat.Jpeg);
            stream.Position = 0;
            byte[] imageThumbnail = new byte[stream.Length + 1];
            stream.Read(imageThumbnail, 0, imageThumbnail.Length);

            Bitmap Original_bitmap_file = new Bitmap(image_file, original_width, original_height);
            System.IO.MemoryStream Original_stream = new System.IO.MemoryStream();
            Original_bitmap_file.Save(Original_stream, System.Drawing.Imaging.ImageFormat.Jpeg);
            Original_stream.Position = 0;
            byte[] imageOriginal = new byte[Original_stream.Length + 1];
            Original_stream.Read(imageOriginal, 0, imageOriginal.Length);
            //**** End resize image section ****

            saveImage(imageThumbnail, imageOriginal, IDTextBox.Text);
            lblOutput.Visible = false;
        }
        else
        {
            lblOutput.Text = "Please only upload .jpg files and make sure the size is minimum 200x250";
            lblOutput.Visible = true;
        }
    }
    else
    {
        lblOutput.Text = "No file selected";
        lblOutput.Visible = true;
    }
}
Here is an example of how to scale and crop an image:
Bitmap b = new Bitmap(200, 1000);
using (var g = Graphics.FromImage(b))
{
    g.DrawLine(Pens.White, 0, 0, b.Width, b.Height);
}
b.Save("b.jpg", System.Drawing.Imaging.ImageFormat.Jpeg);

Bitmap thumb = new Bitmap(100, 100);
using (var g = Graphics.FromImage(thumb))
{
    g.InterpolationMode = System.Drawing.Drawing2D.InterpolationMode.HighQualityBicubic;
    g.DrawImage(b, new Rectangle(0, 0, 100, 100), new Rectangle(0, 400, 200, 200), GraphicsUnit.Pixel);
}
thumb.Save("thumb.jpg", System.Drawing.Imaging.ImageFormat.Jpeg);
I think you can adapt this code to handle all the whats and ifs about when the height/width ratio is too high, etc.
I use the following code when creating thumbnails for http://www.inventerat.se and a few other sites and it works like a charm:
public static Bitmap WithinMaxSize(Image imgPhoto, int maxWidth, int maxHeight)
{
    int sourceWidth = imgPhoto.Width;
    int sourceHeight = imgPhoto.Height;
    int destWidth;
    int destHeight;
    float sourceAspect = sourceWidth / (sourceHeight * 1.0F);
    float maxAspect = maxWidth / (maxHeight * 1.0F);

    if (sourceAspect > maxAspect)
    {
        // Width is limiting.
        destWidth = maxWidth;
        destHeight = (int)Math.Round(destWidth / sourceAspect);
    }
    else
    {
        // Height is limiting.
        destHeight = maxHeight;
        destWidth = (int)Math.Round(destHeight * sourceAspect);
    }

    Bitmap bmPhoto = new Bitmap(destWidth, destHeight);
    Graphics grPhoto = Graphics.FromImage(bmPhoto);
    grPhoto.Clear(Color.White);
    grPhoto.CompositingMode = CompositingMode.SourceCopy;
    grPhoto.CompositingQuality = CompositingQuality.HighQuality;
    grPhoto.InterpolationMode = InterpolationMode.HighQualityBicubic;
    grPhoto.DrawImage(imgPhoto, 0, 0, destWidth, destHeight);
    grPhoto.Dispose();
    return bmPhoto;
}
Just don't forget to dispose of the Bitmap returned from this method.
It does not work very well if users upload very large images (e.g. > 5000 x 5000 pixels), though. If that is a requirement, I would recommend using ImageMagick or a similar imaging package.
To accept png and tiff, just adjust the file extension check so that you accept "png", "tif" and "tiff" as well. Support for tiff is a bit sketchy in .NET, though, since tiff itself is only a container for many different encodings.
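A minimal sketch of a more permissive extension check, reusing the variable names from the question (the allowed list is illustrative, and Contains on the array needs using System.Linq):
string extension = System.IO.Path.GetExtension(fileName).ToUpperInvariant();
string[] allowedExtensions = { ".JPG", ".JPEG", ".PNG", ".TIF", ".TIFF" };
bool formatOk = allowedExtensions.Contains(extension);   // requires using System.Linq;

if (formatOk && !sizeError)
{
    // ... resize and save as before; pick the matching ImageFormat when saving
    // if you want to keep the original format instead of always re-encoding to Jpeg ...
}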
