I have the following code:
import processing.video.*;
import oscP5.*;
import netP5.*;
//sending the data to wekinator
int numPixelsOrig, numPixels, boxWidth = 64, boxHeight = 48, numHoriz = 640/boxWidth, numVert = 480/boxHeight;
color[] downPix = new color[numHoriz * numVert];
PGraphics buffer;
Capture video;
OscP5 oscSend;
//receiving data from the Wekinator
ArrayList<Blob> blobs = new ArrayList<Blob>();
int amt = 5;
int cons1 = 200, cons2 = 150;
float xspeed, yspeed, radius;
float p1, p2, p3, p4, p5, p6;
OscP5 oscRecieve;
NetAddress dest;
void setup() {
colorMode(RGB, 100);
size(600, 600, P2D);
buffer = createGraphics(600, 600, P3D);
buffer.beginDraw();
buffer.colorMode(HSB, 100);
buffer.endDraw();
String[] cameras = Capture.list();
if (cameras == null) {
video = new Capture(this, 640, 480);
} else if (cameras.length == 0) {
exit();
} else {
video = new Capture(this, 640, 480);
video.start();
numPixelsOrig = video.width * video.height;
}
oscSend = new OscP5(this, 9000);
oscRecieve = new OscP5(this, 12000);
dest = new NetAddress("127.0.0.1", 6448);
}
void draw() {
//println(blobs.size());
if (video.available() == true) {
video.read();
video.loadPixels();
int boxNum = 0;
int tot = boxWidth*boxHeight;
for (int x = 0; x < 640; x += boxWidth) {
for (int y = 0; y < 480; y += boxHeight) {
float red = 0, green = 0, blue = 0;
for (int i = 0; i < boxWidth; i++) {
for (int j = 0; j < boxHeight; j++) {
int index = (x + i) + (y + j) * 640;
red += red(video.pixels[index]);
green += green(video.pixels[index]);
blue += blue(video.pixels[index]);
}
}
downPix[boxNum] = color(red/tot, green/tot, blue/tot);
fill(downPix[boxNum]);
int index = x + 640*y;
red += red(video.pixels[index]);
green += green(video.pixels[index]);
blue += blue(video.pixels[index]);
noStroke();
rect(x, y, boxWidth, boxHeight);
boxNum++;
}
}
if (frameCount % 2 == 0)
sendOsc(downPix);
}
if (blobs.size() < amt) {
blobs.add(new Blob(new PVector(random(100 + (width - 200)), random(100 + (height- 200))), new PVector(-3, 3), random(100, 300)));
}
for (int i = blobs.size() - 1; i >= 0; i--) {
if (blobs.size() > amt) {
blobs.remove(i);
}
}
buffer.beginDraw();
buffer.loadPixels();
for (int x = 0; x < buffer.width; x++) {
for (int y = 0; y < buffer.height; y++) {
int index = x + y * buffer.width;
float sum = 0;
for (Blob b : blobs) {
float d = dist(x, y, b.pos.x, b.pos.y);
sum += 10 * b.r / d;
}
buffer.pixels[index] = color(sum, 255, 255); //constrain(sum, cons1, cons2)
}
}
buffer.updatePixels();
buffer.endDraw();
for (Blob b : blobs) {
b.update();
}
//if () {
//xspeed = map(p1, 0, 1, -5, 5);
//yspeed = map(p2, 0, 1, -5, 5);
//radius = map(p3, 0, 1, 100, 300);
//cons1 = int(map(p4, 0, 1, 0, 255));
//cons2 = int(map(p5, 0, 1, 0, 255));
//amt = int(map(p6, 0, 1, 1, 6));
//for (Blob b : blobs) {
// b.updateAlgorithm(xspeed, yspeed, radius);
//}
//}
image(buffer, 0, 0);
}
void sendOsc(int[] px) {
//println(px);
OscMessage msg = new OscMessage("/wek/inputs");
for (int i = 0; i < px.length; i++) {
msg.add(float(px[i]));
}
oscSend.send(msg, dest);
}
void oscEvent(OscMessage theOscMessage) {
if (theOscMessage.checkAddrPattern("/wek/outputs")==true) {
if (theOscMessage.checkTypetag("fff")) {
p1 = theOscMessage.get(0).floatValue();
p2 = theOscMessage.get(1).floatValue();
p3 = theOscMessage.get(2).floatValue();
p4 = theOscMessage.get(2).floatValue();
p5 = theOscMessage.get(2).floatValue();
p6 = theOscMessage.get(2).floatValue();
} else {
}
}
}
void mousePressed() {
xspeed = random(-5, 5);
yspeed = random(-5, 5);
radius = random(100, 300);
cons1 = int(random(255));
cons2 = int(random(255));
amt = int(random(6));
for (Blob b : blobs) {
b.updateAlgorithm(xspeed, yspeed, radius);
}
}
class Blob {
PVector pos;
PVector vel;
float r;
Blob(PVector pos, PVector vel, float r) {
this.pos = pos.copy();
this.vel = vel.copy();
this.r = r;
}
void update(){
pos.add(vel);
if (pos.x > width || pos.x < 0) {
vel.x *= -1;
}
if (pos.y > height || pos.y < 0) {
vel.y *= -1;
}
}
void updateAlgorithm(float vx, float vy, float nr){
vel.x = vx;
vel.y = vy;
r = nr;
}
}
Then I create some graphics in the buffer element, but the graphics aren't using my HSB color mode, with the result that I only see blue and white...
So how do I correct my code, or change the colorMode for a PGraphics element to HSB?
According to the PGraphics reference:
The beginDraw() and endDraw() methods (see above example) are
necessary to set up the buffer and to finalize it
Therefore you should try this:
buffer = createGraphics(600, 600, P3D);
buffer.beginDraw();
buffer.colorMode(HSB, 100);
buffer.endDraw();
Here's a full test sketch to run and compare:
PGraphics buffer;
void setup(){
colorMode(RGB, 100);
size(600, 600, P2D);
//draw test gradient in RGB buffer
noStroke();
for(int i = 0 ; i < 10; i++){
fill(i * 10,100,100);
rect(0,i * 60,width,60);
}
buffer = createGraphics(600, 600, P3D);
buffer.beginDraw();
buffer.colorMode(HSB, 100);
buffer.endDraw();
//draw test gradient in HSB buffer
buffer.beginDraw();
buffer.noStroke();
for(int i = 0 ; i < 10; i++){
buffer.fill(i * 10,100,100);
buffer.rect(0,i * 60,width,60);
}
buffer.endDraw();
//finally render the buffer on screen, offset to the right for comparison
image(buffer,300,0);
}
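One more thing worth checking, since the draw() loop in the question writes buffer.pixels[] directly: the sketch-level color() function builds colors using the main sketch's color mode, not the buffer's, so values created with it are still interpreted as RGB. A minimal sketch of the idea (illustrative only, not the original code) is to let the buffer build the color itself:
buffer.beginDraw();
buffer.loadPixels();
for (int i = 0; i < buffer.pixels.length; i++) {
float hue = map(i % buffer.width, 0, buffer.width, 0, 100);
//buffer.color() uses the buffer's HSB mode; the global color() would use the sketch's RGB mode
buffer.pixels[i] = buffer.color(hue, 100, 100);
}
buffer.updatePixels();
buffer.endDraw();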
I have an algorithm for flood-filling a canvas, and I'm trying to incorporate it with Fabric.js. So here is the dilemma... I create a fabric.Canvas(), which creates a wrapper canvas and also an upper-canvas element. I click on the canvas to apply my FloodFill(); this works fine and applies my color. But as soon as I drag my canvas objects around, or add additional objects to the canvas, the color disappears, as if it resets.
Any idea why this is?
This happens because Fabric.js wipes the whole canvas on every frame and redraws it from its internal object data.
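In other words, anything painted straight onto the canvas element lives outside Fabric's object model, so the next renderAll() discards it. One workaround (a rough sketch, assuming fcanvas is your fabric.Canvas) is to re-draw your custom pixels after every render pass; the more robust fix is to hand the fill result to Fabric as a fabric.Image so it gets redrawn automatically, which is what the answer below does.
//'after:render' is a standard Fabric event; the rectangle is just a stand-in for re-applying your flood-fill pixels
fcanvas.on('after:render', function() {
var ctx = fcanvas.lowerCanvasEl.getContext('2d');
ctx.fillStyle = '#f00';
ctx.fillRect(10, 10, 40, 40); //re-applied every frame, so it survives Fabric's per-frame wipe
});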
I made a JSfiddle that implements Flood Fill for Fabric JS. Check it here: https://jsfiddle.net/av01d/dfvp9j2u/
/*
* FloodFill for fabric.js
* @author Arjan Haverkamp (av01d)
* @date October 2018
*/
var FloodFill = {
// Compare subsection of array1's values to array2's values, with an optional tolerance
withinTolerance: function(array1, offset, array2, tolerance)
{
var length = array2.length,
start = offset + length;
tolerance = tolerance || 0;
// Iterate (in reverse) the items being compared in each array, checking their values are
// within tolerance of each other
while(start-- && length--) {
if(Math.abs(array1[start] - array2[length]) > tolerance) {
return false;
}
}
return true;
},
// The actual flood fill implementation
fill: function(imageData, getPointOffsetFn, point, color, target, tolerance, width, height)
{
var directions = [[1, 0], [0, 1], [0, -1], [-1, 0]],
coords = [],
points = [point],
seen = {},
key,
x,
y,
offset,
i,
x2,
y2,
minX = -1,
maxX = -1,
minY = -1,
maxY = -1;
// Keep going while we have points to walk
while (!!(point = points.pop())) {
x = point.x;
y = point.y;
offset = getPointOffsetFn(x, y);
// Move to next point if this pixel isn't within tolerance of the color being filled
if (!FloodFill.withinTolerance(imageData, offset, target, tolerance)) {
continue;
}
if (x > maxX) { maxX = x; }
if (y > maxY) { maxY = y; }
if (x < minX || minX == -1) { minX = x; }
if (y < minY || minY == -1) { minY = y; }
// Update the pixel to the fill color and add neighbours onto stack to traverse
// the fill area
i = directions.length;
while (i--) {
// Use the same loop for setting RGBA as for checking the neighbouring pixels
if (i < 4) {
imageData[offset + i] = color[i];
coords[offset+i] = color[i];
}
// Get the new coordinate by adjusting x and y based on current step
x2 = x + directions[i][0];
y2 = y + directions[i][1];
key = x2 + ',' + y2;
// If new coordinate is out of bounds, or we've already added it, then skip to
// trying the next neighbour without adding this one
if (x2 < 0 || y2 < 0 || x2 >= width || y2 >= height || seen[key]) {
continue;
}
// Push neighbour onto points array to be processed, and tag as seen
points.push({ x: x2, y: y2 });
seen[key] = true;
}
}
return {
x: minX,
y: minY,
width: maxX-minX,
height: maxY-minY,
coords: coords
}
}
}; // End FloodFill
var fcanvas; // Fabric Canvas
var fillColor = '#f00';
var fillTolerance = 2;
function hexToRgb(hex, opacity) {
opacity = Math.round(opacity * 255) || 255;
hex = hex.replace('#', '');
var rgb = [], re = new RegExp('(.{' + hex.length/3 + '})', 'g');
hex.match(re).map(function(l) {
rgb.push(parseInt(hex.length % 2 ? l+l : l, 16));
});
return rgb.concat(opacity);
}
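// Usage note (illustrative, not part of the original fiddle):
//   hexToRgb('#f00')         -> [255, 0, 0, 255]
//   hexToRgb('#ff0000', 0.5) -> [255, 0, 0, 128]  (RGBA, alpha in 0..255)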
function floodFill(enable) {
if (!enable) {
fcanvas.off('mouse:down');
fcanvas.selection = true;
fcanvas.forEachObject(function(object){
object.selectable = true;
});
return;
}
fcanvas.deactivateAll().renderAll(); // Hide object handles!
fcanvas.selection = false;
fcanvas.forEachObject(function(object){
object.selectable = false;
});
fcanvas.on({
'mouse:down': function(e) {
var mouse = fcanvas.getPointer(e.e),
mouseX = Math.round(mouse.x), mouseY = Math.round(mouse.y),
canvas = fcanvas.lowerCanvasEl,
context = canvas.getContext('2d'),
parsedColor = hexToRgb(fillColor),
imageData = context.getImageData(0, 0, canvas.width, canvas.height),
getPointOffset = function(x,y) {
return 4 * (y * imageData.width + x)
},
targetOffset = getPointOffset(mouseX, mouseY),
target = imageData.data.slice(targetOffset, targetOffset + 4);
if (FloodFill.withinTolerance(target, 0, parsedColor, fillTolerance)) {
// Trying to fill something which is (essentially) the fill color
console.log('Ignore... same color')
return;
}
// Perform flood fill
var data = FloodFill.fill(
imageData.data,
getPointOffset,
{ x: mouseX, y: mouseY },
parsedColor,
target,
fillTolerance,
imageData.width,
imageData.height
);
if (0 == data.width || 0 == data.height) {
return;
}
var tmpCanvas = document.createElement('canvas'), tmpCtx = tmpCanvas.getContext('2d');
tmpCanvas.width = canvas.width;
tmpCanvas.height = canvas.height;
var palette = tmpCtx.getImageData(0, 0, tmpCanvas.width, tmpCanvas.height); // x, y, w, h
palette.data.set(new Uint8ClampedArray(data.coords)); // Assuming values 0..255, RGBA
tmpCtx.putImageData(palette, 0, 0); // Repost the data.
var imgData = tmpCtx.getImageData(data.x, data.y, data.width, data.height); // Get cropped image
tmpCanvas.width = data.width;
tmpCanvas.height = data.height;
tmpCtx.putImageData(imgData,0,0);
fcanvas.add(new fabric.Image(tmpCanvas, {
left: data.x,
top: data.y,
selectable: false
}))
}
});
}
$(function() {
// Init Fabric Canvas:
fcanvas = new fabric.Canvas('c', {
backgroundColor:'#fff',
enableRetinaScaling: false
});
// Add some demo-shapes:
fcanvas.add(new fabric.Circle({
radius: 80,
fill: false,
left: 100,
top: 100,
stroke: '#000',
strokeWidth: 2
}));
fcanvas.add(new fabric.Triangle({
width: 120,
height: 160,
left: 50,
top: 50,
stroke: '#000',
fill: '#00f',
strokeWidth: 2
}));
fcanvas.add(new fabric.Rect({
width: 120,
height: 160,
left: 150,
top: 50,
fill: 'red',
stroke: '#000',
strokeWidth: 2
}));
fcanvas.add(new fabric.Rect({
width: 200,
height: 120,
left: 200,
top: 120,
fill: 'green',
stroke: '#000',
strokeWidth: 2
}));
/* Images work very well too. Make sure they're CORS
enabled though! */
var img = new Image();
img.crossOrigin = 'anonymous';
img.onload = function() {
fcanvas.add(new fabric.Image(img, {
left: 300,
top: 100,
angle: 30,
}));
}
img.src = 'http://misc.avoid.org/chip.png';
});
I am new to Fabric.js and want to set a drag limit for objects.
I have also tried https://github.com/kangax/fabric.js/wiki/Working-with-events, but was not able to get a solution.
Please check the attached image: the object can currently move anywhere, but it should stay inside the red area only. That is what I want. Help me... thanks in advance!!
While Orangepill's answer is correct, it produces a "stuttering" when your object hits the object bounds. If you have a rectangular bounding box (and not a complex bounding object) an alternative is to allow the object to be dragged along the bounds and "slide" along the bounding box. You do this by capping the coordinates values and letting the other dimension move as usual. An example snippet would look like so:
var canvas = new fabric.Canvas("bounded");
var boundingBox = new fabric.Rect({
fill: "none",
width: 600,
height: 400,
hasBorders: false,
hasControls: false,
lockMovementX: true,
lockMovementY: true,
evented: false,
stroke: "red"
});
var movingBox = new fabric.Rect({
width: 100,
height: 100,
hasBorders: false,
hasControls: false
});
canvas.on("object:moving", function() {
var top = movingBox.top;
var bottom = top + movingBox.height;
var left = movingBox.left;
var right = left + movingBox.width;
var topBound = boundingBox.top;
var bottomBound = topBound + boundingBox.height;
var leftBound = boundingBox.left;
var rightBound = leftBound + boundingBox.width;
// capping logic here
movingBox.setLeft(Math.min(Math.max(left, leftBound), rightBound - movingBox.width));
movingBox.setTop(Math.min(Math.max(top, topBound), bottomBound - movingBox.height));
});
canvas.add(boundingBox);
canvas.add(movingBox);
See this example in JSFiddle here
Felix Fung's answer was a starting point, but there are many things to consider. Here is a version that accounts for some of them.
It handles the canvas having a viewport transform (ie, zoomed/panned) and objects that are center-origined instead of left/top-origined. It also constrains objects wider/taller than the viewport to the top/left instead of the bottom/right.
canvas.on("object:moving", function(e) {
var obj = e.target;
var canvas = obj.canvas;
var top = obj.top;
var left = obj.left;
var zoom = canvas.getZoom();
var pan_x = canvas.viewportTransform[4];
var pan_y = canvas.viewportTransform[5];
// width & height we are constraining to must be calculated by applying the inverse of the current viewportTransform
var c_width = canvas.width / zoom;
var c_height = canvas.height / zoom;
var w = obj.width * obj.scaleX
var left_adjust, right_adjust
if(obj.originX == "center") {
left_adjust = right_adjust = w / 2;
} else {
left_adjust = 0;
right_adjust = w;
}
var h = obj.height * obj.scaleY;
var top_adjust, bottom_adjust;
if(obj.originY == "center") {
top_adjust = bottom_adjust = h / 2;
} else {
top_adjust = 0;
bottom_adjust = h;
}
// if you need margins set them here
var top_margin = 0;
var bottom_margin = 0;
var left_margin = 0;
var right_margin = 0;
var top_bound = top_margin + top_adjust - pan_y;
var bottom_bound = c_height - bottom_adjust - bottom_margin - pan_y;
var left_bound = left_margin + left_adjust - pan_x;
var right_bound = c_width - right_adjust - right_margin - pan_x;
if( w > c_width ) {
obj.setLeft(left_bound);
} else {
obj.setLeft(Math.min(Math.max(left, left_bound), right_bound));
}
if( h > c_height ) {
obj.setTop(top_bound);
} else {
obj.setTop(Math.min(Math.max(top, top_bound), bottom_bound));
}
});
What worked for me was to create an event listener for the object:moving event. While the move is happening, you keep updating the goodtop and goodleft variables, and once the object goes out of bounds you reposition it to the last good position.
var goodtop, goodleft, boundingObject;
canvas.on("object:moving", function(){
var obj = this.relatedTarget;
var bounds = boundingObject;
obj.setCoords();
if(!obj.isContainedWithinObject(bounds)){
obj.setTop(goodtop);
obj.setLeft(goodleft);
canvas.refresh();
} else {
goodtop = obj.top;
goodleft = obj.left;
}
});
I used Michael Johnston's snippet as a starting point to add bounding control for rotated elements. It only covers the cases where obj.originX and obj.originY are both "center", or neither is.
canvasRef.current.on("object:moving", function (e) {
var obj = e.target;
var canvas = obj.canvas;
var zoom = canvas.getZoom();
var pan_x = canvas.viewportTransform[4];
var pan_y = canvas.viewportTransform[5];
// get left, top, width, and height of object
var left = obj.left;
var top = obj.top;
var width = obj.width * obj.scaleX;
var height = obj.height * obj.scaleY;
// width & height we are constraining to must be calculated by applying the inverse of the current viewportTransform
var c_width = canvas.width / zoom;
var c_height = canvas.height / zoom;
// calculate values that define the origin of the object, when it is centered in the center or not
var left_adjust, right_adjust;
if (obj.originX == "center") {
left_adjust = right_adjust = width / 2;
} else {
left_adjust = 0;
right_adjust = width;
}
var top_adjust, bottom_adjust;
if (obj.originY == "center") {
top_adjust = bottom_adjust = height / 2;
} else {
top_adjust = 0;
bottom_adjust = height;
}
// support for rotated objects
if (obj.angle) {
var angle = obj.angle;
if (angle > 270) {
angle -= 270;
} else if (angle > 180) {
angle -= 180;
} else if (angle > 90) {
angle -= 90;
}
const radians = angle * (Math.PI / 180);
const w_opposite = width * Math.sin(radians);
const w_adjacent = width * Math.cos(radians);
const h_opposite = height * Math.sin(radians);
const h_adjacent = height * Math.cos(radians);
if (obj.originX != "center" && obj.originY != "center") {
if (obj.angle <= 90) {
left_adjust = h_opposite;
top_adjust = 0;
right_adjust = w_adjacent;
bottom_adjust = h_adjacent + w_opposite;
} else if (obj.angle > 90 && obj.angle <= 180) {
left_adjust = h_adjacent + w_opposite;
top_adjust = h_opposite;
right_adjust = 0;
bottom_adjust = w_adjacent;
} else if (obj.angle > 180 && obj.angle <= 270) {
left_adjust = w_adjacent;
top_adjust = w_opposite + h_adjacent;
right_adjust = h_opposite;
bottom_adjust = 0;
} else {
left_adjust = 0;
top_adjust = w_adjacent;
right_adjust = w_opposite + h_adjacent;
bottom_adjust = h_opposite;
}
}
if (obj.originX == "center" && obj.originY == "center") {
if (obj.angle <= 90 || (obj.angle > 180 && obj.angle <= 270)) {
left_adjust = (w_adjacent + h_opposite) / 2;
right_adjust = (w_adjacent + h_opposite) / 2;
top_adjust = (h_adjacent + w_opposite) / 2;
bottom_adjust = (h_adjacent + w_opposite) / 2;
} else {
left_adjust = (h_adjacent + w_opposite) / 2;
right_adjust = (h_adjacent + w_opposite) / 2;
top_adjust = (w_adjacent + h_opposite) / 2;
bottom_adjust = (w_adjacent + h_opposite) / 2;
}
}
}
// if you need margins set them here
var top_margin = 0;
var bottom_margin = 0;
var left_margin = 0;
var right_margin = 0;
var top_bound = top_margin + top_adjust - pan_y;
var bottom_bound = c_height - bottom_adjust - bottom_margin - pan_y;
var left_bound = left_margin + left_adjust - pan_x;
var right_bound = c_width - right_adjust - right_margin - pan_x;
if (width > c_width) {
obj.set("left", left_bound);
} else {
obj.set("left", Math.min(Math.max(left, left_bound), right_bound));
}
if (height > c_height) {
obj.set("top", top_bound);
} else {
obj.set("top", Math.min(Math.max(top, top_bound), bottom_bound));
}
});
SEE THE WORKING EXAMPLE
It's as simple as water, try this.
Just use this JS:
<script type="text/javascript">
//use global variable for canvas object
var canvas;
var ctx;
function onLoad() {
//get fabric canvas with id mycanvas
canvas = new fabric.Canvas('mycanvas');
canvas.on("mouse : down",function{
//get canvas 2d context
ctx = canvas.getContext("2d");
ctx.beginPath();
ctx.rect(115,60,221,390);//specify bounded rectangle
ctx.closePath();
ctx.clip();
ctx.save();
});
//now restore the context on mouse up
canvas.on("mouse : up",function(){
ctx.restore();//restore the context
});
}
</script>
Hope this will help you.
Enjoy coding :)
Now you can have the same clipping region for every object supported by Fabric.js for transformation and moving.
Try it :).
I am testing my iPhone application on an iOS 3.1.3 iPhone. I am selecting/capturing an image using a UIImagePickerController:
UIImagePickerController *imagePicker = [[UIImagePickerController alloc] init];
[imagePicker setSourceType:UIImagePickerControllerSourceTypeCamera];
[imagePicker setDelegate:self];
[self.navigationController presentModalViewController:imagePicker animated:YES];
[imagePicker release];
- (void)imagePickerController:(UIImagePickerController *)picker didFinishPickingMediaWithInfo:(NSDictionary *)info {
self.image = [info objectForKey:UIImagePickerControllerOriginalImage];
imageView.image = self.image;
[self.navigationController dismissModalViewControllerAnimated:YES];
submitButton.enabled = YES;
}
I then at some point send it to my web server using the ASI classes:
ASIFormDataRequest *request = [ASIFormDataRequest requestWithURL:[NSURL URLWithString:@"http://example.com/myscript.php"]];
[request setDelegate:self];
[request setStringEncoding:NSUTF8StringEncoding];
[request setShouldContinueWhenAppEntersBackground:YES];
//other post keys/values
[request setFile:UIImageJPEGRepresentation(self.image, 100.0f) withFileName:[NSString stringWithFormat:@"%d.jpg", [[NSDate date] timeIntervalSinceNow]] andContentType:@"image/jpg" forKey:@"imageFile"];
[request startAsynchronous];
The problem:
When I take a picture with the iPhone while holding it landscape, the image gets uploaded to the server and is viewed as you would expect. When taking a picture holding the phone in portrait, the image is uploaded and viewed as if it had been rotated 90 degrees.
My application is set to only work in portrait modes (upside down and regular).
How can I make the image always show the correct orientation after uploading?
The image appears to be correct when displayed in a UIImageView (directly after taking the picture), but viewing it on the server says otherwise.
A UIImage has a property imageOrientation, which instructs the UIImageView and other UIImage consumers to rotate the raw image data. There's a good chance that this flag is being saved to the exif data in the uploaded jpeg image, but the program you use to view it is not honoring that flag.
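A quick way to confirm this (hypothetical logging, not part of the original code) is to print the flag right after picking; portrait shots from the camera typically come back as UIImageOrientationRight, even though a UIImageView displays them correctly because it honors the flag:
// In imagePickerController:didFinishPickingMediaWithInfo:
UIImage *picked = [info objectForKey:UIImagePickerControllerOriginalImage];
NSLog(@"imageOrientation = %d", (int)picked.imageOrientation); // 0 = Up, 3 = Right (typical portrait capture)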
To rotate the UIImage to display properly when uploaded, you can use a category like this:
UIImage+fixOrientation.h
@interface UIImage (fixOrientation)
- (UIImage *)fixOrientation;
@end
UIImage+fixOrientation.m
@implementation UIImage (fixOrientation)
- (UIImage *)fixOrientation {
// No-op if the orientation is already correct
if (self.imageOrientation == UIImageOrientationUp) return self;
// We need to calculate the proper transformation to make the image upright.
// We do it in 2 steps: Rotate if Left/Right/Down, and then flip if Mirrored.
CGAffineTransform transform = CGAffineTransformIdentity;
switch (self.imageOrientation) {
case UIImageOrientationDown:
case UIImageOrientationDownMirrored:
transform = CGAffineTransformTranslate(transform, self.size.width, self.size.height);
transform = CGAffineTransformRotate(transform, M_PI);
break;
case UIImageOrientationLeft:
case UIImageOrientationLeftMirrored:
transform = CGAffineTransformTranslate(transform, self.size.width, 0);
transform = CGAffineTransformRotate(transform, M_PI_2);
break;
case UIImageOrientationRight:
case UIImageOrientationRightMirrored:
transform = CGAffineTransformTranslate(transform, 0, self.size.height);
transform = CGAffineTransformRotate(transform, -M_PI_2);
break;
case UIImageOrientationUp:
case UIImageOrientationUpMirrored:
break;
}
switch (self.imageOrientation) {
case UIImageOrientationUpMirrored:
case UIImageOrientationDownMirrored:
transform = CGAffineTransformTranslate(transform, self.size.width, 0);
transform = CGAffineTransformScale(transform, -1, 1);
break;
case UIImageOrientationLeftMirrored:
case UIImageOrientationRightMirrored:
transform = CGAffineTransformTranslate(transform, self.size.height, 0);
transform = CGAffineTransformScale(transform, -1, 1);
break;
case UIImageOrientationUp:
case UIImageOrientationDown:
case UIImageOrientationLeft:
case UIImageOrientationRight:
break;
}
// Now we draw the underlying CGImage into a new context, applying the transform
// calculated above.
CGContextRef ctx = CGBitmapContextCreate(NULL, self.size.width, self.size.height,
CGImageGetBitsPerComponent(self.CGImage), 0,
CGImageGetColorSpace(self.CGImage),
CGImageGetBitmapInfo(self.CGImage));
CGContextConcatCTM(ctx, transform);
switch (self.imageOrientation) {
case UIImageOrientationLeft:
case UIImageOrientationLeftMirrored:
case UIImageOrientationRight:
case UIImageOrientationRightMirrored:
// Grr...
CGContextDrawImage(ctx, CGRectMake(0,0,self.size.height,self.size.width), self.CGImage);
break;
default:
CGContextDrawImage(ctx, CGRectMake(0,0,self.size.width,self.size.height), self.CGImage);
break;
}
// And now we just create a new UIImage from the drawing context
CGImageRef cgimg = CGBitmapContextCreateImage(ctx);
UIImage *img = [UIImage imageWithCGImage:cgimg];
CGContextRelease(ctx);
CGImageRelease(cgimg);
return img;
}
@end
I figured out a much simpler one:
- (UIImage *)normalizedImage {
if (self.imageOrientation == UIImageOrientationUp) return self;
UIGraphicsBeginImageContextWithOptions(self.size, NO, self.scale);
[self drawInRect:(CGRect){0, 0, self.size}];
UIImage *normalizedImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return normalizedImage;
}
BTW: @Anomie's code does not take scale into account, so will not work for 2x images.
Here is a Swift version of the answer by @an0:
func normalizedImage() -> UIImage {
if (self.imageOrientation == UIImageOrientation.Up) {
return self;
}
UIGraphicsBeginImageContextWithOptions(self.size, false, self.scale);
let rect = CGRect(x: 0, y: 0, width: self.size.width, height: self.size.height)
self.drawInRect(rect)
let normalizedImage : UIImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext();
return normalizedImage;
}
Also in a more general function:
func fixOrientation(img:UIImage) -> UIImage {
if (img.imageOrientation == UIImageOrientation.Up) {
return img;
}
UIGraphicsBeginImageContextWithOptions(img.size, false, img.scale);
let rect = CGRect(x: 0, y: 0, width: img.size.width, height: img.size.height)
img.drawInRect(rect)
let normalizedImage : UIImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext();
return normalizedImage;
}
Swift 3 version:
func fixOrientation(img: UIImage) -> UIImage {
if (img.imageOrientation == .up) {
return img
}
UIGraphicsBeginImageContextWithOptions(img.size, false, img.scale)
let rect = CGRect(x: 0, y: 0, width: img.size.width, height: img.size.height)
img.draw(in: rect)
let normalizedImage = UIGraphicsGetImageFromCurrentImageContext()!
UIGraphicsEndImageContext()
return normalizedImage
}
Solution for Swift 3.1 for the orientation issue while capturing an image from the camera.
I have updated the solutions given by jake and Metal Heart.
UIImage extension
//MARK:- Image Orientation fix
extension UIImage {
func fixOrientation() -> UIImage {
// No-op if the orientation is already correct
if ( self.imageOrientation == UIImageOrientation.up ) {
return self;
}
// We need to calculate the proper transformation to make the image upright.
// We do it in 2 steps: Rotate if Left/Right/Down, and then flip if Mirrored.
var transform: CGAffineTransform = CGAffineTransform.identity
if ( self.imageOrientation == UIImageOrientation.down || self.imageOrientation == UIImageOrientation.downMirrored ) {
transform = transform.translatedBy(x: self.size.width, y: self.size.height)
transform = transform.rotated(by: CGFloat(Double.pi))
}
if ( self.imageOrientation == UIImageOrientation.left || self.imageOrientation == UIImageOrientation.leftMirrored ) {
transform = transform.translatedBy(x: self.size.width, y: 0)
transform = transform.rotated(by: CGFloat(Double.pi / 2.0))
}
if ( self.imageOrientation == UIImageOrientation.right || self.imageOrientation == UIImageOrientation.rightMirrored ) {
transform = transform.translatedBy(x: 0, y: self.size.height);
transform = transform.rotated(by: CGFloat(-Double.pi / 2.0));
}
if ( self.imageOrientation == UIImageOrientation.upMirrored || self.imageOrientation == UIImageOrientation.downMirrored ) {
transform = transform.translatedBy(x: self.size.width, y: 0)
transform = transform.scaledBy(x: -1, y: 1)
}
if ( self.imageOrientation == UIImageOrientation.leftMirrored || self.imageOrientation == UIImageOrientation.rightMirrored ) {
transform = transform.translatedBy(x: self.size.height, y: 0);
transform = transform.scaledBy(x: -1, y: 1);
}
// Now we draw the underlying CGImage into a new context, applying the transform
// calculated above.
let ctx: CGContext = CGContext(data: nil, width: Int(self.size.width), height: Int(self.size.height),
bitsPerComponent: self.cgImage!.bitsPerComponent, bytesPerRow: 0,
space: self.cgImage!.colorSpace!,
bitmapInfo: self.cgImage!.bitmapInfo.rawValue)!;
ctx.concatenate(transform)
if ( self.imageOrientation == UIImageOrientation.left ||
self.imageOrientation == UIImageOrientation.leftMirrored ||
self.imageOrientation == UIImageOrientation.right ||
self.imageOrientation == UIImageOrientation.rightMirrored ) {
ctx.draw(self.cgImage!, in: CGRect(x: 0,y: 0,width: self.size.height,height: self.size.width))
} else {
ctx.draw(self.cgImage!, in: CGRect(x: 0,y: 0,width: self.size.width,height: self.size.height))
}
// And now we just create a new UIImage from the drawing context and return it
return UIImage(cgImage: ctx.makeImage()!)
}
}
Swift 2.0
//MARK:- Image Orientation fix
extension UIImage {
func fixOrientation() -> UIImage {
// No-op if the orientation is already correct
if ( self.imageOrientation == UIImageOrientation.Up ) {
return self;
}
// We need to calculate the proper transformation to make the image upright.
// We do it in 2 steps: Rotate if Left/Right/Down, and then flip if Mirrored.
var transform: CGAffineTransform = CGAffineTransformIdentity
if ( self.imageOrientation == UIImageOrientation.Down || self.imageOrientation == UIImageOrientation.DownMirrored ) {
transform = CGAffineTransformTranslate(transform, self.size.width, self.size.height)
transform = CGAffineTransformRotate(transform, CGFloat(M_PI))
}
if ( self.imageOrientation == UIImageOrientation.Left || self.imageOrientation == UIImageOrientation.LeftMirrored ) {
transform = CGAffineTransformTranslate(transform, self.size.width, 0)
transform = CGAffineTransformRotate(transform, CGFloat(M_PI_2))
}
if ( self.imageOrientation == UIImageOrientation.Right || self.imageOrientation == UIImageOrientation.RightMirrored ) {
transform = CGAffineTransformTranslate(transform, 0, self.size.height);
transform = CGAffineTransformRotate(transform, CGFloat(-M_PI_2));
}
if ( self.imageOrientation == UIImageOrientation.UpMirrored || self.imageOrientation == UIImageOrientation.DownMirrored ) {
transform = CGAffineTransformTranslate(transform, self.size.width, 0)
transform = CGAffineTransformScale(transform, -1, 1)
}
if ( self.imageOrientation == UIImageOrientation.LeftMirrored || self.imageOrientation == UIImageOrientation.RightMirrored ) {
transform = CGAffineTransformTranslate(transform, self.size.height, 0);
transform = CGAffineTransformScale(transform, -1, 1);
}
// Now we draw the underlying CGImage into a new context, applying the transform
// calculated above.
let ctx: CGContextRef = CGBitmapContextCreate(nil, Int(self.size.width), Int(self.size.height),
CGImageGetBitsPerComponent(self.CGImage), 0,
CGImageGetColorSpace(self.CGImage),
CGImageGetBitmapInfo(self.CGImage).rawValue)!;
CGContextConcatCTM(ctx, transform)
if ( self.imageOrientation == UIImageOrientation.Left ||
self.imageOrientation == UIImageOrientation.LeftMirrored ||
self.imageOrientation == UIImageOrientation.Right ||
self.imageOrientation == UIImageOrientation.RightMirrored ) {
CGContextDrawImage(ctx, CGRectMake(0,0,self.size.height,self.size.width), self.CGImage)
} else {
CGContextDrawImage(ctx, CGRectMake(0,0,self.size.width,self.size.height), self.CGImage)
}
// And now we just create a new UIImage from the drawing context and return it
return UIImage(CGImage: CGBitmapContextCreateImage(ctx)!)
}
}
To use this UIImage extension in your code:
let fixOrientationImage = chosenImage.fixOrientation()
Place this in your image picker delegate methods, like this:
Swift 3.1
//MARK: Image Picker Delegates
func imagePickerController(
_ picker: UIImagePickerController,
didFinishPickingMediaWithInfo info: [String : Any]){
let chosenImage = info[UIImagePickerControllerOriginalImage] as! UIImage
profileImg.contentMode = .scaleAspectFill
let fixOrientationImage=chosenImage.fixOrientation()
profileImg.image = fixOrientationImage
dismiss(animated: true, completion: nil)
}
Swift 2.0
//MARK: Image Picker Delegates
func imagePickerController(
picker: UIImagePickerController,
didFinishPickingMediaWithInfo info: [String : AnyObject])
{
let chosenImage = info[UIImagePickerControllerOriginalImage] as! UIImage
profileImg.contentMode = .ScaleAspectFill
//Fix the image orientation
let fixOrientationImage=chosenImage.fixOrientation()
profileImg.image = fixOrientationImage
dismissViewControllerAnimated(true, completion: nil)
}
Swift 4.x/5.0 version of @an0's solution:
extension UIImage {
func upOrientationImage() -> UIImage? {
switch imageOrientation {
case .up:
return self
default:
UIGraphicsBeginImageContextWithOptions(size, false, scale)
draw(in: CGRect(origin: .zero, size: size))
let result = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return result
}
}
}
in swift ;)
UPDATE SWIFT 3.0 :D
func sFunc_imageFixOrientation(img:UIImage) -> UIImage {
// No-op if the orientation is already correct
if (img.imageOrientation == UIImageOrientation.up) {
return img;
}
// We need to calculate the proper transformation to make the image upright.
// We do it in 2 steps: Rotate if Left/Right/Down, and then flip if Mirrored.
var transform:CGAffineTransform = CGAffineTransform.identity
if (img.imageOrientation == UIImageOrientation.down
|| img.imageOrientation == UIImageOrientation.downMirrored) {
transform = transform.translatedBy(x: img.size.width, y: img.size.height)
transform = transform.rotated(by: CGFloat(M_PI))
}
if (img.imageOrientation == UIImageOrientation.left
|| img.imageOrientation == UIImageOrientation.leftMirrored) {
transform = transform.translatedBy(x: img.size.width, y: 0)
transform = transform.rotated(by: CGFloat(M_PI_2))
}
if (img.imageOrientation == UIImageOrientation.right
|| img.imageOrientation == UIImageOrientation.rightMirrored) {
transform = transform.translatedBy(x: 0, y: img.size.height);
transform = transform.rotated(by: CGFloat(-M_PI_2));
}
if (img.imageOrientation == UIImageOrientation.upMirrored
|| img.imageOrientation == UIImageOrientation.downMirrored) {
transform = transform.translatedBy(x: img.size.width, y: 0)
transform = transform.scaledBy(x: -1, y: 1)
}
if (img.imageOrientation == UIImageOrientation.leftMirrored
|| img.imageOrientation == UIImageOrientation.rightMirrored) {
transform = transform.translatedBy(x: img.size.height, y: 0);
transform = transform.scaledBy(x: -1, y: 1);
}
// Now we draw the underlying CGImage into a new context, applying the transform
// calculated above.
let ctx:CGContext = CGContext(data: nil, width: Int(img.size.width), height: Int(img.size.height),
bitsPerComponent: img.cgImage!.bitsPerComponent, bytesPerRow: 0,
space: img.cgImage!.colorSpace!,
bitmapInfo: img.cgImage!.bitmapInfo.rawValue)!
ctx.concatenate(transform)
if (img.imageOrientation == UIImageOrientation.left
|| img.imageOrientation == UIImageOrientation.leftMirrored
|| img.imageOrientation == UIImageOrientation.right
|| img.imageOrientation == UIImageOrientation.rightMirrored
) {
ctx.draw(img.cgImage!, in: CGRect(x:0,y:0,width:img.size.height,height:img.size.width))
} else {
ctx.draw(img.cgImage!, in: CGRect(x:0,y:0,width:img.size.width,height:img.size.height))
}
// And now we just create a new UIImage from the drawing context
let cgimg:CGImage = ctx.makeImage()!
let imgEnd:UIImage = UIImage(cgImage: cgimg)
return imgEnd
}
I used this page when designing my app that takes pictures and I found that the following method will correct the orientation and use less memory and processor than previous answers:
CGImageRef cgRef = image.CGImage;
image = [[UIImage alloc] initWithCGImage:cgRef scale:1.0 orientation:UIImageOrientationUp];
This basically just rewraps the actual image data with a new orientation. I was using @an0's code, but it makes a new image in memory, which can be taxing on a 3264x2448 image that you might get from a camera.
If you enable editing, then the edited image (as opposed to the original) will be oriented as expected:
UIImagePickerController *imagePickerController = [[UIImagePickerController alloc] init];
imagePickerController.allowsEditing = YES;
// set delegate and present controller
- (void)imagePickerController:(UIImagePickerController *)picker didFinishPickingMediaWithInfo:(NSDictionary *)info {
UIImage *photo = [info valueForKey:UIImagePickerControllerEditedImage];
// do whatever
}
Enabling editing allows the user to resize and move the image before tapping "Use Photo"
I achieved this by writing the few lines of code below:
extension UIImage {
public func correctlyOrientedImage() -> UIImage {
guard imageOrientation != .up else { return self }
UIGraphicsBeginImageContextWithOptions(size, false, scale)
draw(in: CGRect(origin: .zero, size: size))
let normalizedImage: UIImage = UIGraphicsGetImageFromCurrentImageContext()!
UIGraphicsEndImageContext()
return normalizedImage
}
}
This is what I have found for fixing the orientation issue:
UIImage *initialImage = [info objectForKey:@"UIImagePickerControllerOriginalImage"];
NSData *data = UIImagePNGRepresentation(self.initialImage);
UIImage *tempImage = [UIImage imageWithData:data];
UIImage *fixedOrientationImage = [UIImage imageWithCGImage:tempImage.CGImage
scale:initialImage.scale
orientation:self.initialImage.imageOrientation];
initialImage = fixedOrientationImage;
EDIT:
UIImage *initialImage = [info objectForKey:@"UIImagePickerControllerOriginalImage"];
NSData *data = UIImagePNGRepresentation(self.initialImage);
initialImage = [UIImage imageWithCGImage:[UIImage imageWithData:data].CGImage
scale:initialImage.scale
orientation:self.initialImage.imageOrientation];
Here’s a solution that doesn’t change the colorspace of the original image. If you want to normalize the orientation of a grayscale image, you are out of luck with all solutions based on UIGraphicsBeginImageContextWithOptions because it creates a context in the RGB colorspace. Instead, you have to create a context with the same properties as the original image and draw:
extension UIImage {
static let rotatedOrientations: [UIImage.Orientation] = [.left, .leftMirrored, .right, .rightMirrored]
func normalizedImage() -> UIImage {
if imageOrientation == .up {
return self
}
let image = self.cgImage!
let swapOrientation = UIImage.rotatedOrientations.contains(imageOrientation)
let width = swapOrientation ? image.height : image.width
let height = swapOrientation ? image.width : image.height
let context = CGContext(data: nil, width: width, height: height, bitsPerComponent: image.bitsPerComponent, bytesPerRow: image.bytesPerRow, space: image.colorSpace!, bitmapInfo: image.bitmapInfo.rawValue)!
let flipVertical = CGAffineTransform(a: 1, b: 0, c: 0, d: -1, tx: 0, ty: CGFloat(height));
context.concatenate(flipVertical)
UIGraphicsPushContext(context)
self.draw(at: .zero)
UIGraphicsPopContext()
return UIImage(cgImage: context.makeImage()!)
}
}
Update for Swift 3.1 based on Sourabh Sharma's answer, with code clean up.
extension UIImage {
func fixedOrientation() -> UIImage {
if imageOrientation == .up { return self }
var transform:CGAffineTransform = .identity
switch imageOrientation {
case .down, .downMirrored:
transform = transform.translatedBy(x: size.width, y: size.height).rotated(by: .pi)
case .left, .leftMirrored:
transform = transform.translatedBy(x: size.width, y: 0).rotated(by: .pi/2)
case .right, .rightMirrored:
transform = transform.translatedBy(x: 0, y: size.height).rotated(by: -.pi/2)
default: break
}
switch imageOrientation {
case .upMirrored, .downMirrored:
transform = transform.translatedBy(x: size.width, y: 0).scaledBy(x: -1, y: 1)
case .leftMirrored, .rightMirrored:
transform = transform.translatedBy(x: size.height, y: 0).scaledBy(x: -1, y: 1)
default: break
}
let ctx = CGContext(data: nil, width: Int(size.width), height: Int(size.height),
bitsPerComponent: cgImage!.bitsPerComponent, bytesPerRow: 0,
space: cgImage!.colorSpace!, bitmapInfo: cgImage!.bitmapInfo.rawValue)!
ctx.concatenate(transform)
switch imageOrientation {
case .left, .leftMirrored, .right, .rightMirrored:
ctx.draw(cgImage!, in: CGRect(x: 0, y: 0, width: size.height,height: size.width))
default:
ctx.draw(cgImage!, in: CGRect(x: 0, y: 0, width: size.width,height: size.height))
}
return UIImage(cgImage: ctx.makeImage()!)
}
}
Picker delegate method example:
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String : Any]) {
guard let originalImage = info[UIImagePickerControllerOriginalImage] as? UIImage else { return }
let fixedImage = originalImage.fixedOrientation()
// do your work
}
Here's UIImage extension for swift:
extension UIImage {
func fixOrientation() -> UIImage {
// No-op if the orientation is already correct
if ( self.imageOrientation == UIImageOrientation.Up ) {
return self;
}
// We need to calculate the proper transformation to make the image upright.
// We do it in 2 steps: Rotate if Left/Right/Down, and then flip if Mirrored.
var transform: CGAffineTransform = CGAffineTransformIdentity
if ( self.imageOrientation == UIImageOrientation.Down || self.imageOrientation == UIImageOrientation.DownMirrored ) {
transform = CGAffineTransformTranslate(transform, self.size.width, self.size.height)
transform = CGAffineTransformRotate(transform, CGFloat(M_PI))
}
if ( self.imageOrientation == UIImageOrientation.Left || self.imageOrientation == UIImageOrientation.LeftMirrored ) {
transform = CGAffineTransformTranslate(transform, self.size.width, 0)
transform = CGAffineTransformRotate(transform, CGFloat(M_PI_2))
}
if ( self.imageOrientation == UIImageOrientation.Right || self.imageOrientation == UIImageOrientation.RightMirrored ) {
transform = CGAffineTransformTranslate(transform, 0, self.size.height);
transform = CGAffineTransformRotate(transform, CGFloat(-M_PI_2));
}
if ( self.imageOrientation == UIImageOrientation.UpMirrored || self.imageOrientation == UIImageOrientation.DownMirrored ) {
transform = CGAffineTransformTranslate(transform, self.size.width, 0)
transform = CGAffineTransformScale(transform, -1, 1)
}
if ( self.imageOrientation == UIImageOrientation.LeftMirrored || self.imageOrientation == UIImageOrientation.RightMirrored ) {
transform = CGAffineTransformTranslate(transform, self.size.height, 0);
transform = CGAffineTransformScale(transform, -1, 1);
}
// Now we draw the underlying CGImage into a new context, applying the transform
// calculated above.
var ctx: CGContextRef = CGBitmapContextCreate(nil, Int(self.size.width), Int(self.size.height),
CGImageGetBitsPerComponent(self.CGImage), 0,
CGImageGetColorSpace(self.CGImage),
CGImageGetBitmapInfo(self.CGImage));
CGContextConcatCTM(ctx, transform)
if ( self.imageOrientation == UIImageOrientation.Left ||
self.imageOrientation == UIImageOrientation.LeftMirrored ||
self.imageOrientation == UIImageOrientation.Right ||
self.imageOrientation == UIImageOrientation.RightMirrored ) {
CGContextDrawImage(ctx, CGRectMake(0,0,self.size.height,self.size.width), self.CGImage)
} else {
CGContextDrawImage(ctx, CGRectMake(0,0,self.size.width,self.size.height), self.CGImage)
}
// And now we just create a new UIImage from the drawing context and return it
return UIImage(CGImage: CGBitmapContextCreateImage(ctx))!
}
}
Based on MetalHeart2003's earlier work.
Here is a UIImage extension in Swift 2 based on the accepted answer by @Anomie. It uses a clearer switch case. It also takes the optional value returned by CGBitmapContextCreateImage() into consideration.
extension UIImage {
func rotateImageByOrientation() -> UIImage {
// No-op if the orientation is already correct
guard self.imageOrientation != .Up else {
return self
}
// We need to calculate the proper transformation to make the image upright.
// We do it in 2 steps: Rotate if Left/Right/Down, and then flip if Mirrored.
var transform = CGAffineTransformIdentity;
switch (self.imageOrientation) {
case .Down, .DownMirrored:
transform = CGAffineTransformTranslate(transform, self.size.width, self.size.height)
transform = CGAffineTransformRotate(transform, CGFloat(M_PI))
case .Left, .LeftMirrored:
transform = CGAffineTransformTranslate(transform, self.size.width, 0)
transform = CGAffineTransformRotate(transform, CGFloat(M_PI_2))
case .Right, .RightMirrored:
transform = CGAffineTransformTranslate(transform, 0, self.size.height)
transform = CGAffineTransformRotate(transform, CGFloat(-M_PI_2))
default:
break
}
switch (self.imageOrientation) {
case .UpMirrored, .DownMirrored:
transform = CGAffineTransformTranslate(transform, self.size.width, 0)
transform = CGAffineTransformScale(transform, -1, 1)
case .LeftMirrored, .RightMirrored:
transform = CGAffineTransformTranslate(transform, self.size.height, 0)
transform = CGAffineTransformScale(transform, -1, 1)
default:
break
}
// Now we draw the underlying CGImage into a new context, applying the transform
// calculated above.
let ctx = CGBitmapContextCreate(nil, Int(self.size.width), Int(self.size.height),
CGImageGetBitsPerComponent(self.CGImage), 0,
CGImageGetColorSpace(self.CGImage),
CGImageGetBitmapInfo(self.CGImage).rawValue)
CGContextConcatCTM(ctx, transform)
switch (self.imageOrientation) {
case .Left, .LeftMirrored, .Right, .RightMirrored:
CGContextDrawImage(ctx, CGRectMake(0,0,self.size.height,self.size.width), self.CGImage)
default:
CGContextDrawImage(ctx, CGRectMake(0,0,self.size.width,self.size.height), self.CGImage)
}
// And now we just create a new UIImage from the drawing context
if let cgImage = CGBitmapContextCreateImage(ctx) {
return UIImage(CGImage: cgImage)
} else {
return self
}
}
}
I have experienced this issue with images taken with the camera, or saved to the camera roll from the camera. Images downloaded to the photo library from the Safari browser do not rotate when uploaded.
I was able to solve this issue by converting the image data to JPEG before uploading.
let image = info[UIImagePickerControllerOriginalImage] as! UIImage
let data = UIImageJPEGRepresentation(image, 1.0)
We can now use the data for uploading and the image will not get rotated after upload.
Hope this will work.
If I understand correctly, what you want to do is disregard the orientation of the UIImage? If so, then you could do this:
//image is your original image
image = [UIImage imageWithCGImage:[image CGImage]
scale:[image scale]
orientation: UIImageOrientationUp];
or in Swift :-
image = UIImage(CGImage: image.CGImage!, scale: image.scale, orientation:.Up)
It solved my cropping issue. Hope this is what you're looking for.
Swift 3 version based on @jake1981's, which was taken from @MetalHeart2003's
extension UIImage {
func fixOrientation() -> UIImage {
// No-op if the orientation is already correct
if ( self.imageOrientation == UIImageOrientation.up ) {
return self;
}
// We need to calculate the proper transformation to make the image upright.
// We do it in 2 steps: Rotate if Left/Right/Down, and then flip if Mirrored.
var transform: CGAffineTransform = CGAffineTransform.identity
if ( self.imageOrientation == UIImageOrientation.down || self.imageOrientation == UIImageOrientation.downMirrored ) {
transform = transform.translatedBy(x: self.size.width, y: self.size.height)
transform = transform.rotated(by: CGFloat(M_PI))
}
if ( self.imageOrientation == UIImageOrientation.left || self.imageOrientation == UIImageOrientation.leftMirrored ) {
transform = transform.translatedBy(x: self.size.width, y: 0)
transform = transform.rotated(by: CGFloat(M_PI_2))
}
if ( self.imageOrientation == UIImageOrientation.right || self.imageOrientation == UIImageOrientation.rightMirrored ) {
transform = transform.translatedBy(x: 0, y: self.size.height);
transform = transform.rotated(by: CGFloat(-M_PI_2));
}
if ( self.imageOrientation == UIImageOrientation.upMirrored || self.imageOrientation == UIImageOrientation.downMirrored ) {
transform = transform.translatedBy(x: self.size.width, y: 0)
transform = transform.scaledBy(x: -1, y: 1)
}
if ( self.imageOrientation == UIImageOrientation.leftMirrored || self.imageOrientation == UIImageOrientation.rightMirrored ) {
transform = transform.translatedBy(x: self.size.height, y: 0);
transform = transform.scaledBy(x: -1, y: 1);
}
// Now we draw the underlying CGImage into a new context, applying the transform
// calculated above.
let ctx: CGContext = CGContext(data: nil, width: Int(self.size.width), height: Int(self.size.height),
bitsPerComponent: self.cgImage!.bitsPerComponent, bytesPerRow: 0,
space: self.cgImage!.colorSpace!,
bitmapInfo: self.cgImage!.bitmapInfo.rawValue)!
ctx.concatenate(transform)
if ( self.imageOrientation == UIImageOrientation.left ||
self.imageOrientation == UIImageOrientation.leftMirrored ||
self.imageOrientation == UIImageOrientation.right ||
self.imageOrientation == UIImageOrientation.rightMirrored ) {
ctx.draw(self.cgImage!, in: CGRect(x: 0, y: 0, width: self.size.height, height: self.size.width))
} else {
ctx.draw(self.cgImage!, in: CGRect(x: 0, y: 0, width: self.size.width, height: self.size.height))
}
// And now we just create a new UIImage from the drawing context and return it
return UIImage(cgImage: ctx.makeImage()!)
}
}
@an0, thanks for the answer!
The only thing I would add is an autoreleasepool:
func fixOrientation(img: UIImage) -> UIImage? {
let result: UIImage?
if img.imageOrientation == .up {
result = img
} else {
result = autoreleasepool { () -> UIImage? in
UIGraphicsBeginImageContextWithOptions(img.size, false, img.scale)
let rect = CGRect(x: 0, y: 0, width: img.size.width, height: img.size.height)
img.draw(in: rect)
let normalizedImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return normalizedImage
}
}
return result
}
Here is the Swift 4.2 code to automatically fix your image orientation.
It returns a UIImage:
func AutofixImageOrientation(_ image: UIImage)->UIImage {
UIGraphicsBeginImageContext(image.size)
image.draw(at: .zero)
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return newImage ?? image
}
I transposed this into Xamarin:
private static UIImage FixImageOrientation(UIImage image)
{
if (image.Orientation == UIImageOrientation.Up)
{
return image;
}
var transform = CGAffineTransform.MakeIdentity();
float pi = (float)Math.PI;
switch (image.Orientation)
{
case UIImageOrientation.Down:
case UIImageOrientation.DownMirrored:
transform = CGAffineTransform.Translate(transform, image.Size.Width, image.Size.Height);
transform = CGAffineTransform.Rotate(transform, pi);
break;
case UIImageOrientation.Left:
case UIImageOrientation.LeftMirrored:
transform = CGAffineTransform.Translate(transform, image.Size.Width, 0);
transform = CGAffineTransform.Rotate(transform, pi / 2);
break;
case UIImageOrientation.Right:
case UIImageOrientation.RightMirrored:
transform = CGAffineTransform.Translate(transform, 0, image.Size.Height);
transform = CGAffineTransform.Rotate(transform, -(pi / 2));
break;
}
switch (image.Orientation)
{
case UIImageOrientation.UpMirrored:
case UIImageOrientation.DownMirrored:
transform = CGAffineTransform.Translate(transform, image.Size.Width, 0);
transform = CGAffineTransform.Scale(transform, -1, 1);
break;
case UIImageOrientation.LeftMirrored:
case UIImageOrientation.RightMirrored:
transform = CGAffineTransform.Translate(transform, image.Size.Height, 0);
transform = CGAffineTransform.Scale(transform, -1, 1);
break;
}
var ctx = new CGBitmapContext(null, (nint)image.Size.Width, (nint)image.Size.Height, image.CGImage.BitsPerComponent,
image.CGImage.BytesPerRow, image.CGImage.ColorSpace, image.CGImage.BitmapInfo);
ctx.ConcatCTM(transform);
switch (image.Orientation)
{
case UIImageOrientation.Left:
case UIImageOrientation.LeftMirrored:
case UIImageOrientation.Right:
case UIImageOrientation.RightMirrored:
ctx.DrawImage(new CGRect(0, 0, image.Size.Height, image.Size.Width), image.CGImage);
break;
default:
ctx.DrawImage(new CGRect(0, 0, image.Size.Width, image.Size.Height), image.CGImage);
break;
}
var cgimg = ctx.ToImage();
var img = new UIImage(cgimg);
ctx.Dispose();
ctx = null;
cgimg.Dispose();
cgimg = null;
return img;
}