/* Now allocate the buffer */
int dataBufferSize=(int)(optimalSize.height*optimalSize.width*(ImageFormat.getBitsPerPixel(parameters.getPreviewFormat())/8.0));
mBuffer= new byte[dataBufferSize];
/* The buffer where the current frame will be copied */
mFrame = new byte[dataBufferSize];
mCamera.addCallbackBuffer(mBuffer);
mCamera.setPreviewCallbackWithBuffer(new Camera.PreviewCallback()
{
private long timestamp=0;
public synchronized void onPreviewFrame(byte[] data, Camera camera)
{
System.arraycopy(data, 0, mFrame, 0, data.length);
Log.i("Completed copying date","Ready for processing");
try{
camera.addCallbackBuffer(mBuffer);
}catch (Exception e)
{
Log.e("Camera", "addCallbackBuffer error");
return;
}
return;
}
});
void EyeBlink::blink(IplImage* frame)
{
CvSeq* comp = 0;
CvRect window, eye;
int key = 0, nc, found;
int text_delay, stage = STAGE_INIT;
int valueBlink = 0;
int delay, i;
capture = cvCaptureFromCAM(0);
if (!capture)
exit_nicely("Cannot initialize camera!");
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, FRAME_WIDTH);
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, FRAME_HEIGHT);
frame = cvQueryFrame(capture);
if (!frame)
exit_nicely("cannot query frame!");
cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 0.4, 0.4, 0, 1, 8);
cvNamedWindow(wnd_name, 1);
/*for (delay = 20, i = 0; i < 6; i++, delay = 20)
while (delay)
{
frame = cvQueryFrame(capture);
if (!frame)
exit_nicely("cannot query frame!");
DRAW_TEXT(frame, msg[i], delay, 0);
cvShowImage(wnd_name, frame);
cvWaitKey(30);
}*/
storage = cvCreateMemStorage(0);
if (!storage)
exit_nicely("cannot allocate memory storage!");
kernel = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_CROSS, NULL);
gray = cvCreateImage(cvGetSize(frame), 8, 1);
prev = cvCreateImage(cvGetSize(frame), 8, 1);
diff = cvCreateImage(cvGetSize(frame), 8, 1);
tpl = cvCreateImage(cvSize(TPL_WIDTH, TPL_HEIGHT), 8, 1);
if (!kernel || !gray || !prev || !diff || !tpl)
exit_nicely("system error.");
gray->origin = frame->origin;
prev->origin = frame->origin;
diff->origin = frame->origin;
cvNamedWindow(wnd_debug, 1);
while (key != 'q')
{
int t=100;
frame = cvQueryFrame(capture);
if (!frame)
exit_nicely("cannot query frame!");
frame->origin = 0;
if (stage == STAGE_INIT)
window = cvRect(0, 0, frame->width, frame->height);
cvCvtColor(frame, gray, CV_BGR2GRAY);
nc = get_connected_components(gray, prev, window, &comp);
if (stage == STAGE_INIT && is_eye_pair(comp, nc, &eye))
{
int i;
for (i = 0; i < 5; i++)
{
frame = frame1[i]; /* frame1[] is meant to hold the buffered preview frames */
if (!frame)
exit_nicely("cannot query frame");
cvShowImage(wnd_name, frame);
if (diff)
cvShowImage(wnd_debug, diff);
cvWaitKey(30);
}
cvSetImageROI(gray, eye);
cvCopy(gray, tpl, NULL);
cvResetImageROI(gray);
stage = STAGE_TRACKING;
text_delay = 10;
}
if (stage == STAGE_TRACKING)
{
found = locate_eye(gray, tpl, &window, &eye);
if (!found || key == 'r')
stage = STAGE_INIT;
if (is_blink(comp, nc, window, eye))
text_delay = 10;
DRAW_RECTS(frame, diff, window, eye);
DRAW_TEXT(frame, "blink!", text_delay, 1);
}
cvShowImage(wnd_name, frame);
cvShowImage(wnd_debug, diff);
prev = (IplImage*)cvClone(gray);
key = cvWaitKey(15);
t--;
}
exit_nicely(NULL);
}
I am working on eye blink detection but am running into some challenges. First I captured the frames and buffered them using the setPreviewCallbackWithBuffer method on Android, but my question is: how can I make use of the frames that were stored as bytes? In other words, how do I eliminate the cvCaptureFromCAM and cvQueryFrame calls from the code above?
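One way to consume those buffered bytes on the OpenCV side, instead of going through cvCaptureFromCAM/cvQueryFrame, is to wrap each copied frame in a Mat and convert it to grayscale before handing it to the blink logic. The sketch below is only an outline: it assumes the preview format is the default NV21, that the OpenCV Java bindings (org.opencv.*) are available, and that previewWidth/previewHeight match the optimalSize used when mBuffer was allocated.

import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.imgproc.Imgproc;

// Hypothetical helper: turns one buffered NV21 preview frame into a grayscale Mat.
Mat frameToGray(byte[] nv21Bytes, int previewWidth, int previewHeight) {
    // NV21 = full-resolution Y plane followed by interleaved VU at half height
    Mat nv21 = new Mat(previewHeight + previewHeight / 2, previewWidth, CvType.CV_8UC1);
    nv21.put(0, 0, nv21Bytes);
    Mat gray = new Mat();
    Imgproc.cvtColor(nv21, gray, Imgproc.COLOR_YUV2GRAY_NV21);
    return gray;
}

The gray Mat then plays the role of the cvQueryFrame() result: its native address (gray.getNativeObjAddr()) can be passed through JNI so that the native blink code works on it directly instead of opening a capture device.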
The following code works with API 21 and above. However, when I run it on API levels below 21, nothing shows up on the TextureView.
My configuration for MediaCodec:
MediaFormat format = MediaFormat.createVideoFormat(MediaFormat.MIMETYPE_VIDEO_AVC, 1920, 1080);
format.setByteBuffer("csd-0", ByteBuffer.allocate(100));
format.setInteger(MediaFormat.KEY_MAX_INPUT_SIZE, 100000);
try {
m_codec = MediaCodec.createDecoderByType(MediaFormat.MIMETYPE_VIDEO_AVC);
m_codec.configure(format, new Surface(m_surface.getSurfaceTexture()), null, 0);
m_codec.start();
} catch (Exception e) {
e.printStackTrace();
}
The decoding part:
int inputIndex = m_codec.dequeueInputBuffer(-1);
if (inputIndex >= 0) {
ByteBuffer buffer;
MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
if (android.os.Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
buffer = m_codec.getInputBuffer(inputIndex);
buffer.clear();
}
else {
buffer=m_codec.getInputBuffers()[inputIndex];
buffer.put(videoBuffer,0,info.size);
}
if (buffer != null) {
buffer.put(videoBuffer, info.offset,videoBuffer.length);
m_codec.queueInputBuffer(inputIndex, 0, videoBuffer.length, 0, 0);
}
}
MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
int outputIndex = m_codec.dequeueOutputBuffer(info, 0);
if (outputIndex >= 0) {
m_codec.releaseOutputBuffer(outputIndex, true);
}
EDIT:
I found the solution: it works now that I have removed the following line:
format.setByteBuffer("csd-0", ByteBuffer.allocate(100));
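For reference, if csd-0 does need to be supplied for H.264 it has to carry the real codec-specific data (the SPS NAL unit, with the PPS in csd-1), not a dummy allocation; an empty 100-byte buffer is exactly what broke configure() here. A minimal sketch, assuming sps and pps already hold the raw start-code-prefixed NAL units extracted from the stream being decoded:

// Sketch only: sps/pps are assumed inputs, not something MediaCodec provides.
static void setCodecSpecificData(MediaFormat format, byte[] sps, byte[] pps) {
    format.setByteBuffer("csd-0", ByteBuffer.wrap(sps));
    format.setByteBuffer("csd-1", ByteBuffer.wrap(pps));
}

If the stream already contains in-band SPS/PPS, leaving csd-0/csd-1 out entirely, as in the edit above, is the simpler option.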
I need to find out whether a native Linux application window is maximized or minimized from a Java program.
I tried using X11.XGetWindowProperty() and I do get some results, but I can't extract any useful information from them.
I am using JNA 4.2.1 and Java 8 update 72 on Ubuntu 14.04 LTS. Any pointers will be very helpful; thanks in advance.
[Edit]
The code below gives me the result I need, but the result is not the same on every invocation: the atoms returned for the window vary between runs. Is there a more reliable way to get the window state?
private static X11 x11 = X11.INSTANCE;
private static Display dispy = x11.XOpenDisplay(null);
public static void main(String[] args) {
try {
X11.Atom[] atoms = getAtomProperties(
bytesToInt(getProperty(X11.XA_ATOM, X11.INSTANCE.XInternAtom(dispy, "_NET_WM_STATE", false))));
boolean hidden = false;
boolean vmax = false;
boolean hmax = false;
for (int i = 0; i < atoms.length; i++) {
X11.Atom atom = atoms[i];
if (atom == null)
continue;
String atomName = X11.INSTANCE.XGetAtomName(dispy, atom);
if ("_NET_WM_STATE_HIDDEN".equals(atomName)) {
hidden = true;
} else if ("_NET_WM_STATE_MAXIMIZED_VERT".equals(atomName)) {
vmax = true;
} else if ("_NET_WM_STATE_MAXIMIZED_HORZ".equals(atomName)) {
hmax = true;
}
}
if (hidden)
System.out.println("Window minimized");
else if (vmax && hmax && !hidden)
System.out.println("Window maximized");
else
System.out.println("Window normal");
} catch (X11Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
private static X11.Atom[] getAtomProperties(int[] ids) {
X11.Atom[] atoms = new X11.Atom[ids.length];
for (int i = 0; i < ids.length; i++) {
if (ids[i] == 0)
continue;
atoms[i] = new X11.Atom(ids[i]);
}
return atoms;
}
private static int[] bytesToInt(byte[] prop) {
if (prop == null)
return null;
int[] res = new int[prop.length / 4];
for (int i = 0; i < res.length; i++) {
res[i] = ((prop[i * 4 + 3] & 0xff) << 24) | ((prop[i * 4 + 2] & 0xff) << 16) | ((prop[i * 4 + 1] & 0xff) << 8)
| ((prop[i * 4 + 0] & 0xff));
if (res[i] != 0)
continue;
}
return res;
}
private static byte[] getProperty(X11.Atom xa_prop_type, X11.Atom xa_prop_name) throws X11Exception {
X11.Window window = new X11.Window(73400355); // hard-coded X window ID of the target window
X11.AtomByReference xa_ret_type_ref = new X11.AtomByReference();
IntByReference ret_format_ref = new IntByReference();
NativeLongByReference ret_nitems_ref = new NativeLongByReference();
NativeLongByReference ret_bytes_after_ref = new NativeLongByReference();
PointerByReference ret_prop_ref = new PointerByReference();
NativeLong long_offset = new NativeLong(0);
NativeLong long_length = new NativeLong(4096 / 4);
/*
* MAX_PROPERTY_VALUE_LEN / 4 explanation (XGetWindowProperty manpage):
*
* long_length = Specifies the length in 32-bit multiples of the data to be retrieved.
*/
if (x11.XGetWindowProperty(dispy, window, xa_prop_name, long_offset, long_length, false, xa_prop_type, xa_ret_type_ref,
ret_format_ref, ret_nitems_ref, ret_bytes_after_ref, ret_prop_ref) != X11.Success) {
String prop_name = x11.XGetAtomName(dispy, xa_prop_name);
throw new X11Exception("Cannot get " + prop_name + " property.");
}
X11.Atom xa_ret_type = xa_ret_type_ref.getValue();
Pointer ret_prop = ret_prop_ref.getValue();
if (xa_ret_type == null) {
// the specified property does not exist for the specified window
return null;
}
if (xa_ret_type == null || xa_prop_type == null || !xa_ret_type.toNative().equals(xa_prop_type.toNative())) {
x11.XFree(ret_prop);
String prop_name = x11.XGetAtomName(dispy, xa_prop_name);
throw new X11Exception("Invalid type of " + prop_name + " property");
}
int ret_format = ret_format_ref.getValue();
long ret_nitems = ret_nitems_ref.getValue().longValue();
// work out how many bytes each returned item occupies
int nbytes;
if (ret_format == 32)
nbytes = Native.LONG_SIZE;
else if (ret_format == 16)
nbytes = Native.LONG_SIZE / 2;
else if (ret_format == 8)
nbytes = 1;
else if (ret_format == 0)
nbytes = 0;
else
throw new X11Exception("Invalid return format");
int length = Math.min((int) ret_nitems * nbytes, 4096);
byte[] ret = ret_prop.getByteArray(0, length);
x11.XFree(ret_prop);
return ret;
}
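One likely cause of the run-to-run variation is the window ID hard-coded inside getProperty (new X11.Window(73400355)): X window IDs are assigned per session, so on a later invocation that ID may refer to a different window or to no window at all. A small sketch of the calling side, assuming getProperty is changed to take the target window as a parameter and the ID comes from outside the program (for example the value printed by xwininfo) rather than being baked in:

// Hypothetical usage once getProperty(X11.Window, X11.Atom, X11.Atom) accepts the
// window explicitly instead of constructing new X11.Window(73400355) internally.
X11.Window target = new X11.Window(Long.decode(args[0]).longValue());
byte[] prop = getProperty(target, X11.XA_ATOM,
        X11.INSTANCE.XInternAtom(dispy, "_NET_WM_STATE", false));
X11.Atom[] atoms = getAtomProperties(bytesToInt(prop));

With a stable, correct window ID, the _NET_WM_STATE atoms should be consistent between runs as long as the window itself has not changed state.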
I want to make this game in Processing.
In the switch statement, cases 0, 1, and 2 are all displayed at the same time, and I don't know how to fix it.
Also, after case 2 (game over), pressing the key '1' should start the game again,
but I think it jumps back into case 1 while still in the game-over state...
How can I fix this?
PImage work[] = new PImage[3];
float workSize[] = new float[3];
float workX[] = new float[3];
float workY[] = new float[3];
float workS[] = new float[3];
PImage handA, handB;
PFont font;
int level;
boolean gameover = false;
boolean selected[] = new boolean [3];
int salary = 0;
void setup(){
size(1000,800);
background(255);
imageMode(CENTER);
for (int i=0; i<3; i++) {
workX[i] = random(0, width);
workY[i] = random(0, height);
selected[i] = false;
workSize[i] = 120;
}
handA = loadImage("handA.png");
handB = loadImage("handB.png");
work[0] = loadImage("work0.png");
work[1] = loadImage("work1.png");
work[2] = loadImage("work2.png");
font = createFont("Gulim", 48);
textFont(font);
textAlign(CENTER, CENTER);
}
void draw(){
background(255);
if (mousePressed) {
cursor(handB, 0, 0);
} else {
cursor(handA, 0, 0);
}
switch (level) {
default: // press '1' to start the game
fill(0);
text("Press 1 to get a job", width/2, height/2);
if (key == '1') {
level = 1;
}
break;
case 1:
game();
if (gameover == true) {
level = 2;
}
break;
case 2: // press '1' to start again
fill(0);
text("퇴직금 : "+ salary + " + (비정규직으로 퇴직금 없음)", width/2, height/2-100);
text("일을 못해서 정리해고", width/2, height/2);
text("1을 눌러 다시 일 얻기", width/2, height/2+100);
if (key == '1') {
level = 1;
}
break;
}
}
void game() {
for (int i=0; i<3; i++) {
float clickedDist = dist(workX[i], workY[i], mouseX, mouseY);
if (clickedDist<workSize[i]/2 && mousePressed) {
workSize[i] = workSize[i] - 2;
} else {
workSize[i] = workSize[i] + 0.7;
}
if (workSize[i]<100) {
workSize[i] = 0;
}
if (workSize[i]>400) {
gameover = true;
}
if (workSize[i] == 0 && selected[i] == false) {
salary = salary + 50;
selected[i] = true;
workX[i] = random(0, width);
workY[i] = random(0, height);
selected[i] = false;
workSize[i] = 120;
}
if (salary > 150) {
workS[i] = workSize[i] + 0.5;
workSize[i] = workS[i];
}
if (abs(mouseX-workX[i]) < workSize[i]/2 && abs(mouseY-workY[i]) < workSize[i]/2) {
workX[i] += random(-5,5);
workY[i] += random(-5,5);
}
image(work[i], workX[i], workY[i], workSize[i], workSize[i]);
pushMatrix();
fill(0);
textSize(48);
text("봉급 : "+ salary, textWidth("salary"), (textAscent()+textDescent()/2));
popMatrix();
}
}
All you have to do is reset any variables that store the state of your game, such as your level variable. Something like this:
void keyPressed(){
if(gameover && key == '1'){
gameover = false;
level = 1;
}
}
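Since level is not the only thing that carries game state, the same reset can also cover salary and the three work items, using the same values they get in setup(). A possible extension of the keyPressed() above:

void keyPressed() {
  if (gameover && key == '1') {
    gameover = false;
    salary = 0;
    for (int i = 0; i < 3; i++) {
      workSize[i] = 120;
      workX[i] = random(0, width);
      workY[i] = random(0, height);
      selected[i] = false;
    }
    level = 1;
  }
}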
Note: I'm repeating the background from my previous question so that all the related information is in one place.
I'm capturing an image from an Android mobile device; it's a 24-bit JPEG at 72x72 DPI. When I try to convert this JPEG to TIFF using LibTiff.Net and set the tag Photometric Interpretation = 0 (MinIsWhite), the image turns negative (white becomes black and black becomes white). The environment is Windows 8.1 64-bit, Visual Studio 2012. The tag must have the value 0, where 0 means white is zero.
I absolutely must use Photometric.MINISWHITE in these images, so I tried inverting the image data before writing it to TIFF, as in the code below. But then the compression changes to LZW instead of CCITT4, Photometric changes from MINISWHITE to MINISBLACK, the FillOrder tag is removed, the PlanarConfig tag is removed, a new Predictor tag is added with value 1, and the image turns negative again.
public partial class Form1 : Form
{
private const TiffTag TIFFTAG_ASCIITAG = (TiffTag)666;
private const TiffTag TIFFTAG_LONGTAG = (TiffTag)667;
private const TiffTag TIFFTAG_SHORTTAG = (TiffTag)668;
private const TiffTag TIFFTAG_RATIONALTAG = (TiffTag)669;
private const TiffTag TIFFTAG_FLOATTAG = (TiffTag)670;
private const TiffTag TIFFTAG_DOUBLETAG = (TiffTag)671;
private const TiffTag TIFFTAG_BYTETAG = (TiffTag)672;
public Form1()
{
InitializeComponent();
}
private void button1_Click(object sender, EventArgs e)
{
using (Bitmap bmp = new Bitmap(@"D:\Projects\ITests\images\IMG_2.jpg"))
{
// convert jpg image to tiff
byte[] tiffBytes = GetTiffImageBytes(bmp, false);
File.WriteAllBytes(@"D:\Projects\ITests\images\output.tif", tiffBytes);
//Invert the tiff image
Bitmap bmpTiff = new Bitmap(@"D:\Projects\ITests\images\output.tif");
Bitmap FBitmap = Transform(bmpTiff);
FBitmap.Save(@"D:\Projects\ITests\images\invOutput1.tif");
}
}
public static byte[] GetTiffImageBytes(Bitmap img, bool byScanlines)
{
try
{
byte[] raster = GetImageRasterBytes(img);
using (MemoryStream ms = new MemoryStream())
{
using (Tiff tif = Tiff.ClientOpen("InMemory", "w", ms, new TiffStream()))
{
if (tif == null)
return null;
tif.SetField(TiffTag.IMAGEWIDTH, img.Width);
tif.SetField(TiffTag.IMAGELENGTH, img.Height);
tif.SetField(TiffTag.COMPRESSION, Compression.CCITTFAX4);
tif.SetField(TiffTag.PHOTOMETRIC, Photometric.MINISWHITE);
tif.SetField(TiffTag.ROWSPERSTRIP, img.Height);
tif.SetField(TiffTag.XRESOLUTION, 200);
tif.SetField(TiffTag.YRESOLUTION, 200);
tif.SetField(TiffTag.SUBFILETYPE, 0);
tif.SetField(TiffTag.BITSPERSAMPLE, 1);
tif.SetField(TiffTag.FILLORDER, FillOrder.LSB2MSB);
tif.SetField(TiffTag.ORIENTATION, BitMiracle.LibTiff.Classic.Orientation.TOPLEFT);
tif.SetField(TiffTag.SAMPLESPERPIXEL, 1);
tif.SetField(TiffTag.RESOLUTIONUNIT, ResUnit.INCH);
tif.SetField(TiffTag.PLANARCONFIG, PlanarConfig.CONTIG);
int tiffStride = tif.ScanlineSize();
int stride = raster.Length / img.Height;
if (byScanlines)
{
// raster stride MAY be bigger than TIFF stride (due to padding in raster bits)
for (int i = 0, offset = 0; i < img.Height; i++)
{
bool res = tif.WriteScanline(raster, offset, i, 0);
if (!res)
return null;
offset += stride;
}
}
else
{
if (tiffStride < stride)
{
// raster stride is bigger than TIFF stride
// this is due to padding in raster bits
// we need to create correct TIFF strip and write it into TIFF
byte[] stripBits = new byte[tiffStride * img.Height];
for (int i = 0, rasterPos = 0, stripPos = 0; i < img.Height; i++)
{
System.Buffer.BlockCopy(raster, rasterPos, stripBits, stripPos, tiffStride);
rasterPos += stride;
stripPos += tiffStride;
}
// Write the information to the file
int n = tif.WriteEncodedStrip(0, stripBits, stripBits.Length);
if (n <= 0)
return null;
}
else
{
// Write the information to the file
int n = tif.WriteEncodedStrip(0, raster, raster.Length);
if (n <= 0)
return null;
}
}
}
return ms.GetBuffer();
}
}
catch (Exception)
{
return null;
}
}
public static byte[] GetImageRasterBytes(Bitmap img)
{
// Specify full image
Rectangle rect = new Rectangle(0, 0, img.Width, img.Height);
Bitmap bmp = img;
byte[] bits = null;
try
{
// Lock the managed memory
if (img.PixelFormat != PixelFormat.Format1bppIndexed)
bmp = convertToBitonal(img);
BitmapData bmpdata = bmp.LockBits(rect, ImageLockMode.ReadOnly, PixelFormat.Format1bppIndexed);
// Declare an array to hold the bytes of the bitmap.
bits = new byte[bmpdata.Stride * bmpdata.Height];
// Copy the sample values into the array.
Marshal.Copy(bmpdata.Scan0, bits, 0, bits.Length);
// Release managed memory
bmp.UnlockBits(bmpdata);
}
finally
{
if (bmp != img)
bmp.Dispose();
}
return bits;
}
private static Bitmap convertToBitonal(Bitmap original)
{
int sourceStride;
byte[] sourceBuffer = extractBytes(original, out sourceStride);
// Create destination bitmap
Bitmap destination = new Bitmap(original.Width, original.Height,
PixelFormat.Format1bppIndexed);
destination.SetResolution(original.HorizontalResolution, original.VerticalResolution);
// Lock destination bitmap in memory
BitmapData destinationData = destination.LockBits(
new Rectangle(0, 0, destination.Width, destination.Height),
ImageLockMode.WriteOnly, PixelFormat.Format1bppIndexed);
// Create buffer for destination bitmap bits
int imageSize = destinationData.Stride * destinationData.Height;
byte[] destinationBuffer = new byte[imageSize];
int sourceIndex = 0;
int destinationIndex = 0;
int pixelTotal = 0;
byte destinationValue = 0;
int pixelValue = 128;
int height = destination.Height;
int width = destination.Width;
int threshold = 500;
for (int y = 0; y < height; y++)
{
sourceIndex = y * sourceStride;
destinationIndex = y * destinationData.Stride;
destinationValue = 0;
pixelValue = 128;
for (int x = 0; x < width; x++)
{
// Compute pixel brightness (i.e. total of Red, Green, and Blue values)
pixelTotal = sourceBuffer[sourceIndex + 1] + sourceBuffer[sourceIndex + 2] +
sourceBuffer[sourceIndex + 3];
if (pixelTotal > threshold)
destinationValue += (byte)pixelValue;
if (pixelValue == 1)
{
destinationBuffer[destinationIndex] = destinationValue;
destinationIndex++;
destinationValue = 0;
pixelValue = 128;
}
else
{
pixelValue >>= 1;
}
sourceIndex += 4;
}
if (pixelValue != 128)
destinationBuffer[destinationIndex] = destinationValue;
}
Marshal.Copy(destinationBuffer, 0, destinationData.Scan0, imageSize);
destination.UnlockBits(destinationData);
return destination;
}
private static byte[] extractBytes(Bitmap original, out int stride)
{
Bitmap source = null;
try
{
// If original bitmap is not already in 32 BPP, ARGB format, then convert
if (original.PixelFormat != PixelFormat.Format32bppArgb)
{
source = new Bitmap(original.Width, original.Height, PixelFormat.Format32bppArgb);
source.SetResolution(original.HorizontalResolution, original.VerticalResolution);
using (Graphics g = Graphics.FromImage(source))
{
g.DrawImageUnscaled(original, 0, 0);
}
}
else
{
source = original;
}
// Lock source bitmap in memory
BitmapData sourceData = source.LockBits(
new Rectangle(0, 0, source.Width, source.Height),
ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
// Copy image data to binary array
int imageSize = sourceData.Stride * sourceData.Height;
byte[] sourceBuffer = new byte[imageSize];
Marshal.Copy(sourceData.Scan0, sourceBuffer, 0, imageSize);
// Unlock source bitmap
source.UnlockBits(sourceData);
stride = sourceData.Stride;
return sourceBuffer;
}
finally
{
if (source != original)
source.Dispose();
}
}
public Bitmap Transform(Bitmap bitmapImage)
{
var bitmapRead = bitmapImage.LockBits(new Rectangle(0, 0, bitmapImage.Width, bitmapImage.Height), ImageLockMode.ReadOnly, PixelFormat.Format32bppPArgb);
var bitmapLength = bitmapRead.Stride * bitmapRead.Height;
var bitmapBGRA = new byte[bitmapLength];
Marshal.Copy(bitmapRead.Scan0, bitmapBGRA, 0, bitmapLength);
bitmapImage.UnlockBits(bitmapRead);
for (int i = 0; i < bitmapLength; i += 4)
{
bitmapBGRA[i] = (byte)(255 - bitmapBGRA[i]);
bitmapBGRA[i + 1] = (byte)(255 - bitmapBGRA[i + 1]);
bitmapBGRA[i + 2] = (byte)(255 - bitmapBGRA[i + 2]);
// [i + 3] = ALPHA.
}
var bitmapWrite = bitmapImage.LockBits(new Rectangle(0, 0, bitmapImage.Width, bitmapImage.Height), ImageLockMode.WriteOnly, PixelFormat.Format32bppPArgb);
Marshal.Copy(bitmapBGRA, 0, bitmapWrite.Scan0, bitmapLength);
bitmapImage.UnlockBits(bitmapWrite);
return bitmapImage;
}
}
You should invert the image bytes in the GetTiffImageBytes method, before writing them to TIFF. Also, the Transform method converts the bi-level image to a 32bpp one, and that is why you end up with an LZW-compressed image.
So, add the following code
for (int k = 0; k < raster.Length; k++)
raster[k] = (byte)(~raster[k]);
after byte[] raster = GetImageRasterBytes(img); in the GetTiffImageBytes method. This will invert the image bytes. And don't use the following code:
//Invert the tiff image
Bitmap bmpTiff = new Bitmap(@"D:\Projects\ITests\images\output.tif");
Bitmap FBitmap = Transform(bmpTiff);
FBitmap.Save(@"D:\Projects\ITests\images\invOutput1.tif");
I am trying to load an image from a folder into my application. The code I am using is:
FileConnection fc = null;
DataInputStream in = null;
DataOutputStream out = null;
try {
fc = (FileConnection)Connector.open("file:///e:/Images/Abc.jpg");
int length = (int)fc.fileSize();//possible loss of precision may throw error
byte[] data = null;
if (length != -1) {
data = new byte[length];
in = new DataInputStream(fc.openInputStream());
in.readFully(data);
}
else {
int chunkSize = 112;
int index = 0;
int readLength = 0;
in = new DataInputStream(fc.openInputStream());
data = new byte[chunkSize];
do {
if (data.length < index + chunkSize) {
byte[] newData = new byte[index + chunkSize];
System.arraycopy(data, 0, newData, 0, data.length);
data = newData;
}
readLength = in.read(data, index, chunkSize);
index += readLength;
} while (readLength == chunkSize);
length = index;
}
Image image = Image.createImage(data, 0, length);
image = createThumbnail(image);
ImageItem imageItem = new ImageItem(null, image, 0, null);
mForm.append(imageItem);
mForm.setTitle("Done.");
fc = (FileConnection)Connector.open("file:///e:/Images/Abc.jpg");
if(!fc.exists()){
try{
fc.create();
}catch(Exception ce){System.out.print("Create Error: " + ce);}
}
out = new DataOutputStream(fc.openOutputStream());
out.write(data);
}
catch (IOException ioe) {
StringItem stringItem = new StringItem(null, ioe.toString());
mForm.append(stringItem);
mForm.setTitle("Done.");
}
finally {
try {
if (in != null) in.close();
if (out != null) out.close();
if (fc != null) fc.close();
}
catch (IOException ioe) {}
I thought the problem might be with the image, so I tried to resize it:
private Image createThumbnail(Image image)
{
int sourceWidth = image.getWidth();
int sourceHeight = image.getHeight();
int thumbWidth = 18;
int thumbHeight = 23;
Image thumb = Image.createImage(thumbWidth, thumbHeight);
Graphics g = thumb.getGraphics();
for (int y = 0; y < thumbHeight; y++)
{
for (int x = 0; x < thumbWidth; x++)
{
g.setClip(x, y, 1, 1);
int dx = x * sourceWidth / thumbWidth;
int dy = y * sourceHeight / thumbHeight;
g.drawImage(image, x - dx, y - dy, Graphics.LEFT | Graphics.TOP);
}
}
Image immutableThumb = Image.createImage(thumb);
return immutableThumb;
}
It still throws an out-of-memory / native image decode error exception. Can somebody please help me sort this out?
This may occur for two reasons.
Image size: some phones have a small heap and cannot decode large images, so try a smaller image.
The second possibility is that the image is corrupt, so try another image.
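If the failure really is heap pressure rather than a corrupt file, one rough mitigation is to release whatever you can and fail gracefully around the decode. A sketch only, reusing the data, length and mForm variables from the question:

// Guard the native decode; OutOfMemoryError is catchable on CLDC/MIDP.
Image image = null;
try {
    System.gc();
    image = Image.createImage(data, 0, length);
} catch (OutOfMemoryError oom) {
    data = null; // drop the raw bytes so they can be collected
    mForm.append(new StringItem(null, "Image is too large for this device's heap"));
}

Beyond that, decoding a smaller source image (or a lower-resolution copy produced on the server side) is usually the only reliable fix on phones with very small heaps.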