How To Convert C# Code into Android (Motion Detection) - feature-detection

public class MotionDetector1 : IMotionDetector
{
private IFilter grayscaleFilter = new GrayscaleBT709( );
private Difference differenceFilter = new Difference( );
private Threshold thresholdFilter = new Threshold( 15 );
private IFilter erosionFilter = new Erosion( );
private Merge mergeFilter = new Merge( );
private IFilter extrachChannel = new ExtractChannel( RGB.R );
private ReplaceChannel replaceChannel = new ReplaceChannel( RGB.R, null );
private Bitmap backgroundFrame;
private BitmapData bitmapData;
private bool calculateMotionLevel = false;
private int width; // image width
private int height; // image height
private int pixelsChanged;
// Motion level calculation - calculate or not motion level
public bool MotionLevelCalculation
{
get { return calculateMotionLevel; }
set { calculateMotionLevel = value; }
}
// Motion level - amount of changes in percents
public double MotionLevel
{
get { return (double) pixelsChanged / ( width * height ); }
}
// Constructor
public MotionDetector1( )
{
}
// Reset detector to initial state
public void Reset( )
{
if ( backgroundFrame != null )
{
backgroundFrame.Dispose( );
backgroundFrame = null;
}
}
// Process new frame
public void ProcessFrame( ref Bitmap image )
{
if ( backgroundFrame == null )
{
// create initial background image
backgroundFrame = grayscaleFilter.Apply( image );
// get image dimension
width = image.Width;
height = image.Height;
// just return for the first time
return;
}
Bitmap tmpImage;
// apply the grayscale filter
tmpImage = grayscaleFilter.Apply( image );
// set background frame as an overlay for the difference filter
differenceFilter.OverlayImage = backgroundFrame;
// apply difference filter
Bitmap tmpImage2 = differenceFilter.Apply( tmpImage );
// lock the temporary image and apply some filters on the locked data
bitmapData = tmpImage2.LockBits( new Rectangle( 0, 0, width, height ),
ImageLockMode.ReadWrite, PixelFormat.Format8bppIndexed );
// threshold filter
thresholdFilter.ApplyInPlace( bitmapData );
// erosion filter
Bitmap tmpImage3 = erosionFilter.Apply( bitmapData );
// unlock temporary image
tmpImage2.UnlockBits( bitmapData );
tmpImage2.Dispose( );
// calculate amount of changed pixels
pixelsChanged = ( calculateMotionLevel ) ?
CalculateWhitePixels( tmpImage3 ) : 0;
// dispose old background
backgroundFrame.Dispose( );
// set background to the current frame
backgroundFrame = tmpImage;
// extract red channel from the original image
Bitmap redChannel = extrachChannel.Apply( image );
// merge red channel with moving object
mergeFilter.OverlayImage = tmpImage3;
Bitmap tmpImage4 = mergeFilter.Apply( redChannel );
redChannel.Dispose( );
tmpImage3.Dispose( );
// replace red channel in the original image
replaceChannel.ChannelImage = tmpImage4;
Bitmap tmpImage5 = replaceChannel.Apply( image );
tmpImage4.Dispose( );
image.Dispose( );
image = tmpImage5;
}
// Calculate white pixels
private int CalculateWhitePixels( Bitmap image )
{
int count = 0;
// lock difference image
BitmapData data = image.LockBits( new Rectangle( 0, 0, width, height ),
ImageLockMode.ReadOnly, PixelFormat.Format8bppIndexed );
int offset = data.Stride - width;
unsafe
{
byte * ptr = (byte *) data.Scan0.ToPointer( );
for ( int y = 0; y < height; y++ )
{
for ( int x = 0; x < width; x++, ptr++ )
{
count += ( (*ptr) >> 7 );
}
ptr += offset;
}
}
// unlock image
image.UnlockBits( data );
return count;
}
}


Index out of range exception while reading pixel data

Related post: Stride of the BitmapData is different than the original dimension.
I have taken the source code from here and modified it.
The code generates a variety of exceptions on different occasions.
Error in BitmapLocker.cs
At the following line in Lock(),
// Copy data from IntegerPointer to _imageData
Marshal.Copy(IntegerPointer, _imageData, 0, _imageData.Length);
The following exception is being generated:
An unhandled exception of type 'System.AccessViolationException'
occurred in mscorlib.dll
Additional information: Attempted to read or write protected memory.
This is often an indication that other memory is corrupt.
For the following driver code,
double[,] mask = new double[,]
{
{ .11, .11, .11, },
{ .11, .11, .11, },
{ .11, .11, .11, },
};
Bitmap bitmap = ImageDataConverter.ToBitmap(mask);
BitmapLocker locker = new BitmapLocker(bitmap);
locker.Lock();
for (int i = 0; i < bitmap.Width; i++)
{
for (int j = 0; j < bitmap.Height; j++)
{
Color c = locker.GetPixel(i, j);
locker.SetPixel(i, j, c);
}
}
locker.Unlock();
At the following line in GetPixel(),
if (i > dataLength)
{
throw new IndexOutOfRangeException();
}
An unhandled exception of type 'System.IndexOutOfRangeException'
occurred in Simple.ImageProcessing.Framework.dll
Additional information: Index was outside the bounds of the array.
At the following line in SetPixel(),
if (ColorDepth == 8)
{
_imageData[i] = color.B;
}
An unhandled exception of type 'System.Exception' occurred in
Simple.ImageProcessing.Framework.dll
Additional information: (0, 0), 262144, Index was outside the bounds
of the array., i=262144
Error in Driver program
At the line,
Color c = bmp.GetPixel(i, j);
An unhandled exception of type 'System.InvalidOperationException'
occurred in System.Drawing.dll
Additional information: Bitmap region is already locked.
Source Code:
public class BitmapLocker : IDisposable
{
//private properties
Bitmap _bitmap = null;
bool _isLocked = false;
BitmapData _bitmapData = null;
private byte[] _imageData = null;
//public properties
public IntPtr IntegerPointer { get; private set; }
public int Width { get { return _bitmap.Width; } }
public int Height { get { return _bitmap.Height; } }
public int Stride { get { return _bitmapData.Stride; } }
public int ColorDepth { get { return Bitmap.GetPixelFormatSize(_bitmap.PixelFormat); } }
public int Channels { get { return ColorDepth / 8; } }
public int PaddingOffset { get { return _bitmapData.Stride - (_bitmap.Width * Channels); } }
public PixelFormat ImagePixelFormat { get { return _bitmap.PixelFormat; } }
public bool IsGrayscale { get { return Grayscale.IsGrayscale(_bitmap); } }
//Constructor
public BitmapLocker(Bitmap source)
{
IntegerPointer = IntPtr.Zero;
this._bitmap = source;
}
/// Lock bitmap
public void Lock()
{
if (_isLocked == false)
{
try
{
// Lock bitmap (so that no movement of data by .NET framework) and return bitmap data
_bitmapData = _bitmap.LockBits(
new Rectangle(0, 0, _bitmap.Width, _bitmap.Height),
ImageLockMode.ReadWrite,
_bitmap.PixelFormat);
// Create byte array to copy pixel values
int noOfBitsNeededForStorage = _bitmapData.Stride * _bitmapData.Height;
int noOfBytesNeededForStorage = noOfBitsNeededForStorage / 8;
_imageData = new byte[noOfBytesNeededForStorage * ColorDepth];//# of bytes needed for storage
IntegerPointer = _bitmapData.Scan0;
// Copy data from IntegerPointer to _imageData
Marshal.Copy(IntegerPointer, _imageData, 0, _imageData.Length);
_isLocked = true;
}
catch (Exception)
{
throw;
}
}
else
{
throw new Exception("Bitmap is already locked.");
}
}
/// Unlock bitmap
public void Unlock()
{
if (_isLocked == true)
{
try
{
// Copy data from _imageData to IntegerPointer
Marshal.Copy(_imageData, 0, IntegerPointer, _imageData.Length);
// Unlock bitmap data
_bitmap.UnlockBits(_bitmapData);
_isLocked = false;
}
catch (Exception)
{
throw;
}
}
else
{
throw new Exception("Bitmap is not locked.");
}
}
public Color GetPixel(int x, int y)
{
Color clr = Color.Empty;
// Get color components count
int channels = ColorDepth / 8;
// Get start index of the specified pixel
int i = (Height - y - 1) * Stride + x * channels;
int dataLength = _imageData.Length - channels;
if (i > dataLength)
{
throw new IndexOutOfRangeException();
}
if (ColorDepth == 32) // For 32 bpp get Red, Green, Blue and Alpha
{
byte b = _imageData[i];
byte g = _imageData[i + 1];
byte r = _imageData[i + 2];
byte a = _imageData[i + 3]; // a
clr = Color.FromArgb(a, r, g, b);
}
if (ColorDepth == 24) // For 24 bpp get Red, Green and Blue
{
byte b = _imageData[i];
byte g = _imageData[i + 1];
byte r = _imageData[i + 2];
clr = Color.FromArgb(r, g, b);
}
if (ColorDepth == 8)
// For 8 bpp get color value (Red, Green and Blue values are the same)
{
byte c = _imageData[i];
clr = Color.FromArgb(c, c, c);
}
return clr;
}
public void SetPixel(int x, int y, Color color)
{
// Get color components count
int cCount = ColorDepth / 8;
// Get start index of the specified pixel
int i = ((Height - y -1) * Stride + x * cCount);
try
{
if (ColorDepth == 32) // For 32 bpp set Red, Green, Blue and Alpha
{
_imageData[i] = color.B;
_imageData[i + 1] = color.G;
_imageData[i + 2] = color.R;
_imageData[i + 3] = color.A;
}
if (ColorDepth == 24) // For 24 bpp set Red, Green and Blue
{
_imageData[i] = color.B;
_imageData[i + 1] = color.G;
_imageData[i + 2] = color.R;
}
if (ColorDepth == 8)
// For 8 bpp set color value (Red, Green and Blue values are the same)
{
_imageData[i] = color.B;
}
}
catch(Exception ex)
{
throw new Exception("("+x+", "+y+"), "+_imageData.Length+", "+ ex.Message+", i=" + i);
}
}
public void Dispose()
{
Dispose(true);
GC.SuppressFinalize(this);
}
protected virtual void Dispose(bool disposing)
{
if (disposing)
{
// free managed resources
_bitmap = null;
_bitmapData = null;
_imageData = null;
IntegerPointer = IntPtr.Zero;
}
// free native resources if there are any.
}
}
ImageDataConverter.cs
public static Bitmap ToBitmap(double[,] input)
{
int width = input.GetLength(0);
int height = input.GetLength(1);
Bitmap output = Grayscale.CreateGrayscaleImage(width, height);
BitmapData data = output.LockBits(new Rectangle(0, 0, width, height),
ImageLockMode.WriteOnly,
output.PixelFormat);
int pixelSize = System.Drawing.Image.GetPixelFormatSize(PixelFormat.Format8bppIndexed) / 8;
int offset = data.Stride - width * pixelSize;
double Min = 0.0;
double Max = 255.0;
unsafe
{
byte* address = (byte*)data.Scan0.ToPointer();
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
double v = 255 * (input[x, y] - Min) / (Max - Min);
byte value = unchecked((byte)v);
for (int c = 0; c < pixelSize; c++, address++)
{
*address = value;
}
}
address += offset;
}
}
output.UnlockBits(data);
return output;
}
Here is the picture I used for the test,
int noOfBitsNeededForStorage = _bitmapData.Stride * _bitmapData.Height;
That is the most essential bug in the code. Stride * Height is already the number of bytes needed for storage, not bits. So the code doesn't make the _imageData array large enough, and an IndexOutOfRangeException is the expected outcome.
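A minimal sketch of the corrected allocation inside Lock(), assuming the rest of the method stays as posted:

// Stride already accounts for bits per pixel and for row padding, so
// Stride * Height is the byte count we need -- no division by 8, no * ColorDepth.
_imageData = new byte[_bitmapData.Stride * _bitmapData.Height];
IntegerPointer = _bitmapData.Scan0;
Marshal.Copy(IntegerPointer, _imageData, 0, _imageData.Length);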
int pixelSize = System.Drawing.Image.GetPixelFormatSize(PixelFormat.Format8bppIndexed) / 8;
Lots of possible mishaps flow from this statement. It hard-codes the pixel format to 8bpp, but that is not necessarily the actual pixel format the LockBits() call used, which was output.PixelFormat. Notably fatal for the sample image: although it is not clear how it is used in the code, 8bpp is a very awkward pixel format since it requires a palette. The PNG codec will create a 32bpp image in memory, even though the original file uses 8bpp. You must use output.PixelFormat here to get a match with the locked data, and adjust the pixel-writing code accordingly. It is not clear why it is needed at all; the SetPixel() method provided by the library code should already be good enough.
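A small sketch of that change, assuming ToBitmap() otherwise stays as posted:

// Derive the bytes-per-pixel count from the format that was actually locked,
// instead of hard-coding Format8bppIndexed.
int pixelSize = System.Drawing.Image.GetPixelFormatSize(output.PixelFormat) / 8;
int offset = data.Stride - width * pixelSize;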
int dataLength = _imageData.Length - channels;
It is unclear what that statement tries to do; subtracting the number of channels is not a sensible operation and it produces a spurious IndexOutOfRangeException. There is no obvious reason for the check either: the CLR already performs index checking on the _imageData array. So just delete that code.
Additional information: Bitmap region is already locked.
The exception handling in the code is shaky, which is a possible reason for this exception. In general, note that the underlying bitmap is completely inaccessible, other than through _imageData, after Lock() has been called and before Unlock() is called. The best way to handle this is a try/finally with the Unlock() call in the finally block, so you can always be sure the bitmap doesn't stay locked by accident.
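A sketch of the driver loop rewritten that way, reusing the question's bitmap and BitmapLocker:

BitmapLocker locker = new BitmapLocker(bitmap);
locker.Lock();
try
{
    for (int i = 0; i < bitmap.Width; i++)
    {
        for (int j = 0; j < bitmap.Height; j++)
        {
            Color c = locker.GetPixel(i, j);
            locker.SetPixel(i, j, c);
        }
    }
}
finally
{
    // the bitmap is always unlocked, even if GetPixel/SetPixel throws
    locker.Unlock();
}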
byte c = _imageData[i];
This is not correct, except in the corner case of an 8bpp image with a palette that was explicitly created for grayscale. The default palette of an 8bpp image does not meet that requirement, nor is it something you can blindly rely on when loading images from a file. Indexed pixel formats were a dreadful hack that was necessary in the early 1990s because video adapters were not yet powerful enough; they no longer make any sense today. Note also that SetPixel() doesn't handle 16-bit pixel formats, and that the PNG codec will never create an 8bpp memory image and cannot encode an 8bpp file. The best advice is to eliminate 8bpp support completely to arrive at more reliable code.
In fact, the point of directly accessing pixel data is to make image manipulation fast. There is only one pixel format that consistently produces fast code: Format32bppArgb. The pixels can then be accessed with an int* instead of a byte*, moving pixels roughly four times faster, and no special tweaks are needed to deal with stride or to special-case methods like SetPixel(). So pass that format to LockBits(); the codec will do the conversion work if the actual image format is not 32bpp.
I should note that Format32bppPArgb is the fast pixel format for displaying images on the screen, since it matches the pixel format used by all modern video adapters. But that isn't the point of this code, and dealing with pre-multiplied alpha is awkward.
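A minimal sketch of the Format32bppArgb route, assuming some Bitmap bmp and a project with unsafe code enabled (the pixel operation is just an illustrative inversion, not from the original answer):

BitmapData data = bmp.LockBits(
    new Rectangle(0, 0, bmp.Width, bmp.Height),
    ImageLockMode.ReadWrite,
    PixelFormat.Format32bppArgb);   // lock as 32bppArgb regardless of the bitmap's own format
try
{
    unsafe
    {
        for (int y = 0; y < bmp.Height; y++)
        {
            // Stride is in bytes, so compute each row start from Scan0.
            uint* row = (uint*)((byte*)data.Scan0.ToPointer() + y * data.Stride);
            for (int x = 0; x < bmp.Width; x++)
            {
                uint argb = row[x];           // one pixel = one 32-bit read
                row[x] = argb ^ 0x00FFFFFFu;  // invert R, G and B, keep alpha
            }
        }
    }
}
finally
{
    bmp.UnlockBits(data);
}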

Saving your canvas image

I'm using ClientCanvas to edit pictures in my app created with Tabris. So far it's working quite well, but I have a problem saving the edited picture as a new image. Does anybody have any experience with that?
You need to convert the SWT ImageData to an AWT BufferedImage and save it. A util for this may look like this:
import java.awt.Color;
import java.awt.Graphics;
import java.awt.Graphics2D;
import java.awt.image.BufferedImage;
import java.awt.image.ColorModel;
import java.awt.image.IndexColorModel;
import java.awt.image.WritableRaster;
import org.eclipse.swt.graphics.ImageData;
import org.eclipse.swt.graphics.PaletteData;
import org.eclipse.swt.graphics.RGB;
public class ImageUtil {
public static BufferedImage convertToAWT( ImageData data ) {
BufferedImage imageToDraw = bgToAWT( data, data.width, data.height );
BufferedImage result = new BufferedImage( data.width, data.height, BufferedImage.TYPE_INT_RGB );
drawWhiteBackground( result );
Graphics graphics = result.getGraphics();
graphics.drawImage( imageToDraw, 0, 0, null );
graphics.dispose();
return result;
}
/*
* See
* http://m4tx.pl/en/2013/01/01/java-swt-to-awt-and-vice-versa-image-conversion
* -with-transparency-support/
*/
public static BufferedImage bgToAWT( ImageData data, int width, int height ) {
BufferedImage result = null;
ColorModel colorModel = null;
PaletteData palette = data.palette;
if( palette.isDirect ) {
BufferedImage bufferedImage = new BufferedImage( width, height, BufferedImage.TYPE_INT_ARGB );
for( int y = 0; y < data.height; y++ ) {
for( int x = 0; x < data.width; x++ ) {
int pixel = data.getPixel( x, y );
RGB rgb = palette.getRGB( pixel );
bufferedImage.setRGB( x, y, data.getAlpha( x, y ) << 24
| rgb.red << 16
| rgb.green << 8
| rgb.blue );
}
}
result = bufferedImage;
} else {
RGB[] rgbs = palette.getRGBs();
byte[] red = new byte[ rgbs.length ];
byte[] green = new byte[ rgbs.length ];
byte[] blue = new byte[ rgbs.length ];
for( int i = 0; i < rgbs.length; i++ ) {
RGB rgb = rgbs[ i ];
red[ i ] = ( byte )rgb.red;
green[ i ] = ( byte )rgb.green;
blue[ i ] = ( byte )rgb.blue;
}
if( data.transparentPixel != -1 ) {
colorModel = new IndexColorModel( data.depth,
rgbs.length,
red,
green,
blue,
data.transparentPixel );
} else {
colorModel = new IndexColorModel( data.depth, rgbs.length, red, green, blue );
}
BufferedImage bufferedImage = new BufferedImage( colorModel,
colorModel.createCompatibleWritableRaster( width,
height ),
false,
null );
WritableRaster raster = bufferedImage.getRaster();
int[] pixelArray = new int[ 1 ];
for( int y = 0; y < data.height; y++ ) {
for( int x = 0; x < data.width; x++ ) {
int pixel = data.getPixel( x, y );
pixelArray[ 0 ] = pixel;
raster.setPixel( x, y, pixelArray );
}
}
result = bufferedImage;
}
return result;
}
private static void drawWhiteBackground( BufferedImage bufferedImage ) {
Graphics2D graphics = bufferedImage.createGraphics();
graphics.setColor( new Color( 255, 255, 255 ) );
graphics.fillRect( 0, 0, bufferedImage.getWidth(), bufferedImage.getHeight() );
}
}
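One possible way to hook the util up and write the result to disk, assuming you already have the ImageData from the canvas (ImageIO, the class name SaveUtil and the path parameter are my additions, not part of the original util):

import java.io.File;
import javax.imageio.ImageIO;
import java.awt.image.BufferedImage;
import org.eclipse.swt.graphics.ImageData;

public class SaveUtil {
    public static void save( ImageData data, String path ) throws Exception {
        BufferedImage awtImage = ImageUtil.convertToAWT( data );
        // "png" picks the PNG writer; "jpg" would work as well
        ImageIO.write( awtImage, "png", new File( path ) );
    }
}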

Issue with Photometric Interpretation tag even after inverting the image data

Note: I'm giving the background from my previous question once again so that all the related material is in one place.
I'm capturing an image from an Android mobile device and it is in JPEG format. The image is 72x72 DPI and 24-bit. When I try to convert this JPEG image to TIFF using LibTiff.Net and set the tag Photometric Interpretation = 0 for MinIsWhite, the image turns negative (white becomes black and black becomes white). The environment is Windows 8.1 64-bit, Visual Studio 2012. The tag must have value 0, where 0 = white is zero.
I absolutely must use Photometric.MINISWHITE in the images, so I tried inverting the image data before writing it to TIFF, as per the code below. But then the compression changes to LZW instead of CCITT4, Photometric changes from MINISWHITE to MINISBLACK, the FillOrder tag is removed, the PlanarConfig tag is removed, a new Predictor tag is added with value 1, and the image turns negative again.
public partial class Form1 : Form
{
private const TiffTag TIFFTAG_ASCIITAG = (TiffTag)666;
private const TiffTag TIFFTAG_LONGTAG = (TiffTag)667;
private const TiffTag TIFFTAG_SHORTTAG = (TiffTag)668;
private const TiffTag TIFFTAG_RATIONALTAG = (TiffTag)669;
private const TiffTag TIFFTAG_FLOATTAG = (TiffTag)670;
private const TiffTag TIFFTAG_DOUBLETAG = (TiffTag)671;
private const TiffTag TIFFTAG_BYTETAG = (TiffTag)672;
public Form1()
{
InitializeComponent();
}
private void button1_Click(object sender, EventArgs e)
{
using (Bitmap bmp = new Bitmap(@"D:\Projects\ITests\images\IMG_2.jpg"))
{
// convert jpg image to tiff
byte[] tiffBytes = GetTiffImageBytes(bmp, false);
File.WriteAllBytes(@"D:\Projects\ITests\images\output.tif", tiffBytes);
//Invert the tiff image
Bitmap bmpTiff = new Bitmap(@"D:\Projects\ITests\images\output.tif");
Bitmap FBitmap = Transform(bmpTiff);
FBitmap.Save(@"D:\Projects\ITests\images\invOutput1.tif");
}
}
public static byte[] GetTiffImageBytes(Bitmap img, bool byScanlines)
{
try
{
byte[] raster = GetImageRasterBytes(img);
using (MemoryStream ms = new MemoryStream())
{
using (Tiff tif = Tiff.ClientOpen("InMemory", "w", ms, new TiffStream()))
{
if (tif == null)
return null;
tif.SetField(TiffTag.IMAGEWIDTH, img.Width);
tif.SetField(TiffTag.IMAGELENGTH, img.Height);
tif.SetField(TiffTag.COMPRESSION, Compression.CCITTFAX4);
tif.SetField(TiffTag.PHOTOMETRIC, Photometric.MINISWHITE);
tif.SetField(TiffTag.ROWSPERSTRIP, img.Height);
tif.SetField(TiffTag.XRESOLUTION, 200);
tif.SetField(TiffTag.YRESOLUTION, 200);
tif.SetField(TiffTag.SUBFILETYPE, 0);
tif.SetField(TiffTag.BITSPERSAMPLE, 1);
tif.SetField(TiffTag.FILLORDER, FillOrder.LSB2MSB);
tif.SetField(TiffTag.ORIENTATION, BitMiracle.LibTiff.Classic.Orientation.TOPLEFT);
tif.SetField(TiffTag.SAMPLESPERPIXEL, 1);
tif.SetField(TiffTag.RESOLUTIONUNIT, ResUnit.INCH);
tif.SetField(TiffTag.PLANARCONFIG, PlanarConfig.CONTIG);
int tiffStride = tif.ScanlineSize();
int stride = raster.Length / img.Height;
if (byScanlines)
{
// raster stride MAY be bigger than TIFF stride (due to padding in raster bits)
for (int i = 0, offset = 0; i < img.Height; i++)
{
bool res = tif.WriteScanline(raster, offset, i, 0);
if (!res)
return null;
offset += stride;
}
}
else
{
if (tiffStride < stride)
{
// raster stride is bigger than TIFF stride
// this is due to padding in raster bits
// we need to create correct TIFF strip and write it into TIFF
byte[] stripBits = new byte[tiffStride * img.Height];
for (int i = 0, rasterPos = 0, stripPos = 0; i < img.Height; i++)
{
System.Buffer.BlockCopy(raster, rasterPos, stripBits, stripPos, tiffStride);
rasterPos += stride;
stripPos += tiffStride;
}
// Write the information to the file
int n = tif.WriteEncodedStrip(0, stripBits, stripBits.Length);
if (n <= 0)
return null;
}
else
{
// Write the information to the file
int n = tif.WriteEncodedStrip(0, raster, raster.Length);
if (n <= 0)
return null;
}
}
}
return ms.GetBuffer();
}
}
catch (Exception)
{
return null;
}
}
public static byte[] GetImageRasterBytes(Bitmap img)
{
// Specify full image
Rectangle rect = new Rectangle(0, 0, img.Width, img.Height);
Bitmap bmp = img;
byte[] bits = null;
try
{
// Lock the managed memory
if (img.PixelFormat != PixelFormat.Format1bppIndexed)
bmp = convertToBitonal(img);
BitmapData bmpdata = bmp.LockBits(rect, ImageLockMode.ReadOnly, PixelFormat.Format1bppIndexed);
// Declare an array to hold the bytes of the bitmap.
bits = new byte[bmpdata.Stride * bmpdata.Height];
// Copy the sample values into the array.
Marshal.Copy(bmpdata.Scan0, bits, 0, bits.Length);
// Release managed memory
bmp.UnlockBits(bmpdata);
}
finally
{
if (bmp != img)
bmp.Dispose();
}
return bits;
}
private static Bitmap convertToBitonal(Bitmap original)
{
int sourceStride;
byte[] sourceBuffer = extractBytes(original, out sourceStride);
// Create destination bitmap
Bitmap destination = new Bitmap(original.Width, original.Height,
PixelFormat.Format1bppIndexed);
destination.SetResolution(original.HorizontalResolution, original.VerticalResolution);
// Lock destination bitmap in memory
BitmapData destinationData = destination.LockBits(
new Rectangle(0, 0, destination.Width, destination.Height),
ImageLockMode.WriteOnly, PixelFormat.Format1bppIndexed);
// Create buffer for destination bitmap bits
int imageSize = destinationData.Stride * destinationData.Height;
byte[] destinationBuffer = new byte[imageSize];
int sourceIndex = 0;
int destinationIndex = 0;
int pixelTotal = 0;
byte destinationValue = 0;
int pixelValue = 128;
int height = destination.Height;
int width = destination.Width;
int threshold = 500;
for (int y = 0; y < height; y++)
{
sourceIndex = y * sourceStride;
destinationIndex = y * destinationData.Stride;
destinationValue = 0;
pixelValue = 128;
for (int x = 0; x < width; x++)
{
// Compute pixel brightness (i.e. total of Red, Green, and Blue values)
pixelTotal = sourceBuffer[sourceIndex + 1] + sourceBuffer[sourceIndex + 2] +
sourceBuffer[sourceIndex + 3];
if (pixelTotal > threshold)
destinationValue += (byte)pixelValue;
if (pixelValue == 1)
{
destinationBuffer[destinationIndex] = destinationValue;
destinationIndex++;
destinationValue = 0;
pixelValue = 128;
}
else
{
pixelValue >>= 1;
}
sourceIndex += 4;
}
if (pixelValue != 128)
destinationBuffer[destinationIndex] = destinationValue;
}
Marshal.Copy(destinationBuffer, 0, destinationData.Scan0, imageSize);
destination.UnlockBits(destinationData);
return destination;
}
private static byte[] extractBytes(Bitmap original, out int stride)
{
Bitmap source = null;
try
{
// If original bitmap is not already in 32 BPP, ARGB format, then convert
if (original.PixelFormat != PixelFormat.Format32bppArgb)
{
source = new Bitmap(original.Width, original.Height, PixelFormat.Format32bppArgb);
source.SetResolution(original.HorizontalResolution, original.VerticalResolution);
using (Graphics g = Graphics.FromImage(source))
{
g.DrawImageUnscaled(original, 0, 0);
}
}
else
{
source = original;
}
// Lock source bitmap in memory
BitmapData sourceData = source.LockBits(
new Rectangle(0, 0, source.Width, source.Height),
ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
// Copy image data to binary array
int imageSize = sourceData.Stride * sourceData.Height;
byte[] sourceBuffer = new byte[imageSize];
Marshal.Copy(sourceData.Scan0, sourceBuffer, 0, imageSize);
// Unlock source bitmap
source.UnlockBits(sourceData);
stride = sourceData.Stride;
return sourceBuffer;
}
finally
{
if (source != original)
source.Dispose();
}
}
public Bitmap Transform(Bitmap bitmapImage)
{
var bitmapRead = bitmapImage.LockBits(new Rectangle(0, 0, bitmapImage.Width, bitmapImage.Height), ImageLockMode.ReadOnly, PixelFormat.Format32bppPArgb);
var bitmapLength = bitmapRead.Stride * bitmapRead.Height;
var bitmapBGRA = new byte[bitmapLength];
Marshal.Copy(bitmapRead.Scan0, bitmapBGRA, 0, bitmapLength);
bitmapImage.UnlockBits(bitmapRead);
for (int i = 0; i < bitmapLength; i += 4)
{
bitmapBGRA[i] = (byte)(255 - bitmapBGRA[i]);
bitmapBGRA[i + 1] = (byte)(255 - bitmapBGRA[i + 1]);
bitmapBGRA[i + 2] = (byte)(255 - bitmapBGRA[i + 2]);
// [i + 3] = ALPHA.
}
var bitmapWrite = bitmapImage.LockBits(new Rectangle(0, 0, bitmapImage.Width, bitmapImage.Height), ImageLockMode.WriteOnly, PixelFormat.Format32bppPArgb);
Marshal.Copy(bitmapBGRA, 0, bitmapWrite.Scan0, bitmapLength);
bitmapImage.UnlockBits(bitmapWrite);
return bitmapImage;
}
}
You should invert the image bytes in the GetTiffImageBytes method, before writing them to the TIFF. Also, the Transform method converts the bi-level image to a 32bpp one, and that is why you end up with an LZW-compressed image.
So, add the following code:
for (int k = 0; k < raster.Length; k++)
raster[k] = (byte)(~raster[k]);
after byte[] raster = GetImageRasterBytes(img); in the GetTiffImageBytes method. This will invert the image bytes. And don't use the following code:
//Invert the tiff image
Bitmap bmpTiff = new Bitmap(@"D:\Projects\ITests\images\output.tif");
Bitmap FBitmap = Transform(bmpTiff);
FBitmap.Save(@"D:\Projects\ITests\images\invOutput1.tif");
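For clarity, the top of GetTiffImageBytes then looks roughly like this (only the inversion loop is new; everything else is the posted code):

byte[] raster = GetImageRasterBytes(img);

// invert the 1bpp raster so that MINISWHITE comes out with the expected polarity
for (int k = 0; k < raster.Length; k++)
    raster[k] = (byte)(~raster[k]);

using (MemoryStream ms = new MemoryStream())
{
    using (Tiff tif = Tiff.ClientOpen("InMemory", "w", ms, new TiffStream()))
    {
        // ... rest of the method unchanged ...
    }
}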

Is it possible to apply sound filters in Flash?

So basically I have a soundboard made in Flash CS5. Is it possible to alter the sound of the library's audio files using Flash only? Like making the clips sound deeper or faster, that's the point. And if it's not possible?
You cannot do it in AS2. But yes, you can change the pitch and playback speed of a sound in AS3. In fact, you can do more than that (search for Audiotool), but this demonstrates how it is done:
http://plasticsturgeon.com/2012/05/changing-the-pitch-of-a-looped-sound-at-runtime-with-actionscript/
And in case my blog goes down or something, here is the relevant class that changes the pitch of a sound:
package sound
{
import flash.events.Event;
import flash.events.SampleDataEvent;
import flash.media.Sound;
import flash.media.SoundChannel;
import flash.media.SoundTransform;
import flash.net.URLRequest;
import flash.utils.ByteArray;
/**
* @author Andre Michelle (andr...@gmail.com)
* Modified by Zach Foley aka The Plastic Sturgeon
*/
public class MP3Pitch
{
private const BLOCK_SIZE: int = 3072;
private var _mp3: Sound;
private var _sound: Sound;
private var _target: ByteArray;
private var _position: Number;
private var _rate: Number;
private var repeat:SoundChannel;
private var _volume:Number = 1;
private var byteArray:ByteArray;
// Pass in your looped Sound
public function MP3Pitch( pitchedSound: Sound)
{
_target = new ByteArray();
_mp3 = pitchedSound;
_position = 0.0;
_rate = 0.0;
_sound = new Sound();
_sound.addEventListener( SampleDataEvent.SAMPLE_DATA, sampleData );
repeat = _sound.play();
}
public function get rate(): Number
{
return _rate;
}
// Also added a handy volume setter
public function set volume( value: Number ): void
{
_volume = value;
repeat.soundTransform = new SoundTransform(_volume);
}
// use this to set the pitch of your sound
public function set rate( value: Number ): void
{
if( value < 0.0 )
value = 0;
_rate = value;
}
private function sampleData( event: SampleDataEvent ): void
{
//-- REUSE INSTEAD OF RECREATION
_target.position = 0;
//-- SHORTCUT
var data: ByteArray = event.data;
var scaledBlockSize: Number = BLOCK_SIZE * _rate;
var positionInt: int = _position;
var alpha: Number = _position - positionInt;
var positionTargetNum: Number = alpha;
var positionTargetInt: int = -1;
//-- COMPUTE NUMBER OF SAMPLES NEED TO PROCESS BLOCK (+2 FOR INTERPOLATION)
var need: int = Math.ceil( scaledBlockSize ) + 2;
//-- EXTRACT SAMPLES
var read: int = _mp3.extract( _target, need, positionInt );
var n: int = read == need ? BLOCK_SIZE : read / _rate;
var l0: Number;
var r0: Number;
var l1: Number;
var r1: Number;
for( var i: int = 0 ; i < n ; ++i )
{
    //-- AVOID READING EQUAL SAMPLES, IF RATE < 1.0
    if( int( positionTargetNum ) != positionTargetInt )
    {
        positionTargetInt = positionTargetNum;
        //-- SET TARGET READ POSITION
        _target.position = positionTargetInt << 3;
        //-- READ TWO STEREO SAMPLES FOR LINEAR INTERPOLATION
        l0 = _target.readFloat();
        r0 = _target.readFloat();
        l1 = _target.readFloat();
        r1 = _target.readFloat();
    }
    //-- WRITE INTERPOLATED AMPLITUDES INTO STREAM
    data.writeFloat( l0 + alpha * ( l1 - l0 ) );
    data.writeFloat( r0 + alpha * ( r1 - r0 ) );
    //-- INCREASE TARGET POSITION
    positionTargetNum += _rate;
    //-- INCREASE FRACTION AND CLAMP BETWEEN 0 AND 1
    alpha += _rate;
    while( alpha >= 1.0 ) --alpha;
}
//-- FILL REST OF STREAM WITH ZEROs
if( i < BLOCK_SIZE )
{
    while( i < BLOCK_SIZE )
    {
        data.writeFloat( 0.0 );
        data.writeFloat( 0.0 );
        ++i;
    }
}
//-- INCREASE SOUND POSITION
_position += scaledBlockSize;
// My little addition here:
if (_position > _mp3.length * 44.1)
{
    _position = 0;
    _target.position = 0;
}
}
}
}
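A minimal usage sketch, assuming a looped Sound exported from the library (LibraryLoop is an illustrative linkage class name, not from the post):

import flash.media.Sound;
import sound.MP3Pitch;

var src:Sound = new LibraryLoop();          // hypothetical sound exported from the library
var pitched:MP3Pitch = new MP3Pitch( src ); // starts playback through the dynamic Sound
pitched.rate = 1.25;                        // > 1.0 sounds faster/higher, < 1.0 slower/deeper
pitched.volume = 0.8;                       // uses the added volume setter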

Array of Images

I'm working on a BlackBerry project and I need to create a grid layout. I'm working with the BlackBerry Java SDK.
I'm using this code:
public class GridScreen extends UiApplication {
// main method
public static void main(String[] args) {
GridScreen theApp = new GridScreen();
UiApplication.getUiApplication().pushScreen(new GFMScreen());
theApp.enterEventDispatcher();
}
}
// VFM
class GFMScreen extends MainScreen {
public GFMScreen() {
// this doesn't do anything for VCENTER!!
//super(Field.USE_ALL_HEIGHT);
// create a grid field manager, with 2 cols and 0 style param for super class
// style of Manager.FIELD_VCENTER | Field.USE_ALL_HEIGHT doesn't do a thing!
int columns = 2;
final GridFieldManager gfm = new GridFieldManager(columns, 0);
// add some items to the screen
int size = 6;
BitmapField[] fRay = new BitmapField[size];
for (int i = 0; i < size; i++) {
// create a bitmap field that's centered H + V (inside grid space)
fRay[i] = new BitmapField(loadBitmap("images/" + (i + 1) + ".png"),
Field.FIELD_HCENTER | Field.FIELD_VCENTER | Field.FOCUSABLE);
gfm.add(fRay[i]);
}
// set padding on top/bottom
{
// add gfm to screen - this does not center the gfm on the screen... is top aligned no matter what!
add(gfm);
int gfmHeight = 48 * (size / columns);
int borderHeight = (Display.getHeight() - gfmHeight) / 2;
gfm.setBorder(BorderFactory.createSimpleBorder(
new XYEdges(borderHeight, 0, borderHeight, 0),
Border.STYLE_TRANSPARENT));
System.out.println("border=" + borderHeight);
System.out.println("display=" + Display.getHeight());
System.out.println("gfm=" + gfmHeight);
}
}
/** @param res eg "images/icon.png" */
public static Bitmap loadBitmap(String res) {
EncodedImage img = EncodedImage.getEncodedImageResource(res);
return img.getBitmap();
}
}// end class
What is wrong in this code?
Is there a better approach to create a grid layout in BlackBerry?
In the above code the error is "Display.getHeight() is not define".
Hope this code helps:
Bitmap[] images = new Bitmap[6];
for (int i = 0; i < 6; i++) {
    String filename = "images/" + String.valueOf(i + 1) + ".png";
    images[i] = Bitmap.getBitmapResource(filename);
}
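As for the "Display.getHeight() is not define" error: that usually just means the class isn't imported. A sketch of the imports involved, assuming the standard net.rim packages:

// Display lives in the system package:
import net.rim.device.api.system.Display;
// the grid-related classes used above come from the UI packages, e.g.:
import net.rim.device.api.ui.container.GridFieldManager;
import net.rim.device.api.ui.component.BitmapField;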
