I am trying to remove a 3 mm bleed from a PDF using the approach below.
My source file is attached here: Source file
I am using the following code to trim the left and right edges:
public void TrimLeftandRight(string sourceFilePath, string outputFilePath)
{
PdfReader pdfReader = new PdfReader(sourceFilePath);
float width = (float)GetPDFwidth(sourceFilePath);
float height = (float)GetPDFHeight(sourceFilePath);
float widthTo_Trim = iTextSharp.text.Utilities.MillimetersToPoints(3);
PdfRectangle rectrightside = new PdfRectangle(0, 0, width - widthTo_Trim, height);
PdfRectangle rectLeftside = new PdfRectangle(widthTo_Trim, 0, width, height);
// int[] pagealignment = new int[] { 8, 1, 2, 7, 6, 3, 4, 5 };
int[] pagealignment = new int[] { 6, 1, 2, 5, 4, 3 };
using (var output = new FileStream(outputFilePath, FileMode.CreateNew, FileAccess.Write))
{
// Create a new document
Document doc = new Document();
// Make a copy of the document
PdfSmartCopy smartCopy = new PdfSmartCopy(doc, output);
// Open the newly created document
doc.Open();
// Loop through all pages of the source document
for (int i = 1; i <= pdfReader.NumberOfPages; i++)
{
// Get a page
var page = pdfReader.GetPageN(i);
// Apply the rectangle filter we created
switch (i)
{
case 6:
page.Put(PdfName.CROPBOX, rectLeftside);
page.Put(PdfName.MEDIABOX, rectrightside);
break;
case 2:
page.Put(PdfName.MEDIABOX, rectrightside);
break;
case 4:
page.Put(PdfName.MEDIABOX, rectLeftside);
break;
case 1:
page.Put(PdfName.MEDIABOX, rectLeftside);
break;
case 5:
page.Put(PdfName.MEDIABOX, rectrightside);
// page.Put(PdfName.CROPBOX, rectLeftside);
break;
case 3:
page.Put(PdfName.CROPBOX, rectLeftside);
page.Put(PdfName.MEDIABOX, rectrightside);
break;
}
// Copy the content and insert into the new document
var copiedPage = smartCopy.GetImportedPage(pdfReader, i);
smartCopy.AddPage(copiedPage);
}
// Close the output document
smartCopy.Close();
doc.Close();
doc.Dispose();
}
}
The output of the above code is the trimmed file: Trimmed left and right file
I then used the following code to merge the trimmed pages:
public void CreategateFinalOutput(string inputfile)
{
double widthinpoints = iTextSharp.text.Utilities.MillimetersToPoints(897);
string onlyfilename = Path.GetFileName(inputfile);
// string originalfilename = Server.MapPath("~/Uploads/" + onlyfilename);
int Noofpagesinpdf = GetNoofpagesofpdf(inputfile);
// var a3doc = new Document(PageSize.A3.Rotate(), 0, 0, 0, 0);
double originalwidth = GetPDFwidth(inputfile);
float widthTo_Trim = iTextSharp.text.Utilities.MillimetersToPoints(3);
double width = (GetPDFwidth(inputfile) * 3);
width = widthinpoints;
double height = GetPDFHeight(inputfile);
var a3reader = new PdfReader(inputfile);
var a3doc = new Document(new Rectangle((float)width, (float)height));
var a3writer = PdfWriter.GetInstance(a3doc, new FileStream(Server.MapPath("~/RP/" + onlyfilename), FileMode.Create));
a3doc.Open();
var a3cb = a3writer.DirectContent;
PdfImportedPage page;
int totalPages = a3reader.NumberOfPages;
// int[] pagealignment = new int[] { 8, 1, 2, 7, 6, 3, 4, 5 };
int[] pagealignment = new int[] { 5, 6, 1, 2, 3, 4 };
int iteration = 1;
for (int i = 1; i <= totalPages; i++)
{
a3doc.NewPage();
var a3size = new Document(new Rectangle((float)width, (float)height));
//new code
int fistpage = 0;
int secpage = 0;
int thirdpage = 0;
switch (iteration)
{
case 1:
fistpage = 5;
secpage = 6;
thirdpage = 1;
break;
case 2:
fistpage = 2;
secpage = 3;
thirdpage = 4;
break;
}
double trimwidth = iTextSharp.text.Utilities.MillimetersToPoints(3);
page = a3writer.GetImportedPage(a3reader, fistpage);
double pagewidth = page.Width;
a3cb.AddTemplate(page, 0, 0);
i++;
page = a3writer.GetImportedPage(a3reader, secpage);
double pagewidtha = page.Width;
a3cb.AddTemplate(page, (float)(pagewidtha), 0);
i++;
page = a3writer.GetImportedPage(a3reader, thirdpage);
double pagewidthaThird = page.Width;
// a3cb.AddTemplate(page, (int)(a3size.Width / 2), 0); //commented
a3cb.AddTemplate(page, (float)(pagewidthaThird + pagewidth), 0);
iteration++;
}
// Close the merged document once, after all pages have been placed.
a3doc.Close();
}
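Both methods above call GetPDFwidth, GetPDFHeight, and GetNoofpagesofpdf, which are not shown in the question. A minimal sketch of what they might look like, assuming they simply return the first page's dimensions and the page count via iTextSharp's PdfReader (hypothetical implementations, not the OP's actual helpers):
public double GetPDFwidth(string sourceFilePath)
{
    // Width of the first page in points, taking page rotation into account
    PdfReader reader = new PdfReader(sourceFilePath);
    double width = reader.GetPageSizeWithRotation(1).Width;
    reader.Close();
    return width;
}
public double GetPDFHeight(string sourceFilePath)
{
    // Height of the first page in points
    PdfReader reader = new PdfReader(sourceFilePath);
    double height = reader.GetPageSizeWithRotation(1).Height;
    reader.Close();
    return height;
}
public int GetNoofpagesofpdf(string sourceFilePath)
{
    // Total number of pages in the document
    PdfReader reader = new PdfReader(sourceFilePath);
    int pages = reader.NumberOfPages;
    reader.Close();
    return pages;
}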
When I merge the PDFs using the code above, the output is not as desired: Final output
The borders were removed from pages 5 and 6, but after merging a border appears again.
You can see it by downloading the PDFs. Apologies for such a large amount of code; any help would be highly appreciated. Please download and check the PDFs for a better view.
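One thing worth checking: after the trim step, pages whose MediaBox was set to rectLeftside no longer have their lower-left corner at (0, 0), and AddTemplate(page, x, 0) translates the page in its original coordinate system, so the trimmed 3 mm strip can reappear as a white border at the seams. Below is a sketch of a placement step that compensates for the imported page's bounding-box origin; pageNumber and currentX are hypothetical names, while a3writer, a3reader, and a3cb follow the merge code above.
    // Map the imported page's own lower-left corner to the intended x offset,
    // cancelling any non-zero origin left over from the MediaBox/CropBox trim.
    PdfImportedPage imported = a3writer.GetImportedPage(a3reader, pageNumber);
    float xOffset = currentX - imported.BoundingBox.Left;
    float yOffset = -imported.BoundingBox.Bottom;
    a3cb.AddTemplate(imported, xOffset, yOffset);
    // Advance by the visible (trimmed) width so the next page butts up flush.
    currentX += imported.BoundingBox.Width;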
Related
I'm having a problem trying to implement multiple models in Vulkan. I used the official Vulkan tutorial to set up most of the boilerplate code, then the rest is custom. At the moment, I am able to get one model to render perfectly, while the other identical model renders in about 60% of frames and doesn't render at all in others, as shown in the gif: Engine output
I've tried changing many things to fix it, but I can't pinpoint what is causing the issue. Any help would be greatly appreciated. I'm attaching the code snippets I think are most relevant, but I can include more if it helps solve the issue.
CreateCommandBuffers():
...
vkCmdBindVertexBuffers(commandBuffers[i], 0, 1, vertexBuffers, offsets);
vkCmdBindIndexBuffer(commandBuffers[i], indexBuffer, 0, VK_INDEX_TYPE_UINT32);
vkCmdBindDescriptorSets(commandBuffers[i], VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout, 0, 1, &descriptorSets[i], 0, nullptr); //COMEBACKTO
vkCmdDrawIndexed(commandBuffers[i], models[0].getModelIndicesSize(), 1, 0, 0, 0);
vkCmdBindDescriptorSets(commandBuffers[i], VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout, 0, 1, &descriptorSets[i + 1], 0, nullptr);
vkCmdDrawIndexed(commandBuffers[i], models[0].getModelIndicesSize(), 1, 0, 0, 0);
...
createDescriptorSets():
std::vector<VkDescriptorSetLayout> layouts(swapChainImages.size() * models.size(), descriptorSetLayout);
VkDescriptorSetAllocateInfo allocInfo{};
allocInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
allocInfo.descriptorPool = descriptorPool;
allocInfo.descriptorSetCount = static_cast<uint32_t>(swapChainImages.size() * models.size());
allocInfo.pSetLayouts = layouts.data();
descriptorSets.resize(swapChainImages.size() * models.size());
if (vkAllocateDescriptorSets(device, &allocInfo, descriptorSets.data()) != VK_SUCCESS) {
throw std::runtime_error("failed to allocate descriptor sets!");
}
for (int i = 0; i < swapChainImages.size(); i++) {
std::vector<VkWriteDescriptorSet> descWrites;
descWrites.resize(models.size() * 2);
for (int j = 0; j < models.size(); j++) {
VkDescriptorBufferInfo bufferInfo{};
bufferInfo.offset = 0;
bufferInfo.range = sizeof(UniformBufferObject);
//bufferInfo.buffer = uniformBuffers[i];
bufferInfo.buffer = uniformBuffers[j + (i * (models.size() - 1))];
VkDescriptorImageInfo imageInfo{};
imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
imageInfo.imageView = textureImageView; //set these up
imageInfo.sampler = textureSampler; //set these up
int tmp = j * 2;
int tmp1 = tmp + 1;
descWrites[tmp].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descWrites[tmp].dstSet = descriptorSets[j + (i * (models.size() - 1))];
descWrites[tmp].dstBinding = 0;
descWrites[tmp].dstArrayElement = 0;
descWrites[tmp].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
descWrites[tmp].descriptorCount = 1;
descWrites[tmp].pBufferInfo = &bufferInfo;
descWrites[tmp1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descWrites[tmp1].dstSet = descriptorSets[j + (i * (models.size() - 1))];
descWrites[tmp1].dstBinding = 1;
descWrites[tmp1].dstArrayElement = 0;
descWrites[tmp1].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
descWrites[tmp1].descriptorCount = 1;
descWrites[tmp1].pImageInfo = &imageInfo;
}
vkUpdateDescriptorSets(device, static_cast<uint32_t>(descWrites.size()), descWrites.data(), 0, nullptr);
}
Note: I'm repeating the background from my previous question so that all the related information is in one place.
I'm capturing an image from an Android mobile device and it's in JPEG format. The image is 72x72 DPI and 24-bit. When I try to convert this JPEG image to TIFF using LibTiff.Net and set the tag PhotometricInterpretation = 0 for MinIsWhite, the image turns negative (the white becomes black and the black becomes white). The environment is Windows 8.1 64-bit, Visual Studio 2012. The tag must have the value 0, where 0 = white is zero.
I absolutely must use Photometric.MINISWHITE in these images, so I tried inverting the image data before writing it to the TIFF, as in the code below. But then the compression changes to LZW instead of CCITT4, Photometric changes from MINISWHITE to MINISBLACK, the FillOrder tag is removed, the PlanarConfig tag is removed, a new Predictor tag is added with value 1, and the image turns negative again.
public partial class Form1 : Form
{
private const TiffTag TIFFTAG_ASCIITAG = (TiffTag)666;
private const TiffTag TIFFTAG_LONGTAG = (TiffTag)667;
private const TiffTag TIFFTAG_SHORTTAG = (TiffTag)668;
private const TiffTag TIFFTAG_RATIONALTAG = (TiffTag)669;
private const TiffTag TIFFTAG_FLOATTAG = (TiffTag)670;
private const TiffTag TIFFTAG_DOUBLETAG = (TiffTag)671;
private const TiffTag TIFFTAG_BYTETAG = (TiffTag)672;
public Form1()
{
InitializeComponent();
}
private void button1_Click(object sender, EventArgs e)
{
using (Bitmap bmp = new Bitmap(@"D:\Projects\ITests\images\IMG_2.jpg"))
{
// convert jpg image to tiff
byte[] tiffBytes = GetTiffImageBytes(bmp, false);
File.WriteAllBytes(@"D:\Projects\ITests\images\output.tif", tiffBytes);
//Invert the tiff image
Bitmap bmpTiff = new Bitmap(@"D:\Projects\ITests\images\output.tif");
Bitmap FBitmap = Transform(bmpTiff);
FBitmap.Save(@"D:\Projects\ITests\images\invOutput1.tif");
}
}
public static byte[] GetTiffImageBytes(Bitmap img, bool byScanlines)
{
try
{
byte[] raster = GetImageRasterBytes(img);
using (MemoryStream ms = new MemoryStream())
{
using (Tiff tif = Tiff.ClientOpen("InMemory", "w", ms, new TiffStream()))
{
if (tif == null)
return null;
tif.SetField(TiffTag.IMAGEWIDTH, img.Width);
tif.SetField(TiffTag.IMAGELENGTH, img.Height);
tif.SetField(TiffTag.COMPRESSION, Compression.CCITTFAX4);
tif.SetField(TiffTag.PHOTOMETRIC, Photometric.MINISWHITE);
tif.SetField(TiffTag.ROWSPERSTRIP, img.Height);
tif.SetField(TiffTag.XRESOLUTION, 200);
tif.SetField(TiffTag.YRESOLUTION, 200);
tif.SetField(TiffTag.SUBFILETYPE, 0);
tif.SetField(TiffTag.BITSPERSAMPLE, 1);
tif.SetField(TiffTag.FILLORDER, FillOrder.LSB2MSB);
tif.SetField(TiffTag.ORIENTATION, BitMiracle.LibTiff.Classic.Orientation.TOPLEFT);
tif.SetField(TiffTag.SAMPLESPERPIXEL, 1);
tif.SetField(TiffTag.RESOLUTIONUNIT, ResUnit.INCH);
tif.SetField(TiffTag.PLANARCONFIG, PlanarConfig.CONTIG);
int tiffStride = tif.ScanlineSize();
int stride = raster.Length / img.Height;
if (byScanlines)
{
// raster stride MAY be bigger than TIFF stride (due to padding in raster bits)
for (int i = 0, offset = 0; i < img.Height; i++)
{
bool res = tif.WriteScanline(raster, offset, i, 0);
if (!res)
return null;
offset += stride;
}
}
else
{
if (tiffStride < stride)
{
// raster stride is bigger than TIFF stride
// this is due to padding in raster bits
// we need to create correct TIFF strip and write it into TIFF
byte[] stripBits = new byte[tiffStride * img.Height];
for (int i = 0, rasterPos = 0, stripPos = 0; i < img.Height; i++)
{
System.Buffer.BlockCopy(raster, rasterPos, stripBits, stripPos, tiffStride);
rasterPos += stride;
stripPos += tiffStride;
}
// Write the information to the file
int n = tif.WriteEncodedStrip(0, stripBits, stripBits.Length);
if (n <= 0)
return null;
}
else
{
// Write the information to the file
int n = tif.WriteEncodedStrip(0, raster, raster.Length);
if (n <= 0)
return null;
}
}
}
return ms.GetBuffer();
}
}
catch (Exception)
{
return null;
}
}
public static byte[] GetImageRasterBytes(Bitmap img)
{
// Specify full image
Rectangle rect = new Rectangle(0, 0, img.Width, img.Height);
Bitmap bmp = img;
byte[] bits = null;
try
{
// Lock the managed memory
if (img.PixelFormat != PixelFormat.Format1bppIndexed)
bmp = convertToBitonal(img);
BitmapData bmpdata = bmp.LockBits(rect, ImageLockMode.ReadOnly, PixelFormat.Format1bppIndexed);
// Declare an array to hold the bytes of the bitmap.
bits = new byte[bmpdata.Stride * bmpdata.Height];
// Copy the sample values into the array.
Marshal.Copy(bmpdata.Scan0, bits, 0, bits.Length);
// Release managed memory
bmp.UnlockBits(bmpdata);
}
finally
{
if (bmp != img)
bmp.Dispose();
}
return bits;
}
private static Bitmap convertToBitonal(Bitmap original)
{
int sourceStride;
byte[] sourceBuffer = extractBytes(original, out sourceStride);
// Create destination bitmap
Bitmap destination = new Bitmap(original.Width, original.Height,
PixelFormat.Format1bppIndexed);
destination.SetResolution(original.HorizontalResolution, original.VerticalResolution);
// Lock destination bitmap in memory
BitmapData destinationData = destination.LockBits(
new Rectangle(0, 0, destination.Width, destination.Height),
ImageLockMode.WriteOnly, PixelFormat.Format1bppIndexed);
// Create buffer for destination bitmap bits
int imageSize = destinationData.Stride * destinationData.Height;
byte[] destinationBuffer = new byte[imageSize];
int sourceIndex = 0;
int destinationIndex = 0;
int pixelTotal = 0;
byte destinationValue = 0;
int pixelValue = 128;
int height = destination.Height;
int width = destination.Width;
int threshold = 500;
for (int y = 0; y < height; y++)
{
sourceIndex = y * sourceStride;
destinationIndex = y * destinationData.Stride;
destinationValue = 0;
pixelValue = 128;
for (int x = 0; x < width; x++)
{
// Compute pixel brightness (i.e. total of Red, Green, and Blue values)
pixelTotal = sourceBuffer[sourceIndex + 1] + sourceBuffer[sourceIndex + 2] +
sourceBuffer[sourceIndex + 3];
if (pixelTotal > threshold)
destinationValue += (byte)pixelValue;
if (pixelValue == 1)
{
destinationBuffer[destinationIndex] = destinationValue;
destinationIndex++;
destinationValue = 0;
pixelValue = 128;
}
else
{
pixelValue >>= 1;
}
sourceIndex += 4;
}
if (pixelValue != 128)
destinationBuffer[destinationIndex] = destinationValue;
}
Marshal.Copy(destinationBuffer, 0, destinationData.Scan0, imageSize);
destination.UnlockBits(destinationData);
return destination;
}
private static byte[] extractBytes(Bitmap original, out int stride)
{
Bitmap source = null;
try
{
// If original bitmap is not already in 32 BPP, ARGB format, then convert
if (original.PixelFormat != PixelFormat.Format32bppArgb)
{
source = new Bitmap(original.Width, original.Height, PixelFormat.Format32bppArgb);
source.SetResolution(original.HorizontalResolution, original.VerticalResolution);
using (Graphics g = Graphics.FromImage(source))
{
g.DrawImageUnscaled(original, 0, 0);
}
}
else
{
source = original;
}
// Lock source bitmap in memory
BitmapData sourceData = source.LockBits(
new Rectangle(0, 0, source.Width, source.Height),
ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
// Copy image data to binary array
int imageSize = sourceData.Stride * sourceData.Height;
byte[] sourceBuffer = new byte[imageSize];
Marshal.Copy(sourceData.Scan0, sourceBuffer, 0, imageSize);
// Unlock source bitmap
source.UnlockBits(sourceData);
stride = sourceData.Stride;
return sourceBuffer;
}
finally
{
if (source != original)
source.Dispose();
}
}
public Bitmap Transform(Bitmap bitmapImage)
{
var bitmapRead = bitmapImage.LockBits(new Rectangle(0, 0, bitmapImage.Width, bitmapImage.Height), ImageLockMode.ReadOnly, PixelFormat.Format32bppPArgb);
var bitmapLength = bitmapRead.Stride * bitmapRead.Height;
var bitmapBGRA = new byte[bitmapLength];
Marshal.Copy(bitmapRead.Scan0, bitmapBGRA, 0, bitmapLength);
bitmapImage.UnlockBits(bitmapRead);
for (int i = 0; i < bitmapLength; i += 4)
{
bitmapBGRA[i] = (byte)(255 - bitmapBGRA[i]);
bitmapBGRA[i + 1] = (byte)(255 - bitmapBGRA[i + 1]);
bitmapBGRA[i + 2] = (byte)(255 - bitmapBGRA[i + 2]);
// [i + 3] = ALPHA.
}
var bitmapWrite = bitmapImage.LockBits(new Rectangle(0, 0, bitmapImage.Width, bitmapImage.Height), ImageLockMode.WriteOnly, PixelFormat.Format32bppPArgb);
Marshal.Copy(bitmapBGRA, 0, bitmapWrite.Scan0, bitmapLength);
bitmapImage.UnlockBits(bitmapWrite);
return bitmapImage;
}
}
You should invert the image bytes in the GetTiffImageBytes method, before writing them to the TIFF. Also, the Transform method converts the bi-level image to a 32bpp one, and that is why you end up with an LZW-compressed image.
So, add the following code
for (int k = 0; k < raster.Length; k++)
raster[k] = (byte)(~raster[k]);
after byte[] raster = GetImageRasterBytes(img); in the GetTiffImageBytes method. This will invert the image bytes. And don't use the following code:
//Invert the tiff image
Bitmap bmpTiff = new Bitmap(@"D:\Projects\ITests\images\output.tif");
Bitmap FBitmap = Transform(bmpTiff);
FBitmap.Save(@"D:\Projects\ITests\images\invOutput1.tif");
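With the inversion done inside GetTiffImageBytes, the click handler only needs to write out the bytes it gets back; a trimmed-down version of the handler from the question (same paths, no Transform pass):
    private void button1_Click(object sender, EventArgs e)
    {
        using (Bitmap bmp = new Bitmap(@"D:\Projects\ITests\images\IMG_2.jpg"))
        {
            // GetTiffImageBytes now inverts the raster before writing, so the
            // CCITT4 / MINISWHITE TIFF keeps the expected polarity.
            byte[] tiffBytes = GetTiffImageBytes(bmp, false);
            File.WriteAllBytes(@"D:\Projects\ITests\images\output.tif", tiffBytes);
        }
    }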
I have used the snippet below to show text together with an image, but I am unable to get the image to display.
Is the path to the image not accessible from the code?
C1.Win.C1FlexGrid.C1FlexGrid gAuditL = new C1.Win.C1FlexGrid.C1FlexGrid();
.
.
.
gAuditL.DataSource = AuditLogVieweryDT;// this is datasource
for (int i = gAuditL.Rows.Fixed; i < gAuditL.Rows.Count; i++)
//foreach row in grid
{
string severity = gAuditL[i, gAuditL.Cols["Severity"].Index].ToString();
if (severity == "Information")
{
this.gAuditL.SetCellImage(i, 0, Image.FromFile(@".\Resources\information.bmp"));
this.gAuditL.SetData(i, 0, "Information");
}
if (severity == "Warning")
{
this.gAuditL.SetCellImage(i, 0, Image.FromFile(#".\\Resources\warning.bmp"));
this.gAuditL.SetData(i, 0, "Warning");
}
if (severity == "Critical")
{
this.gAuditL.SetCellImage(i, 0, Image.FromFile(#".\\Resources\critical.bmp"));
this.gAuditL.SetData(i, 0, "Critical");
}
if (severity == "Unspecified")
{
this.gAuditL.SetCellImage(i, 0, Image.FromFile(#".\\Resources\unspecified.bmp"));
this.gAuditL.SetData(i, 0, "Unspecified");
}
this.gAuditL.Styles.Normal.ImageAlign = C1.Win.C1FlexGrid.ImageAlignEnum.LeftCenter;
this.gAuditL.Styles.Normal.TextAlign = C1.Win.C1FlexGrid.TextAlignEnum.RightCenter;
}
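If the image files themselves are not being found, one common cause is that a relative path such as .\Resources\information.bmp resolves against the process's current working directory rather than the executable's folder. A small sketch (hypothetical; it simply builds an absolute path with Application.StartupPath before calling Image.FromFile):
    // Resolve the Resources folder next to the executable, then load by file name.
    string resourceDir = Path.Combine(Application.StartupPath, "Resources");
    this.gAuditL.SetCellImage(i, 0, Image.FromFile(Path.Combine(resourceDir, "information.bmp")));
    this.gAuditL.SetData(i, 0, "Information");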
Please refer to this (answer posted by the OP):
namespace SampleProject.Forms.Maintenance
{
public partial class SampleProject: Form
{
Image img1, img2, img3, img4;// declare member variable
//Load Event
private void AuditLogViewer_Load(object sender, EventArgs e)
{
object information = Resources.ResourceManager.GetObject("information"); // load the "information" image from the project resources
img1 = (Image)information;
object Warning = Resources.ResourceManager.GetObject("warning"); // load the "warning" image from the project resources
img2 = (Image)Warning;
object critical = Resources.ResourceManager.GetObject("critical"); // load the "critical" image from the project resources
img3 = (Image)critical;
object unspecified = Resources.ResourceManager.GetObject("unspecified"); // load the "unspecified" image from the project resources
img4 = (Image)unspecified;
}
//Grid OwnerDrawCell Event
private void grdAuditLogs_OwnerDrawCell(object sender, OwnerDrawCellEventArgs e)
{
if (e.Col == 2)
{
//let the grid paint the background and border for the cell
e.DrawCell(C1.Win.C1FlexGrid.DrawCellFlags.Background | C1.Win.C1FlexGrid.DrawCellFlags.Border);
//find text width
var width = (int)e.Graphics.MeasureString(e.Text, e.Style.Font).Width;
//x-coordinate for each image
var img1_x = e.Bounds.X + width + 10;
var img2_x = e.Bounds.X + width + 10;
var img3_x = e.Bounds.X + width + 10;
var img4_x = e.Bounds.X + width + 10;
//var img3_x = img2_x + img2.Width + 5;
//location for each image
var img1_loc = new Point(img1_x, e.Bounds.Y + img1.Height - 18);
var img2_loc = new Point(img2_x, e.Bounds.Y + img2.Height - 18);
var img3_loc = new Point(img3_x, e.Bounds.Y + img3.Height - 18);
var img4_loc = new Point(img4_x, e.Bounds.Y + img4.Height - 18);
//draw images at aforementioned points
if (grdAuditLogs[e.Row, grdAuditLogs.Cols["Severity"].Index].ToString() == "Information")
e.Graphics.DrawImage(img1, img1_loc);
if (grdAuditLogs[e.Row, grdAuditLogs.Cols["Severity"].Index].ToString() == "Warning")
e.Graphics.DrawImage(img2, img2_loc);
if (grdAuditLogs[e.Row, grdAuditLogs.Cols["Severity"].Index].ToString() == "Critical")
e.Graphics.DrawImage(img3, img3_loc);
if (grdAuditLogs[e.Row, grdAuditLogs.Cols["Severity"].Index].ToString() == "Unspecified")
e.Graphics.DrawImage(img4, img4_loc);
//e1.Graphics.DrawImage(img3, img3_loc);
//draw text
e.Graphics.DrawString(e.Text, e.Style.Font, Brushes.Black, e.Bounds.Location);
e.Handled = true;
}
}
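For the OwnerDrawCell handler above to run, the grid also has to be switched to owner drawing and the event wired up, for example from the form's constructor or Load handler (a short sketch assuming the standard C1FlexGrid DrawMode property):
    // Enable owner drawing so OwnerDrawCell fires, then attach the handler.
    grdAuditLogs.DrawMode = C1.Win.C1FlexGrid.DrawModeEnum.OwnerDraw;
    grdAuditLogs.OwnerDrawCell += grdAuditLogs_OwnerDrawCell;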
/* Now allocate the buffer */
int dataBufferSize=(int)(optimalSize.height*optimalSize.width*(ImageFormat.getBitsPerPixel(parameters.getPreviewFormat())/8.0));
mBuffer= new byte[dataBufferSize];
/* The buffer where the current frame will be copied */
mFrame = new byte[dataBufferSize];
mCamera.addCallbackBuffer(mBuffer);
mCamera.setPreviewCallbackWithBuffer(new Camera.PreviewCallback()
{
private long timestamp=0;
public synchronized void onPreviewFrame(byte[] data, Camera camera)
{
System.arraycopy(data, 0, mFrame, 0, data.length);
Log.i("Completed copying date","Ready for processing");
try{
camera.addCallbackBuffer(mBuffer);
}catch (Exception e)
{
Log.e("Camera", "addCallbackBuffer error");
return;
}
return;
}
});
void EyeBlink::blink(IplImage* frame)
{
CvSeq* comp = 0;
CvRect window, eye;
int key = 0, nc, found;
int text_delay, stage = STAGE_INIT;
int valueBlink =0;
int delay, i;
capture = cvCaptureFromCAM(0);
if (!capture)
exit_nicely("Cannot initialize camera!");
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, FRAME_WIDTH);
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, FRAME_HEIGHT);
frame = cvQueryFrame(capture);
if (!frame)
exit_nicely("cannot query frame!");
cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 0.4, 0.4, 0, 1, 8);
cvNamedWindow(wnd_name, 1);
/*for (delay = 20, i = 0; i < 6; i++, delay = 20)
while (delay)
{
frame = cvQueryFrame(capture);
if (!frame)
exit_nicely("cannot query frame!");
DRAW_TEXT(frame, msg[i], delay, 0);
cvShowImage(wnd_name, frame);
cvWaitKey(30);
}*/
storage = cvCreateMemStorage(0);
if (!storage)
exit_nicely("cannot allocate memory storage!");
kernel = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_CROSS, NULL);
gray = cvCreateImage(cvGetSize(frame), 8, 1);
prev = cvCreateImage(cvGetSize(frame), 8, 1);
diff = cvCreateImage(cvGetSize(frame), 8, 1);
tpl = cvCreateImage(cvSize(TPL_WIDTH, TPL_HEIGHT), 8, 1);
if (!kernel || !gray || !prev || !diff || !tpl)
exit_nicely("system error.");
gray->origin = frame->origin;
prev->origin = frame->origin;
diff->origin = frame->origin;
cvNamedWindow(wnd_debug, 1);
while (key != 'q')
{
int t=100;
frame = cvQueryFrame(capture);
if (!frame)
exit_nicely("cannot query frame!");
frame->origin = 0;
if (stage == STAGE_INIT)
window = cvRect(0, 0, frame->width, frame->height);
cvCvtColor(frame, gray, CV_BGR2GRAY);
nc = get_connected_components(gray, prev, window, &comp);
if (stage == STAGE_INIT && is_eye_pair(comp, nc, &eye))
{
int i;
for (i = 0; i < 5; i++)
{
frame = frame1[i];
if (!frame)
exit_nicely("cannot query frame");
cvShowImage(wnd_name, frame);
if (diff)
cvShowImage(wnd_debug, diff);
cvWaitKey(30);
}
cvSetImageROI(gray, eye);
cvCopy(gray, tpl, NULL);
cvResetImageROI(gray);
stage = STAGE_TRACKING;
text_delay = 10;
}
if (stage == STAGE_TRACKING)
{
found = locate_eye(gray, tpl, &window, &eye);
if (!found || key == 'r')
stage = STAGE_INIT;
if (is_blink(comp, nc, window, eye))
text_delay = 10;
DRAW_RECTS(frame, diff, window, eye);
DRAW_TEXT(frame, "blink!", text_delay, 1);
}
cvShowImage(wnd_name, frame);
cvShowImage(wnd_debug, diff);
prev = (IplImage*)cvClone(gray);
key = cvWaitKey(15);
t--;
}
exit_nicely(NULL);
}
I am working on eye-blink detection but am having some challenges. First, I captured the frames and buffered them using the setPreviewCallbackWithBuffer method in Android, but my question is how I can make use of the frames that are stored as bytes. That means eliminating the cvCaptureFromCAM and cvQueryFrame calls from the code above.
I'm using Speex to encode raw audio data, but after I decode it the audio plays back at a faster rate, so the speaker sounds like a chipmunk. I'm using NSpeex and Silverlight 4, with 8 kHz sampling.
Encoding Function:
JSpeexEnc encoder = new JSpeexEnc();
int rawDataSize = 0;
public byte[] EncodeAudio(byte[] rawData)
{
var encoder = new SpeexEncoder(BandMode.Narrow);
var inDataSize = rawData.Length / 2;
var inData = new short[inDataSize];
for (var index = 0; index < rawData.Length; index += 2)
{
inData[index / 2] = BitConverter.ToInt16(rawData, index);
}
inDataSize = inDataSize - inDataSize % encoder.FrameSize;
var encodedData = new byte[rawData.Length];
var encodedBytes = encoder.Encode(inData, 0, inDataSize, encodedData, 0, encodedData.Length);
byte[] encodedAudioData = null;
if (encodedBytes != 0)
{
encodedAudioData = new byte[encodedBytes];
Array.Copy(encodedData, 0, encodedAudioData, 0, encodedBytes);
}
rawDataSize = inDataSize; // Count of encoded shorts, for debugging
return encodedAudioData;
}
Decoding Function:
SpeexDecoder decoder = new SpeexDecoder(BandMode.Narrow);
public byte[] Decode(byte[] encodedData)
{
try
{
short[] decodedFrame = new short[8000]; // should be the same number of samples as on the capturing side
int decoderBytes = decoder.Decode(encodedData, 0, encodedData.Length, decodedFrame, 0, false);
byte[] decodedData = new byte[encodedData.Length];
byte[] decodedAudioData = null;
decodedAudioData = new byte[decoderBytes * 2];
for (int shortIndex = 0, byteIndex = 0; byteIndex < decoderBytes; shortIndex++)
{
BitConverter.GetBytes(decodedFrame[shortIndex + byteIndex]).CopyTo(decodedAudioData, byteIndex * 2);
byteIndex++;
}
// todo: do something with the decoded data
return decodedAudioData;
}
catch (Exception ex)
{
ShowMessageBox(ex.Message.ToString());
return null;
}
}
Playing the audio:
void PlayWave(byte[] PCMBytes)
{
byte[] decodedBuffer = Decode(PCMBytes);
MemoryStream ms_PCM = new MemoryStream(decodedBuffer);
MemoryStream ms_Wave = new MemoryStream();
_pcm.SavePcmToWav(ms_PCM, ms_Wave, 16, 8000, 1);
WaveMediaStreamSource WaveStream = new WaveMediaStreamSource(ms_Wave);
mediaElement1.SetSource(WaveStream);
mediaElement1.Play();
}
Sorry for the late response, but I figured out what the problem was.
Inside my decode function I loop through the decoded short array, but I was only copying half of the bytes into my new byte array.
It needs to look something like this:
decodedAudioData = new byte[decoderBytes * 2];
for (int shortIndex = 0, byteIndex = 0; shortIndex < decoderBytes; shortIndex++, byteIndex += 2)
{
byte[] temp = BitConverter.GetBytes(decodedFrame[shortIndex]);
decodedAudioData[byteIndex] = temp[0];
decodedAudioData[byteIndex + 1] = temp[1];
}
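An equivalent, slightly shorter way to do the same short-to-byte copy is Buffer.BlockCopy; on a little-endian runtime it produces the same byte order as BitConverter.GetBytes, using the same decoderBytes sample count as above:
    // Copy decoderBytes samples (2 bytes each) straight into the output buffer.
    byte[] decodedAudioData = new byte[decoderBytes * 2];
    Buffer.BlockCopy(decodedFrame, 0, decodedAudioData, 0, decoderBytes * 2);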