I get a crackling sound at the beginning and end of a Core Audio output queue. The code should simply generate a tone.
Edit: created a sample project
https://github.com/MrMatthias/CoreAudioCrackle
Here is the setup:
-(void) startOutputQueue {
if(userData.outputQueue != NULL) {
if(!checkError(AudioQueuePrime(userData.outputQueue, 0, NULL), "AudioQueuePrime")) {
NSLog(#"Error priming QutputQueue");
}
if(!checkError(AudioQueueStart(userData.outputQueue, NULL), "AudioQueueStart Output")) {
NSLog(#"Error starting OutputQueue");
}
}
}
-(void) setupOutputQueue {
memset(&userData.outputDesc, 0, sizeof(userData.outputDesc));
userData.outputDesc.mFormatID = kAudioFormatLinearPCM;
userData.outputDesc.mFramesPerPacket = 1;
userData.outputDesc.mFormatFlags = kAudioFormatFlagIsBigEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
userData.outputDesc.mChannelsPerFrame = 1;
userData.outputDesc.mSampleRate = 44100;
userData.outputDesc.mBitsPerChannel = 16;
userData.outputDesc.mBytesPerFrame = userData.outputDesc.mBytesPerPacket = 2;
userData.outputSamplePosition = 0;
if (userData.outputQueue == NULL) {
if(!checkError(AudioQueueNewOutput(&userData.outputDesc, outputCallback, &userData, NULL, NULL, 0, &userData.outputQueue), "AudioQueueNewOutput")) {
return;
}
UInt32 bufferSize = userData.outputDesc.mBytesPerFrame * userData.outputDesc.mSampleRate * DURATION;
for (int i=0; i<3; ++i) {
if(!checkError(AudioQueueAllocateBuffer(userData.outputQueue, bufferSize, &userData.outputBuffers[i]), "AudioQueueAllocateBuffer")) {
return;
}
outputCallback(&userData, userData.outputQueue, userData.outputBuffers[i]);
}
}
}
In the output callback I call a block that fills the buffer:
userData->outputBlock(userData, inAQ, inBuffer);
AudioQueueEnqueueBuffer(userData->outputQueue, inBuffer, 0, NULL);
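For context, a minimal sketch of what the complete callback might look like around those two lines, assuming a UserData struct type (name assumed) that holds the block and the queue:

static void outputCallback(void *inUserData, AudioQueueRef inAQ, AudioQueueBufferRef inBuffer) {
    UserData *userData = (UserData *)inUserData;
    /* let the block fill inBuffer with samples */
    userData->outputBlock(userData, inAQ, inBuffer);
    /* hand the filled buffer back to the queue */
    AudioQueueEnqueueBuffer(userData->outputQueue, inBuffer, 0, NULL);
}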
Filling the buffer looks like this:
UInt32 sampleCount = DURATION * userData->outputDesc.mSampleRate;
double f1 = userData->outputDesc.mSampleRate / 10000.0f;
for (int i=0; i<sampleCount; ++i) {
SInt16 sample = CFSwapInt16HostToBig(SHRT_MAX * ( sin((userData->outputSamplePosition + i) * 2 * M_PI / f1) ));
((SInt16*)inBuffer->mAudioData)[i] = sample;
}
userData->outputSamplePosition += sampleCount;
inBuffer->mAudioDataByteSize = sampleCount * 2;
The recording (waveform screenshot) shows the crackle at the start and end of the tone.
Try adding (SInt16) just before SHRT_MAX * ( sin((userData->...
so that your entire code block looks like this:
SInt16 sample = CFSwapInt16HostToBig((SInt16)SHRT_MAX * ( sin((userData->outputSamplePosition + i) * 2 * M_PI / f1) ));
I have two functions, recursive and iterative, to calculate money change; in the iterative version I needed to check whether the money is a multiple of a coin value (money modulo the coin is zero). Is there a way to do it iteratively without checking the modulus?
public static int MoneyChangeRecur(int money, int[] changes)
{
int minChange = 0;
if (money == 0) return 0;
int min = int.MaxValue;
for (int i = 0; i < changes.Length; i++)
{
if (money >= changes[i])
{
minChange = MoneyChangeRecur(money - changes[i], changes);
if (minChange + 1 <= min)
{
min = minChange + 1;
}
}
}
return min;
}
static int IterativeMoneychange(int money)
{
int[] ar = new int[money + 1];
int[] change = { 6, 5, 1 };
int min = 9999;
int index = 0;
for (int i = 0; i < money+1; i++)
{
min = 99999;
index = 0;
bool modSet = false;
for (int j= 0; j <change.Length; j++)
{
if (i >= change[j])
{
int mod=(i % change[j]);
if(mod==0&&change[j]!=1)
{
if (!modSet) min = 99999;
if ((i-change[j] )< min)
{
min = (i - change[j]);
modSet = true;
}
}
else
{
if ((i - change[j]) < min)
{
min = (i - change[j]);
modSet = false;
}
}
}
}
if (min != 99999)// min = 0;
ar[i] = ar[min] +1;
}
return ar[money];
}
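For comparison, the usual bottom-up formulation needs no modulus check at all: for every amount i it tries every coin and reuses the already-computed result for i - coin. This is only a sketch of that idea (hypothetical method name, not a drop-in replacement for the code above):

static int IterativeMoneyChangeDP(int money, int[] changes)
{
    // minCoins[i] = minimum number of coins needed to make amount i
    int[] minCoins = new int[money + 1];
    for (int i = 1; i <= money; i++)
    {
        minCoins[i] = int.MaxValue;
        foreach (int coin in changes)
        {
            // Reuse the sub-result for (i - coin) if that amount is reachable.
            if (i >= coin && minCoins[i - coin] != int.MaxValue
                && minCoins[i - coin] + 1 < minCoins[i])
            {
                minCoins[i] = minCoins[i - coin] + 1;
            }
        }
    }
    return minCoins[money];
}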
So I'm making a simple steganography tool (hiding messages within images) and exposing it as a web service via Node.js. I am very new to JavaScript and Node.js in particular. The app first converts a text string into a binary string by changing each character into its 8-bit ASCII encoding, resulting in one large binary string. I then embed the message within the pixels: even pixel values represent 0s from the binary string, and odd values represent 1s. The end of the string is marked by three pixels of value 100 in a row (this is temporary, until I figure out a better way to mark the end). I'm using a Node.js library called 'pngjs' that gives me pixel-level access to PNG images.
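For illustration, the text-to-binary step described above could look roughly like this (a sketch with a hypothetical helper name, not the exact code from the app):

function toBinaryString(text) {
    var bits = "";
    for (var i = 0; i < text.length; i++) {
        // 8-bit ASCII code of the character, padded with leading zeros
        var bin = text.charCodeAt(i).toString(2);
        while (bin.length < 8) {
            bin = "0" + bin;
        }
        bits += bin;
    }
    return bits;
}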
So I have a problem with the decodeMessage function. It builds up the string message and is then meant to return it; however, the return call at the end results in undefined.
How can I fix it?
Thanks in advance for the help!
function encodeMessage(image, mes) {
var message = mes;
var fs = require('fs'),
PNG = require('pngjs').PNG;
fs.createReadStream(image)
.pipe(new PNG({
filterType: 4
}))
.on('parsed', function() {
for (var y = 0; y < this.height; y++) {
for (var x = 0; x < this.width; x++) {
var idx = (this.width * y + x);// << 2;
//console.log(idx);
if (idx < message.length) {
var item = message.charAt(idx);
/* if the character in the encoded string is 0 */
if (item == 0) {
/* if the pixel is odd, we want it to be even */
if (this.data[idx] % 2 == 1) {
/* if the pixel is 0, add 1 to it */
if (this.data[idx] == 0) {
this.data[idx] = this.data[idx] + 1;
} else {
/* else substract 1 */
this.data[idx] = this.data[idx] - 1;
}
}
} else {
/* if the character in the encoded string is 1 */
if (this.data[idx] % 2 == 0) {
if (this.data[idx] == 0) {
this.data[idx] = this.data[idx] + 1;
} else {
this.data[idx] = this.data[idx] - 1;
}
}
}
//console.log(this.data[idx]);
} else if (idx === message.length) {
/* do something to the first pixel following the end of the string */
this.data[idx] = 100;
this.data[idx+1] = 100;
this.data[idx+2] = 100;
//console.log(this.data[idx]);
} else {
/* do something to the remaining pixels */
}
}
}
this.pack().pipe(fs.createWriteStream('encoded_' + image));
});
}
function decodeMessage(image) {
var message = "";
var fs = require('fs'),
PNG = require('pngjs').PNG;
fs.createReadStream(image)
.pipe(new PNG({
filterType: 4
}))
.on('parsed', function() {
dance:
for (var y = 0; y < this.height; y++) {
for (var x = 0; x < this.width; x++) {
var idx = (this.width * y + x);// << 2;
if (this.data[idx] == 100 && this.data[idx+1] == 100 && this.data[idx+2] == 100) {
break dance;
} else {
if (this.data[idx] % 2 == 0) {
message += "0";
} else {
message += "1";
}
}
}
}
/* the message outputs correctly over here */
console.log(message);
//return message;
});
/* but the return of the variable here doesn't work */
return message;
}
exports.encodeMessage = encodeMessage;
exports.decodeMessage = decodeMessage;
The parsed event is fired asynchronously, so you cannot return a value from decodeMessage.
function decodeMessage(image, cb) {
// Code
.on('parsed', function() {
// Code
console.log(message);
cb(message);
});
}
Then you must pass a callback to your decodeMessage function.
decodeMessage(image, function(decoded){
// Here is the decoded data.
});
The same is true for your encodeMessage function. The function will return before encoding has finished. If you want to know when it is done, you need to pass a callback the same way.
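On the encode side, completion could be signalled from the write stream's finish event, for example (a sketch that keeps the structure of the original encodeMessage and adds a cb parameter):

function encodeMessage(image, mes, cb) {
    // Code
    .on('parsed', function() {
        // Code
        this.pack()
            .pipe(fs.createWriteStream('encoded_' + image))
            .on('finish', function() {
                cb('encoded_' + image); // the encoded file is fully written
            });
    });
}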
I found a project description on a course website for computer graphics. I am trying to complete the project for fun.
Here is the link to the problem description:
http://www.pdfhost.net/index.php?Action=Download&File=901bc7785bef41364b3a40f6f4493926
Below is my code. The problem I am running into is that the terms of the series grow so fast that I can't map the points to the screen correctly. The problem description says the points should fit within a -2 to 2 square, but the differences between the values are so huge that normalizing by the largest one would collapse most of the points onto a single pixel.
I assume I have a fundamental misunderstanding that I can't identify. Any help or insight would be appreciated!
int w = 800, h = 600;
int numTimes = 10, cSize = 5;
float xr = 2, yr = 2;
void setup() {
size(w,h);
}
void draw() {
background(255);
Complex v = new Complex(mouseX*(xr/w) - (xr/2), mouseY*(yr/h) - (yr/2));
Complex[] exps = new Complex[numTimes];
for (int i = 0; i < numTimes; i++) {
exps[i] = complexExp(v,i);
}
ellipse(w/2, h/2, cSize, cSize);
for (int i = 0; i < numTimes; i++) {
drawSeries(new Complex(0,0), exps, i, i);
}
}
void drawSeries(Complex vToDraw, Complex[] exps, int count, int clrTrunc) {
if (count == 0) {
Complex v = exps[0];
float progress = float(clrTrunc) / float(numTimes);
fill(255*progress, 180, 255 - 255*progress);
vToDraw.add(v);
ellipse(vToDraw.r*(w/xr) + (w/2), vToDraw.i*(h/xr) + h/2, cSize, cSize);
vToDraw.sub(v);
vToDraw.sub(v);
ellipse(vToDraw.r*(w/xr) + (w/2), vToDraw.i*(h/xr) + h/2, cSize, cSize);
} else {
Complex v = exps[count];
vToDraw.add(v);
drawSeries(vToDraw, exps, count - 1, clrTrunc );
vToDraw.sub(v);
vToDraw.sub(v);
drawSeries(vToDraw, exps, count - 1,clrTrunc );
}
}
Complex complexExp(Complex v, int times) {
if (times == 0) {
return new Complex(1, 1);
} else if ( times == 1) {
return new Complex( v.r*v.r - v.i*v.i, 2*v.r*v.i );
} else {
return complexExp( new Complex( v.r*v.r - v.i*v.i, 2*v.r*v.i ), times - 1 );
}
}
class Complex {
float r, i;
Complex() {
this.r = 0;
this.i = 0;
}
Complex(float r, float i) {
this.r = r;
this.i = i;
}
void add(Complex nv) {
this.r += nv.r;
this.i += nv.i;
}
void sub(Complex nv) {
this.r -= nv.r;
this.i -= nv.i;
}
}
The reason the terms explode is complexExp: each recursive step squares its argument, so exps[i] ends up as v^(2^i) rather than v^i (for example, with |v| = 1.2 the term for i = 9 has magnitude 1.2^512, roughly 10^40, instead of 1.2^9, roughly 5), and for an exponent of 0 it returns (1, 1) instead of (1, 0). Beyond fixing that, I think you can make the code cleaner if you write a more complete Complex class.
int w = 800, h = 600;
int numTimes = 10, cSize = 5;
float xr = 3, yr = 3;
void setup() {
size(w,h);
noLoop();
}
void mousePressed() {
redraw();
}
void draw() {
background(255);
Complex v = new Complex(mouseX*(xr/w) - (xr/2), mouseY*(yr/h) - (yr/2));
Complex[] exps = new Complex[numTimes];
for (int i = 0; i < numTimes; i++) {
exps[i] = v.raisedTo(i);
print(exps[i]);
}
ellipse(w/2, h/2, cSize, cSize);
print(exps);
drawSerie(exps, numTimes);
}
void drawSerie(Complex[] exps, int total)
{
Complex partial = new Complex(0, 0);
drawPartial(exps, total -1, partial);
}
void drawFinal(Complex toDraw)
{
point(toDraw.r*(w/xr) + (w/2), toDraw.i*(h/xr) + h/2);
}
void drawPartial(Complex [] exps, int depth, Complex partial)
{
if (depth == -1)
{
drawFinal(partial);
return;
}
int nextDepth = depth -1;
drawPartial(exps, nextDepth, partial);
Complex element = exps[depth];
drawPartial(exps, nextDepth, partial.add(element));
drawPartial(exps, nextDepth, partial.sub(element));
}
class Complex {
float r, i;
Complex() {
this.r = 0;
this.i = 0;
}
Complex(float r, float i) {
this.r = r;
this.i = i;
}
Complex(Complex other)
{
this.r = other.r;
this.i = other.i;
}
Complex mult(Complex other)
{
return new Complex(this.r*other.r - this.i*other.i, this.r*other.i + this.i*other.r);
}
Complex add(Complex nv) {
return new Complex(this.r + nv.r, this.i + nv.i);
}
Complex sub(Complex nv) {
return new Complex(this.r - nv.r, this.i - nv.i);
}
Complex raisedTo(int n) {
if (n == 0) {
return new Complex(1, 0);
}
else if (n % 2 == 0)
{
return (this.mult(this)).raisedTo(n/2);
}
else
{
return this.mult(this.raisedTo(n - 1 ));
}
}
String toString()
{
return "real: " + this.r + " imaginary: " + this.i;
}
}
The computation of the series is not efficient, but I think it is clear.
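As a small optimization on the setup side, the powers computed in draw() could be built incrementally instead of calling raisedTo for each exponent (a sketch using the Complex class above):

Complex[] exps = new Complex[numTimes];
Complex power = new Complex(1, 0);   // v^0
for (int i = 0; i < numTimes; i++) {
    exps[i] = power;
    power = power.mult(v);           // v^(i+1), ready for the next slot
}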
/* Now allocate the buffer */
int dataBufferSize=(int)(optimalSize.height*optimalSize.width*(ImageFormat.getBitsPerPixel(parameters.getPreviewFormat())/8.0));
mBuffer= new byte[dataBufferSize];
/* The buffer where the current frame will be copied */
mFrame = new byte[dataBufferSize];
mCamera.addCallbackBuffer(mBuffer);
mCamera.setPreviewCallbackWithBuffer(new Camera.PreviewCallback()
{
private long timestamp=0;
public synchronized void onPreviewFrame(byte[] data, Camera camera)
{
System.arraycopy(data, 0, mFrame, 0, data.length);
Log.i("Completed copying date","Ready for processing");
try{
camera.addCallbackBuffer(mBuffer);
}catch (Exception e)
{
Log.e("Camera", "addCallbackBuffer error");
return;
}
return;
}
});
void EyeBlink::blink(IplImage* frame)
{
CvSeq* comp = 0;
CvRect window, eye;
int key = 0, nc, found;
int text_delay, stage = STAGE_INIT;
int valueBlink =0;
int delay, i;
capture = cvCaptureFromCAM(0);
if (!capture)
exit_nicely("Cannot initialize camera!");
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, FRAME_WIDTH);
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, FRAME_HEIGHT);
frame = cvQueryFrame(capture);
if (!frame)
exit_nicely("cannot query frame!");
cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 0.4, 0.4, 0, 1, 8);
cvNamedWindow(wnd_name, 1);
/*for (delay = 20, i = 0; i < 6; i++, delay = 20)
while (delay)
{
frame = cvQueryFrame(capture);
if (!frame)
exit_nicely("cannot query frame!");
DRAW_TEXT(frame, msg[i], delay, 0);
cvShowImage(wnd_name, frame);
cvWaitKey(30);
}*/
storage = cvCreateMemStorage(0);
if (!storage)
exit_nicely("cannot allocate memory storage!");
kernel = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_CROSS, NULL);
gray = cvCreateImage(cvGetSize(frame), 8, 1);
prev = cvCreateImage(cvGetSize(frame), 8, 1);
diff = cvCreateImage(cvGetSize(frame), 8, 1);
tpl = cvCreateImage(cvSize(TPL_WIDTH, TPL_HEIGHT), 8, 1);
if (!kernel || !gray || !prev || !diff || !tpl)
exit_nicely("system error.");
gray->origin = frame->origin;
prev->origin = frame->origin;
diff->origin = frame->origin;
cvNamedWindow(wnd_debug, 1);
while (key != 'q')
{
int t=100;
frame = cvQueryFrame(capture);
if (!frame)
exit_nicely("cannot query frame!");
frame->origin = 0;
if (stage == STAGE_INIT)
window = cvRect(0, 0, frame->width, frame->height);
cvCvtColor(frame, gray, CV_BGR2GRAY);
nc = get_connected_components(gray, prev, window, &comp);
if (stage == STAGE_INIT && is_eye_pair(comp, nc, &eye))
{
int i;
for (i = 0; i < 5; i++)
{
frame = frame1[i];
if (!frame)
exit_nicely("cannot query frame");
cvShowImage(wnd_name, frame);
if (diff)
cvShowImage(wnd_debug, diff);
cvWaitKey(30);
}
cvSetImageROI(gray, eye);
cvCopy(gray, tpl, NULL);
cvResetImageROI(gray);
stage = STAGE_TRACKING;
text_delay = 10;
}
if (stage == STAGE_TRACKING)
{
found = locate_eye(gray, tpl, &window, &eye);
if (!found || key == 'r')
stage = STAGE_INIT;
if (is_blink(comp, nc, window, eye))
text_delay = 10;
DRAW_RECTS(frame, diff, window, eye);
DRAW_TEXT(frame, "blink!", text_delay, 1);
}
cvShowImage(wnd_name, frame);
cvShowImage(wnd_debug, diff);
prev = (IplImage*)cvClone(gray);
key = cvWaitKey(15);
t--;
}
exit_nicely(NULL);
}
I am working on eye blink detection but having some challenges. First I captured the frames and buffered them using the setPreviewCallbackWithBuffer method in Android (first code block above), but my question is how I can make use of the frames that are stored as bytes, which means eliminating the cvCaptureFromCAM and cvQueryFrame calls from the OpenCV code above.
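For what it's worth, if the OpenCV Java bindings are available, a preview frame stored as an NV21 byte array can typically be wrapped in a Mat and converted to grayscale along these lines (only a sketch under those assumptions, with hypothetical width/height variables for the preview dimensions; not tested against the code above):

// mFrame holds one NV21 preview frame of size width x height (assumption)
Mat yuv = new Mat(height + height / 2, width, CvType.CV_8UC1);
yuv.put(0, 0, mFrame);
Mat gray = new Mat();
Imgproc.cvtColor(yuv, gray, Imgproc.COLOR_YUV420sp2GRAY);
// 'gray' can then play the role of the frames that cvQueryFrame provided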
I want to create a list of operations in a grid view. For example, see this URL:
http://cdn-static.cnet.co.uk/i/product_media/40000186/nokia1616_01.jpg
You can look at this question or this page (and use LWUIT or CustomItems), or extend Canvas. In that case you need two pictures for every operation in the grid view: one for the normal state and another for the highlighted state. Here is a simple Canvas that represents 4 operations in a 2*2 grid:
public class GridCanvas extends Canvas {
int highlightedRow = 0;
int highlightedColumn = 0;
Image[][] normalImageMat;
Image[][] highlightedImageMat;
Image[][] imageMat;
int gridColumnNo;
int gridRowNo;
/**
* constructor
*/
public GridCanvas() {
gridColumnNo = 2;
gridRowNo = 2;
normalImageMat = new Image[gridRowNo][gridColumnNo];
highlightedImageMat = new Image[gridRowNo][gridColumnNo];
imageMat = new Image[gridRowNo][gridColumnNo];
try {
for (int i = 0; i < gridRowNo; i++) {
for (int j = 0; j < gridColumnNo; j++) {
normalImageMat[i][j] = Image.createImage("/hello/normalImage" + i + j + ".png");
}
}
for (int i = 0; i < gridRowNo; i++) {
for (int j = 0; j < gridColumnNo; j++) {
highlightedImageMat[i][j] = Image.createImage("/hello/highlightedImage" + i + j + ".png");
}
}
} catch (Exception e) {
e.printStackTrace();
}
}
/**
* paint
*/
public void paint(Graphics g) {
g.setColor(255, 255, 255);
g.fillRect(0, 0, getWidth(), getHeight());
for (int i = 0; i < gridRowNo; i++) {
System.arraycopy(normalImageMat[i], 0, imageMat[i], 0, 2);
}
imageMat[highlightedRow][highlightedColumn] = highlightedImageMat[highlightedRow][highlightedColumn];
int width = 0;
int height = 0;
for (int i = 0; i < gridRowNo; i++) {
for (int j = 0; j < gridColumnNo; j++) {
g.drawImage(imageMat[i][j], width, height, 0);
width = width + imageMat[i][j].getWidth();
}
width = 0;
height = height + imageMat[0][0].getHeight();
}
}
/**
* Called when a key is pressed.
*/
protected void keyPressed(int keyCode) {
int gameAction = this.getGameAction(keyCode);
if (gameAction == RIGHT) {
highlightedColumn = Math.min(highlightedColumn + 1, gridColumnNo - 1);
} else if (gameAction == LEFT) {
highlightedColumn = Math.max(highlightedColumn - 1, 0);
} else if (gameAction == UP) {
highlightedRow = Math.max(0, highlightedRow - 1);
} else if (gameAction == DOWN) {
highlightedRow = Math.min(gridRowNo - 1, highlightedRow + 1);
}
repaint();
}
}
In real samples you would detect gridColumnNo and gridRowNo from the screen and your icons' dimensions.
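For example, the counts could be derived from the Canvas size and a common icon size (a sketch, assuming all icons share the same dimensions, here called iconWidth and iconHeight):

gridColumnNo = Math.max(1, getWidth() / iconWidth);
gridRowNo = Math.max(1, getHeight() / iconHeight);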
If you cannot go with LWUIT (license, library size, etc.) and do not want to leave the screen rendering to LCDUI (CustomItem), you should extend Canvas.
I have shared code for an adaptive grid at http://smallandadaptive.blogspot.com.br/2010/12/touch-menu.html Feel free to use it.
In this sample all items are Strings, but you can change the TouchItem to draw Images instead.