I am building a time-lapse camera web application using a Raspberry Pi and the Raspberry Pi Camera Module. So far I have built a web application (using Node.js, Express, AngularJS, and Bootstrap 3) that can interact with the camera module through an open-source Node.js module (https://www.npmjs.org/package/raspicam).
I have a global variable called "setting" that is updated whenever the user changes the camera settings:
var setting = {
  mode: "timelapse",
  output: "public/images/image%d.jpg", // image1, image2, image3, etc...
  encoding: "jpg",
  timelapse: 3000, // take a picture every 3 seconds
  timeout: 12000   // take a total of 4 pictures over 12 seconds
}
I have three functions in Express that can:
set the camera settings
exports.setCamera = function(req, res) {
  setting = {
    mode: req.body.mode,
    output: req.body.output,
    encoding: req.body.encoding,
    timelapse: req.body.timelapse,
    timeout: req.body.timeout
  };
  res.status(200).json(setting);
  console.log('SET CAMERA - ' + JSON.stringify(setting));
}
start the camera
exports.startCamera = function(req, res) {
  camera = new RaspiCam(setting);
  camera.on("start", function(err, timestamp) {
    console.log("timelapse started at " + timestamp);
  });
  camera.on("read", function(err, timestamp, filename) {
    console.log("timelapse image captured with filename: " + filename);
  });
  camera.on("exit", function(timestamp) {
    console.log("timelapse child process has exited");
    res.status(200).json(setting);
  });
  camera.on("stop", function(err, timestamp) {
    console.log("timelapse child process has been stopped at " + timestamp);
  });
  camera.start();
  setTimeout(function() {
    camera.stop();
  }, setting.timeout + 1000);
  console.log('START CAMERA - ' + JSON.stringify(setting));
}
stop the camera
exports.stopCamera = function(req, res) {
  camera.stop();
  res.status(200).json(setting);
  console.log('STOP CAMERA - ' + JSON.stringify(setting));
}
As you can see in the "startCamera" function, I am creating a new RaspiCam object called "camera" that takes the global variable "setting" (which can change at any time). When the camera object is created, I am also attaching "start", "read", "exit", and "stop" handlers to it. The problem is that since I am not making the camera object a global variable, when the user clicks Stop halfway through a session, the "stopCamera" function gets called but does not know what camera.stop() is and says it is undefined. Is there a way to let the "stopCamera" function know about the camera object (which was created in the "startCamera" function)?
Sorry if this is confusing; I don't know how else to describe my problem... :(
I think you have a problem with how this is architected, but the simple solution to your question is to check whether the camera object has been initialized:
exports.stopCamera = function(req, res) {
  if (camera && typeof camera.stop === "function") {
    camera.stop();
    console.log('STOP CAMERA - ' + JSON.stringify(setting));
  }
  res.status(200).json(setting);
}
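For reference, a minimal sketch of that shared-state approach (my own illustration, not from the raspicam docs): keep the camera handle at module scope so every exported handler can reach it, and clear it when a session ends.

var RaspiCam = require('raspicam');

var setting = { /* camera settings, as above */ };
var camera = null; // module-scoped, visible to all exported handlers

exports.startCamera = function(req, res) {
  camera = new RaspiCam(setting); // event handlers omitted for brevity
  camera.start();
  res.status(200).json(setting);
};

exports.stopCamera = function(req, res) {
  if (camera && typeof camera.stop === 'function') {
    camera.stop();
    camera = null; // forget the finished session
  }
  res.status(200).json(setting);
};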
I'm making a fun open-source sample for doing Edge Compute Computer Vision, using the Raspberry Pi as my hardware.
The current SDK I have for accessing the hardware is Node.js-based (I'll release a second one with Python when it is available). Warning: I am a Node novice.
The issue I am facing is that I want to take pictures with the stock camera in a loop, without saving a file. I just want to get access to the buffer, extract the pixels, and pass them to my second edge module.
Taking pictures with no file save inside a while(true) loop appears to never execute.
Here is my sample:
'use strict';
var sleep = require('sleep');
const Raspistill = require('node-raspistill').Raspistill;
var pixel_getter = require('pixel-getter');

while (true) {
  const camera = new Raspistill({
    verticalFlip: true,
    horizontalFlip: true,
    width: 500,
    height: 500,
    encoding: 'jpg',
    noFileSave: true,
    time: 1
  });
  camera.takePhoto().then((photo) => {
    console.log('got photo');
    pixel_getter.get(photo, function(err, pixels) {
      console.log('got pixels');
      console.log(String(pixels));
    });
  });
  sleep.sleep(5);
}
console.log('picture taken');
In the above code, none of the console.log calls ever log, leading me to believe that photos are never taken and therefore pixels can not be extracted.
Any assistance would be greatly appreciated.
UPDATE:
It looks like the looping mechanic might be the problem: a synchronous while(true) loop never yields back to Node's event loop, so the takePhoto() promise callbacks never get a chance to run. I guess I don't really care if it takes pictures in a loop, as long as it takes a picture, I pass it off, it takes another, I pass it off, indefinitely.
I decided to approach the problem with a recursive loop instead, which worked brilliantly.
'use strict';
const sleep = require('sleep');
const Raspistill = require('node-raspistill').Raspistill;
const pixel_getter = require('pixel-getter');

const camera = new Raspistill({
  verticalFlip: true,
  horizontalFlip: true,
  width: 500,
  height: 500,
  encoding: 'jpg',
  noFileSave: true,
  time: 5
});

function TakePictureLoop() {
  console.log('taking picture');
  camera.takePhoto().then((photo) => {
    console.log('got photo');
    pixel_getter.get(photo, function(err, pixels) {
      console.log('got pixels');
      TakePictureLoop();
    });
  });
}
TakePictureLoop();
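For what it's worth, the same flow can also be written without recursion using async/await (a sketch, untested on-device, requiring Node 7.6+; it assumes takePhoto() resolves with the image buffer, as in the code above):

'use strict';
const Raspistill = require('node-raspistill').Raspistill;
const pixel_getter = require('pixel-getter');

const camera = new Raspistill({
  verticalFlip: true,
  horizontalFlip: true,
  width: 500,
  height: 500,
  encoding: 'jpg',
  noFileSave: true,
  time: 5
});

// Promise wrapper around pixel-getter's callback-style API.
function getPixels(buffer) {
  return new Promise(function(resolve, reject) {
    pixel_getter.get(buffer, function(err, pixels) {
      if (err) reject(err); else resolve(pixels);
    });
  });
}

async function takePictureLoop() {
  // Unlike a synchronous while (true), each await yields control back to
  // the event loop, so the camera promises actually get a chance to settle.
  while (true) {
    const photo = await camera.takePhoto();
    console.log('got photo');
    const pixels = await getPixels(photo);
    console.log('got pixels');
    // ...pass `pixels` to the second edge module here...
  }
}

takePictureLoop().catch(function(err) {
  console.error('capture loop failed:', err);
});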
I'm a little bit frustrated that I can't stop a sound. I used the example code for playing a sound in three.js, and right after play() I call stop() as a test. No matter when I stop a sound during gameplay, I get the same error:
TypeError: Not enough arguments
I didn't think calling stop() required any arguments. I am running on Safari and Firefox.
Any help would be appreciated!
Here's my code:
// instantiate a listener
var audioListener = new THREE.AudioListener();

// add the listener to the camera
camera.add( audioListener );

// instantiate audio object
var oceanAmbientSound = new THREE.Audio( audioListener );

// add the audio object to the scene
scene.add( oceanAmbientSound );

// instantiate a loader
var loader = new THREE.AudioLoader();

// load a resource
loader.load(
  // resource URL
  'sounds/mySound.mp3',
  // Function when resource is loaded
  function ( audioBuffer ) {
    // set the audio object buffer to the loaded object
    oceanAmbientSound.setBuffer( audioBuffer );
    // play the audio, then immediately stop it as a test
    oceanAmbientSound.play();
    oceanAmbientSound.stop();
  },
  // Function called when download progresses
  function ( xhr ) {
    console.log( (xhr.loaded / xhr.total * 100) + '% loaded' );
  },
  // Function called when download errors
  function ( xhr ) {
    console.log( 'An error happened' );
  }
);
Figured it out: it must be my old version of Safari (8.0.8). Nonetheless, thanks for your help!
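For anyone else who lands here: old Safari's Web Audio implementation appears to treat the when parameter of AudioBufferSourceNode.stop(when) as mandatory, which would explain the "Not enough arguments" TypeError. A possible workaround sketch (untested, and it assumes your three.js build exposes the raw source node as .source and tracks .isPlaying):

// Pass an explicit time to the underlying source node, since old WebKit
// required the `when` argument that THREE.Audio's bare stop() omits.
if (oceanAmbientSound.isPlaying && oceanAmbientSound.source) {
  oceanAmbientSound.source.stop(0); // 0 means "stop now"
  oceanAmbientSound.isPlaying = false;
}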
I'm going to be honest. I'm way in over my head here.
I need to scrape data from a dynamic site for my employer. Before the data is visible on the page, some clicks and waits are necessary, so simple PHP scraping won't do. That's how I found out about the NodeJS + PhantomJS combo. Quite a pain to set up, but I did manage to load a site, run some code, and get a result.
I wrote a piece of jQuery that uses timeout loops to wait for some data to load. Eventually I get a JS object that I want to write to a file (JSON).
The issue I'm facing:
I build up the JS object inside the PhantomJS .evaluate scope, which runs in a headless browser, not directly in my Node.js server scope. How do I send the variable I built up inside evaluate back to my server, so I can write it to my file?
Some example code (I know it's ugly, but it's for illustrative purposes). I use node-phantom-simple as a bridge between Phantom and Node:
var phantom = require('node-phantom-simple'),
    fs = require('fs'),
    webPage = 'https://www.imagemedia.com/printing/business-card-printing/';

phantom.create(function(err, ph) {
  return ph.createPage(function(err, page) {
    return page.open(webPage, function(err, status) {
      page.onConsoleMessage = function(msg) {
        console.log(msg);
      };
      console.log("opened site? ", status);
      page.evaluate(function() {
        setTimeout(function() {
          $('.price-select-cnt').eq(0).find('select').val('1266').change()
          timeOutLoop()
          function timeOutLoop() {
            console.log('looping')
            setTimeout(function() {
              if ($('#ajax_price_tool div').length != 6) {
                timeOutLoop()
              } else {
                $('.price-select-cnt').eq(1).find('select').val('25')
                $('.price-select-cnt').eq(2).find('select').val('Premium Card Stock')
                $('.price-select-cnt').eq(3).find('select').val('Standard').change()
                timeOutLoop2()
              }
            }, 100)
          }
          function timeOutLoop2() {
            console.log('looping2')
            setTimeout(function() {
              if ($('.pricing-cost-cnt').text() == '$0' || $('.pricing-cost-cnt').text() == '') {
                timeOutLoop2()
              } else {
                var price = $('.pricing-cost-cnt').text()
                console.log(price)
              }
            }, 100)
          }
        }, 4000)
      });
    });
  });
});

function writeJSON(plsWrite) {
  var key = 'file';
  fs.writeFile('./results/' + key + '.json', plsWrite, 'utf8', function() {
    console.log('The JSON file is saved as');
    console.log('results/' + key + '.json');
  });
}
So how do I take the price this code pulls from the website, get it out of the evaluate scope, and write it to a file?
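One standard PhantomJS channel for getting a value out of the page is window.callPhantom / page.onCallback. A sketch (assuming node-phantom-simple forwards the onCallback event the same way it forwards onConsoleMessage above):

// Node scope: register the handler before calling page.evaluate.
page.onCallback = function(data) {
  writeJSON(data.price); // back in the Node scope, so fs is available
  ph.exit();
};

// Browser scope: inside page.evaluate, once the price is finally known,
// replace console.log(price) with:
// window.callPhantom({ price: price });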
I am dipping my toe into using different npm modules on my own, whereas before I just ran gulpfiles that others had already created. The npm module penthouse loads a web page and determines the above-the-fold CSS for that page. I am trying to combine that module with a site crawler, so I can get the above-the-fold CSS for all pages and store that CSS in a table.
So essentially I am:
crawling a site to get all the URLs
capturing the page ID from each URL
storing pages and their IDs in a CSV
loading the CSV and passing each URL to penthouse
taking the penthouse output and storing it in a table
So I am fine up until the last two steps. When I read the CSV, I get the error: possible EventEmitter memory leak detected. 11 exit listeners added. Use emitter.setMaxListeners() to increase limit.
The stack trace points here, at line 134. After reading about the error it makes sense, because I can see a bunch of event listeners being added, but I never see penthouse actually executing and removing those listeners.
It works just fine standalone, as expected (running penthouse against a single page, then exiting). But when I execute the code below to try to loop through all the URLs in a CSV, it spits out the memory-leak error twice and just hangs. None of my console.log statements in the following script are executed.
However, I added a console.log to the end of penthouse's index.js file, and it is executed multiple times (where it adds event listeners), but it never times out or exits.
So it's clear I am not integrating this properly, but I'm not sure how to proceed. What would be the best way to force it to read one line of the CSV at a time, process the URL, then take the output and store it in the DB before moving on to the next line?
const fs = require('fs');
var csv = require('fast-csv');
var penthouse = require('penthouse'),
    path = require('path');

var readUrlCsv = function() {
  var stream = fs.createReadStream("/home/vagrant/urls.csv");
  var csvStream = csv()
    // returns a single line from the CSV
    .on("data", function(data) {
      // data[0]: table id, data[1]: page type, data[2]: url
      penthouse({
        url: data[2],
        css: './dist/styles/main.css'
      }, function(err, criticalCss) {
        if (err) {
          console.log(err);
        }
        console.log('do we ever get here?'); // answer is no
        // `wp` is a WordPress API client configured elsewhere (not shown)
        if (data[1] === 'post') {
          wp.posts().id(data[0]).post({
            inline_css: criticalCss
          }).then(function(response) {
            console.log('saved to db');
          });
        } else {
          wp.pages().id(data[0]).page({
            inline_css: criticalCss
          }).then(function(response) {
            console.log('saved to db');
          });
        }
      });
    })
    .on("end", function() {
      console.log("done");
    });
  return stream.pipe(csvStream);
};
UPDATE
I changed my method to look like the one below, so it reads all the rows first, but it still throws the same error. It writes "done" to the console, then immediately spits out the memory warning twice.
var readUrlCsv = function() {
  var stream = fs.createReadStream("/home/vagrant/urls.csv");
  var urls = [];
  var csvStream = csv()
    .on("data", function(data) {
      // data[0]: table id, data[1]: page type, data[2]: url
      urls.push(data);
    })
    .on("end", function() {
      console.log("done");
      buildCriticalCss(urls);
    });
  return stream.pipe(csvStream);
};

var buildCriticalCss = function(urls) {
  //console.log(urls);
  urls.forEach(function(data, idx) {
    //console.log(data);
    penthouse({
      url: data[2],
      css: './dist/styles/main.css',
      // OPTIONAL params
      width: 1300,    // viewport width
      height: 900,    // viewport height
      timeout: 30000, // ms; abort critical css generation after this timeout
      strict: false,  // set to true to throw on css errors (will run faster if no errors)
      maxEmbeddedBase64Length: 1000 // characters; strip out inline base64 encoded resources larger than this
    }, function(err, criticalCss) {
      if (err) {
        console.log(err);
      }
      console.log('do we ever finish one?');
      if (data[1] === 'post') {
        console.log('saving post ' + data[0]);
        wp.posts().id(data[0]).post({
          inline_css: criticalCss
        }).then(function(response) {
          console.log('saved post to db');
        });
      } else {
        console.log('saving page ' + data[0]);
        wp.pages().id(data[0]).page({
          inline_css: criticalCss
        }).then(function(response) {
          console.log('saved page to db');
        });
      }
    });
  });
};
UPDATE 2
I took the simple approach to controlling the number of concurrent processes spawned.
var readUrlCsv = function() {
  var stream = fs.createReadStream("/home/vagrant/urls.csv");
  var urls = [];
  var csvStream = csv()
    .on("data", function(data) {
      // data[0]: table id, data[1]: page type, data[2]: url
      urls.push(data);
    })
    .on("end", function() {
      console.log("done");
      //console.log(urls);
      buildCriticalCss(urls);
    });
  return stream.pipe(csvStream);
};

function buildCriticalCss(data) {
  var row = data.shift();
  console.log(row);
  penthouse({
    url: row[2],
    css: './dist/styles/main.css',
    // OPTIONAL params
    width: 1300,    // viewport width
    height: 900,    // viewport height
    timeout: 30000, // ms; abort critical css generation after this timeout
    strict: false,  // set to true to throw on css errors (will run faster if no errors)
    maxEmbeddedBase64Length: 1000 // characters; strip out inline base64 encoded resources larger than this
  }, function(err, criticalCss) {
    if (err) {
      console.log(err);
    }
    // handle your criticalCSS
    console.log('finished');
    console.log(row[2]);
    // now start the next job, if we have more urls
    if (data.length !== 0) {
      buildCriticalCss(data);
    }
  });
}
The error message you're seeing is a default warning printed to the console by Node's events module when more than the allowed number of event listeners are defined for a single instance of EventEmitter. It does not indicate an actual memory leak; rather, it is displayed to make sure you're aware of the possibility of a leak.
You can see this by checking the events.EventEmitter source code at lines 20 and 244.
To stop EventEmitter from displaying this message, and since penthouse does not expose its specific EventEmitter, you'll need to set the default allowed number of event emitter listeners to something larger than its default value of 10, using:
var EventEmitter = require('events').EventEmitter;
EventEmitter.defaultMaxListeners = 20;
Note that according to Node's documentation for EventEmitter.defaultMaxListeners, this will change the maximum number of listeners for all instances of EventEmitter, including those that were defined before the change.
Or you could simply ignore the message.
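If you ever do get a reference to the specific emitter, the narrower fix is to raise the threshold on just that instance rather than globally; a quick sketch:

var EventEmitter = require('events').EventEmitter;

var emitter = new EventEmitter();
emitter.setMaxListeners(20); // raises the warning threshold for this instance only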
Further to the hanging of your code, I'd advise gathering all the results from parsing your CSV into an array, and then processing the array's contents separately from the parsing process.
This would accomplish two things: it would allow you to
be assured the entire CSV file was valid before you started processing, and
add debugging messages while processing each element, which would give you deeper insight into how each element of the array is handled.
UPDATE
As noted below, depending on how many URLs you're processing, you're probably overwhelming Node's ability to handle all of your requests in parallel.
One easy way to proceed would be to use eventing to marshal your processing so your URLs are processed sequentially, as in:
var assert = require('assert'),
    events = require('events'),
    fs = require('fs'),
    csv = require('fast-csv'),
    penthouse = require('penthouse');

var emitter = new events.EventEmitter();

/** Container for URL records read from CSV file.
 *
 * @type {Array}
 */
var urls = [];

/** Reads urls from file and triggers processing
 *
 * @emits processUrl
 */
var readUrlCsv = function() {
  var stream = fs.createReadStream("/home/vagrant/urls.csv");
  stream.on('error', function(e) { // always handle errors!!
    console.error('failed to createReadStream: %s', e);
    process.exit(-1);
  });
  var csvStream = csv()
    .on("data", function(data) {
      // data[0]: table id, data[1]: page type, data[2]: url
      urls.push(data);
    })
    .on("end", function() {
      console.log("done reading csv");
      //console.log(urls);
      emitter.emit('processUrl'); // start processing URLs
    })
    .on('error', function(e) {
      console.error('failed to parse CSV: %s', e);
      process.exit(-1);
    });
  // no return required since we don't do anything with the result
  stream.pipe(csvStream);
};

/** Event handler to process a single URL
 *
 * @emits processUrl
 */
var onProcessUrl = function() {
  // always check your assumptions
  assert(Array.isArray(urls), 'urls must be an array');
  var urlRecord = urls.shift();
  if (urlRecord) {
    assert(Array.isArray(urlRecord), 'urlRecord must be an array');
    assert(urlRecord.length > 2, 'urlRecord must have at least three elements');
    penthouse(
      {
        // ...
      },
      function(e, criticalCss) {
        if (e) {
          console.error('failed to process record %s: %s', urlRecord, e);
          return; // IMPORTANT! do not drop through to rest of func!
        }
        // do what you need with the result here
        if (urls.length === 0) { // ok, we're done
          console.log('completed processing URLs');
          return;
        }
        emitter.emit('processUrl');
      }
    );
  }
};

/**
 * processUrl event - triggers processing of next URL
 *
 * @event processUrl
 */
emitter.on('processUrl', onProcessUrl); // assign handler

// start everything going...
readUrlCsv();
The benefit of using events here, rather than your solution, is the lack of recursion, which can easily overwhelm your stack.
Hint: You can use events to handle all program flow issues normally addressed by Promises or modules like async.
And since events are at the very heart of Node (the "event loop"), it's really the best, most efficient way to solve such problems.
It's both elegant and "The Node Way"!
Here is a gist that illustrates the technique without relying on streams or penthouse; its output is:
url: url1
RESULT: RESULT FOR url1
url: url2
RESULT: RESULT FOR url2
url: url3
RESULT: RESULT FOR url3
completed processing URLs
Besides using console.log statements, which are usually enough, you can also use the built-in debugger: https://nodejs.org/api/debugger.html
Another thing you can do is go into the node_modules/penthouse directory and add your console.log or debugger statements to that module's code. That way you can debug your program there, rather than treating the module as a black box.
Also make sure there isn't some kind of race condition where, for example, the CSV isn't fully written before you try to read it back in.
I think that the memory leak issue is probably a red herring as far as making your code function.
From your comment it sounds like you want to do something like the following with async.mapSeries: http://promise-nuggets.github.io/articles/15-map-in-series.html. You could also use promises, as that article shows, or even use async/await with a regular for loop after compiling with Babel. In the long run I recommend async/await and Babel for this sort of thing, but that might be overkill just to get this working.
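For illustration, a sketch of what that could look like here (it assumes the async library is installed and reuses the urls array and penthouse options from the updates above):

var async = require('async');

// Process the URLs strictly one at a time; results arrive in order.
async.mapSeries(urls, function(row, done) {
  // row[0]: table id, row[1]: page type, row[2]: url
  penthouse({
    url: row[2],
    css: './dist/styles/main.css'
  }, done); // penthouse's (err, criticalCss) callback matches async's contract
}, function(err, results) {
  if (err) return console.error(err);
  console.log('generated critical CSS for %d pages', results.length);
});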
I'm writing an app in Express.js / Node.js.
I have a form with an input of type "file" for uploading images.
I want the images uploaded directly, without user confirmation, then cropped, stored in a directory, and displayed on the user's screen.
To do that, I use the socketio-file-upload library.
It works great on a computer: I can upload my images and display them.
The problem is with the iPad (I haven't tried the iPhone yet).
In the web app, when I click on the "browse" button, I can choose an existing picture or take a picture. I tried to take a picture and upload it, but nothing happens...
Here is the server-side upload code:
io.of('/register').on('connection', function (socket) {
  /*************** FILE UPLOAD ***********************/
  // Make an instance of socketioFileUploadServer and listen on this socket:
  var uploader = new socketioFileUploadServer();
  uploader.dir = __dirname + '/client/tmp/markers';
  uploader.listen(socket);

  // Do something when a file is saved:
  uploader.on('saved', function(event) {
    console.log('Original saved');
    // resize and rename image with a unique id
    var newName = Math.random().toString(36).substr(2, 9);
    // marker 32x32
    easyimg.rescrop({
      src: path.resolve(__dirname, 'client/tmp/markers/' + event.file.name),
      dst: path.resolve(__dirname, 'client/tmp/markers/' + newName + '_marker'),
      width: 32,
      height: 32
    }, function(err, image) {
      if (err) throw err;
      console.log('Resized and cropped: ' + image.width + ' x ' + image.height);
      // marker is uploaded - resized - cropped, now display it
      socket.emit('displayMarker', { markerPath: '/tmp/markers/' + newName + '_marker', markerName: newName });
      // remove original from file system
      fs.unlink(path.resolve(__dirname, 'client/tmp/markers/' + event.file.name), function(err) {
        if (err) throw err;
        console.log('Original removed');
      });
    });
  });

  uploader.on('start', function(event) {
    console.log('Client start upload');
    socket.emit('displayOverlay');
  });

  // Error handler:
  uploader.on('error', function(event) {
    console.log("Error from uploader", event);
  });
});
And the client side:
document.addEventListener("DOMContentLoaded", function() {
  // REGISTRATION MARKER
  var socketMarker = io.connect('/register');
  var siofuMarker = new SocketIOFileUpload(socketMarker);
  siofuMarker.listenOnInput(document.getElementById("uploadMarkerFromExploratorInput"));

  // Do something when a file is uploaded:
  siofuMarker.addEventListener("complete", function(event) {
    $('.overlay').hide();
  });

  // display loader window
  socketMarker.on('displayOverlay', displayOverlay);

  // server says we can display the marker in the register step1 view
  socketMarker.on('displayMarker', function(data) {
    $('#markerImage').html('');
    $('#markerImage').html('<img src="' + data.markerPath + '" />');
    $('#markerImageName').val(data.markerName);
  });
});
On a computer, the console displays:
Client start upload
Original saved
Resized and cropped: 32 x 32
Original removed
And with the iPad:
Client start upload
Does anyone have an idea, or another way to achieve the same result?
Appreciate your help. C.
As I suspected, you need to add the proper attributes to the HTML markup for the File Input element:
<input type="file" accept="image/*" capture="camera">
That should do the trick. The most important part to understand is the "accept" attribute.
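Note: accept="image/*" limits the picker to images, while capture hints that the device camera may be used directly. Worth verifying against current iOS behavior: later revisions of the HTML Media Capture spec replaced capture="camera" with capture="user" or capture="environment", so if the attribute seems to be ignored, those variants are worth trying.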