I am trying to develop in Node.js using its evented system. I've implemented the following code and it passes my tests (using mocha). However, I am not confident that it will work in a production environment because of the scope of the `that` variable.
In my code I assign `that = this`. Afterwards I assign the config object to `that.config`, and the callback emits the event without passing config as a parameter. The listener function then uses `that.config` to emit another signal. The cache object wraps requests to a Redis database.
The question is: will `that.config` always refer to the config that was in scope when I emitted the signal, or could it be modified by another request (one that gets another config) after the first signal is emitted but before the first listener uses `that.config` (inside another callback)?
function SensorTrigger(sensorClass, configClass) {
  this.sensorClass = sensorClass || {'entityName': 'sensor'};
  this.configClass = configClass || {'entityName': 'config'};
  this.cache = new CacheRedis(app.redisClient, app.logmessage);

  events.EventEmitter.call(this);
}
util.inherits(SensorTrigger, events.EventEmitter);
SensorTrigger.prototype.getSensorConfig = function(sensor, trigger) {
  var that = this
    , sensorKeyId = that.sensorClass.entityName + ':' + sensor
    , baseKeyId = "base";

  that.trigger = trigger;
  that.id = sensor;

  var callBack = function(config) {
    config = utils.deepen(config);
    that.receiver = config.receiver;
    that.config = config.triggers[that.trigger];
    that.emit(that.config.data, sensor);
  };

  that.cache.getItem(that.configClass, sensorKeyId, function(err, config) {
    if (!config) {
      that.cache.getItem(that.configClass, baseKeyId, function(err, config) {
        callBack(config);
      });
    } else {
      callBack(config);
    }
  });
};
SensorTrigger.prototype.getAllData = function(sensor) {
  var that = this;

  that.cache.getAllData(that.sensorClass, sensor, function(err, data) {
    if (err) {
      that.emit("error", err);
    } else {
      that.emit(that.config.aggregation, sensor, data);
    }
  });
};
var trigger = new SensorTrigger();
trigger.on("onNewData", trigger.getSensorConfig);
trigger.on("all", trigger.getAllData);
An example of the config object:
{
  "id": "base",
  "triggers.onNewData.data": "all",
  "triggers.onNewData.aggregation": "max",
  "triggers.onNewData.trigger": "threshold",
  "receiver.host": "localhost",
  "receiver.port": "8889",
  "receiver.path": "/receiver"
}
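For reference, `utils.deepen` presumably expands those dotted keys into nested objects; the callback's config.triggers[that.trigger] and config.receiver lookups rely on a shape like this:
{
  "id": "base",
  "triggers": {
    "onNewData": {
      "data": "all",
      "aggregation": "max",
      "trigger": "threshold"
    }
  },
  "receiver": {
    "host": "localhost",
    "port": "8889",
    "path": "/receiver"
  }
}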
Only if, the next time it runs, `that` ends up referring to a different instance. Since `that` is assigned from `this` on every call, the only way that could happen is if you invoke the method with some other context, like this:
var trigger = new SensorTrigger(sensor, config);
trigger.getSensorConfig.apply(SOMETHING_ELSE, [sensor2, trigger2]);
As long as you use it like you normally would:
var trigger = new SensorTrigger(sensor, config);
trigger.getSensorConfig(sensor2, trigger2);
//or even:
trigger.getSensorConfig.apply(trigger, [sensor2, trigger2]);
It is fine. What you are doing is common practice in JavaScript, and it is used in production all the time.
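One caveat worth spelling out: a detached method reference loses its `this`. The trigger.on("onNewData", trigger.getSensorConfig) registration above works because Node's EventEmitter invokes listeners with the emitter itself as `this`. A small sketch of the failure mode and the usual fix (setTimeout is just a stand-in for any API that detaches the callback):
var trigger = new SensorTrigger(sensor, config);

// Loses the instance: setTimeout invokes the function without it,
// so `that = this` inside would not refer to `trigger`.
setTimeout(trigger.getSensorConfig, 0, sensor2, trigger2);

// The usual fix: pin the context explicitly.
setTimeout(trigger.getSensorConfig.bind(trigger), 0, sensor2, trigger2);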
I cannot figure out why the following code does not work:
var os = new Proxy(require('os'), {});
console.log( os.cpus() ); // TypeError: Illegal invocation
whereas
var os = require('os');
console.log(Reflect.apply(os.cpus, os, []));
or
var os = new Proxy(require('os'), {});
console.log( os.platform() );
works as expected.
Having just skim-read the source for the os package in the Node repo, it appears that cpus() is exported from binding.getCPUs, which is a native hook in the Node runtime environment.
cpus() therefore has the binding object as its function context, which is then lost through the proxy, giving you the Illegal invocation error because there is no context for the function when you call it (although I'm hazy on the details).
platform(), on the other hand, is exported as function () { return process.platform; }, so it's just a function that returns an object and doesn't need to run under a specific context, because Node will have the process variable available by default (unless it has been overridden).
The following behaviour shows that applying os as a context to the cpus function will work; proxies of objects evidently lose the function context when you call properties through them.
const os = require('os');
const proxy = new Proxy(os, {}); // proxy of object, functions called get proxy context rather than os context
const cpus = new Proxy(os.cpus, {}); // proxy of function, still has os context
console.log(os.cpus()); // works (duh)
console.log(cpus()); // works
console.log(proxy.cpus.apply(os, [])); // works
console.log(proxy.cpus()); // fails with TypeError: Illegal invocation
Note: If someone can clear up the details on the JS function context for an answer I'd love to read it too.
How about composition:
const os = require('os');
const proxy = new Proxy(os, {});
Object.getOwnPropertyNames(os).forEach(k => {
  var v = os[k];
  if (typeof v === "function") proxy[k] = v.bind(os);
});
// the `!!` because I don't want the actual output printed,
// only a `true` or an `Error`
console.log(!!os.cpus());
console.log(!!proxy.cpus());
console.log(!!proxy.cpus.apply(proxy, []));
and all this as a utility function to "replace" new Proxy(), where handler.bindTargetFunctions can be
either an array of key names to be bound (so you can be specific),
or any truthy or falsy value to determine whether all functions on the target should be bound.
the code:
function proxy(target, handler) {
  const _proxy = new Proxy(target, handler);

  if (handler.bindTargetFunctions) {
    let bindTargetFunctions = handler.bindTargetFunctions;

    if (!Array.isArray(bindTargetFunctions)) {
      bindTargetFunctions = Object.getOwnPropertyNames(target)
        .filter(key => typeof target[key] === "function");
    }

    bindTargetFunctions.forEach(key => {
      _proxy[key] = target[key].bind(target);
    });
  }

  return _proxy;
}
const os = proxy(require('os'), { bindTargetFunctions: true });
//or
//const os = proxy(require('os'), { bindTargetFunctions: ["cpus"] });
console.log(os.cpus());
Edit:
I'm currently trying to bind functions directly in my get handler (see github.com/FranckFreiburger/module-invalidate/blob/master/…); the drawback of that approach is that each access to a function returns a new bound function.
I mentioned caching in the comments. This is how that cache could look:
function createProxy(mod) {
  var cache = Object.create(null);

  return new Proxy(function() {}, {
    get(target, property, receiver) {
      var val = Reflect.get(mod._exports, property, receiver);

      if (typeof val === "function") {
        // re-bind only when the underlying function has changed
        if (!(property in cache) || cache[property].original !== val) {
          cache[property] = {
            original: val,
            bound: val.bind(mod._exports)
          };
        }
        val = cache[property].bound;
      } else if (property in cache) {
        delete cache[property];
      }
      return val;
    }
  });
}
And no, I don't consider this cache a regular object. Not because it inherits from null, but because, logically, to me it is a dictionary/map, and I don't know any reason why you would ever extend or proxy a particular dictionary.
While unit testing my Node.js application, I'm trying to create a simple helper class that translates the Kafka pub-sub semantics into a simpler API suited for unit testing.
My idea is to be able to write mocha unit tests like this:
const testSubscriber = kafkaTestHelper.getTestSubscriber({topic: 'test'});

return someKafkaProducer.sendAsync({topic: 'test', message: randomWord})
  .then(() =>
    testSubscriber.next()
  ).then(msg => {
    msg.should.equal(randomWord);
  });
Of course I would also add helper methods such as
testSubscriber.nextUntil(someFilter)
This is inspired by the Akka.NET TestKit, which has a similar approach.
I have two questions:
Is this a reasonable approach, or is there a cleaner way to unit test application logic based on Kafka stream processing in Node.js?
Can anybody post code examples showing how to make testSubscriber work as I intend?
This might not be the most elegant solution, but it seems to work, at least in my initial testing. The trick is to create an ever-growing list of promises whose resolver functions are kept by reference in an array called resolvers. When a message comes in, the next resolver is invoked with the message. This way I can return promises to any unit test invoking next(), and it works transparently whether the message was already delivered or will be delivered in the future.
I still feel I'm reinventing the wheel here, so any comments would still be greatly appreciated.
function TestSubscriber(consumer, initialMessageFilter) {
  this.consumer = consumer;
  let promiseBuffer = [];
  let resolvers = [];
  let resolveCounter = 0;
  let isStarted = false;

  const ensurePromiseBuffer = function() {
    if (promiseBuffer.length === 0 || resolveCounter >= resolvers.length) {
      const newPromise = new Promise(function(resolve, reject) {
        resolvers.push(resolve);
      });
      promiseBuffer.push(newPromise);
    }
  };

  const that = this;
  this.consumer.on('message', function(message) {
    if (!isStarted) {
      // Determine if we should start now.
      isStarted = initialMessageFilter === undefined || initialMessageFilter(message);
    }
    if (isStarted) {
      ensurePromiseBuffer();
      const resolver = resolvers[resolveCounter];
      resolver(message);
      resolveCounter++;
      that.consumer.commit(function(err, data) {
        if (err) {
          // Just log any errors here as we are running inside a unit test
          log.warn(err);
        }
      });
    }
  });

  this.next = function() {
    ensurePromiseBuffer();
    return promiseBuffer.shift();
  };
}
const cache = {};

module.exports = {
  getTestSubscriber: function({topic}, initialMessageFilter) {
    if (!cache[topic]) {
      const consumer = kafka.getConsumer({topic, groupId: GROUP_ID});
      cache[topic] = new TestSubscriber(consumer, initialMessageFilter);
    }
    return cache[topic];
  }
};
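The nextUntil(someFilter) helper mentioned at the top could be layered on top of next(). A minimal sketch, hypothetical, meant to sit inside the TestSubscriber constructor next to this.next and reusing its `that` reference:
// Hypothetical helper: keeps consuming messages until `filter` accepts one,
// resolving with the first matching message.
this.nextUntil = function(filter) {
  return that.next().then(function(msg) {
    return filter(msg) ? msg : that.nextUntil(filter);
  });
};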
Edit: I know JS is asynchronous and I have looked over the "How to Return" thread. The issue I'm having is that, going from "foo" examples to something specific, I'm not quite sure how to restructure my code.
Also here is some context: https://github.com/sharkwheels/beanballs/blob/master/bean-to-osc-two.js
I have a question about returns in Node. It might be a dumb question, but here goes. I have a function that connects to a socket and gets OSC messages from Processing:
var sock = dgram.createSocket("udp4", function(msg, rinfo) {
  try {
    // get at all that info being sent out from Processing.
    //console.log(osc.fromBuffer(msg));
    var getMsg = osc.fromBuffer(msg);
    var isMsg = getMsg.args[0].value;
    var isName = getMsg.args[1].value;
    var isAdd = getMsg.address;
    var isType = getMsg.oscType;

    // make an array out of it
    var isAll = [];
    isAll.push(isName);
    isAll.push(isMsg);
    isAll.push(isAdd);
    isAll.push(isType);

    // return the array
    console.log(isAll);
    return isAll;
  } catch (error) {
    console.log(error);
  }
});
Below I have the start of another function that writes some of that array to a BLE device. It needs name and characteristics from a different function. How do I get the function below to use isAll AND its two existing parameters?
var writeToChars = function (name, characteristics) { // this is passing values from the BLE setup function
  // I need to get isAll to here.
  // eventually this will write some values from isAll into a scratch bank.
}
Thanks.
The async call in this case can be written something like this. State can be maintained in closure variables if required; in this particular case you can do without any state (isAll) as well.
var isAll;
var soc = dgram.createSocket('udp4', oncreatesocket);

function oncreatesocket(msg, rinfo) {
  isAll = parseMessage(msg);
  writeData(isAll);
}

function parseMessage(msg) {
  ...
  // code to parse msg and return isAll
}

function writeData() {}
If writeData is a small enough function, it can live inside oncreatesocket without impacting the readability of the code.
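For example, a minimal sketch with the write step inlined (the body here is just a placeholder):
var soc = dgram.createSocket('udp4', function(msg, rinfo) {
  var isAll = parseMessage(msg);
  // a short write step can live right here
  // instead of a separate writeData function
});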
Alright, so I figured out what to do, at least in this scenario. I'm sure there is a better way to do this, but for now, this works.
I'm mapping over an existing global array of peripherals inside the write function, while passing the OSC message to it as a parameter. This solved my issue of "how do I get two pieces of information to the same place?". It figures out which peripheral is which and writes a different value to each peripheral's scratch bank accordingly. Leaving this here for future reference.
var writeToBean = function(passThrough) {
  console.log("in Write to bean: ", passThrough);

  _.map(beanArray, function(n) {
    if (n.advertisement.localName === passThrough.name) {
      //var name = n.advertisement.localName;
      n.discoverSomeServicesAndCharacteristics(['a495ff20c5b14b44b5121370f02d74de'], [scratchThr], function(error, services, characteristics) {
        var service = services[0];
        var characteristic = characteristics[0];
        var toSend = passThrough.msg;
        console.log("service", service);
        console.log("characteristic", characteristic);
        if (toSend != null) {
          characteristic.write(new Buffer([toSend]), false, function(error) {
            if (error) { console.log(error); }
            console.log("wrote " + toSend + " to scratch bank 3");
          });
        }
        // not sure how to make the program resume, it stops here. No error, just stops processing.
      });
    }
  });
};
Could someone explain the flow in Node.js from client to server, with dynamic parameters passed from the user interface to an API? Based on these parameters we get the output from the API (for example, the Sabre API):
exports.flightDestinations = function(req, res) {
  var callback = function(error, data) {
    if (error) {
      // Your error handling here
      console.log(error);
    } else {
      // Your success handling here
      // console.log(JSON.parse(data));
      res.send(JSON.parse(data));
    }
  };

  sabre_dev_studio_flight.airports_top_destinations_lookup({
    topdestinations: '50'
  }, callback);
};
We want the value 50 to come from the user. How do we pass this value in, and how do we call this function in Node.js?
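One common approach is to read the value from the request instead of hard-coding it. A sketch assuming the handler is mounted as an Express route; the route path and query parameter name here are made up:
// e.g. app.get('/destinations', flightDestinations), called as /destinations?top=50
exports.flightDestinations = function(req, res) {
  var top = req.query.top || '50'; // value supplied by the user interface, with a default
  sabre_dev_studio_flight.airports_top_destinations_lookup({
    topdestinations: top
  }, function(error, data) {
    if (error) {
      return res.status(500).send(String(error));
    }
    res.send(JSON.parse(data));
  });
};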
The exports variable is initially set to the same object as module.exports (i.e. it's a shorthand "alias"), so in the module code you would usually write something like this:
var myFunc1 = function() { ... };
var myFunc2 = function() { ... };
exports.myFunc1 = myFunc1;
exports.myFunc2 = myFunc2;
to export (or "expose") the internally scoped functions myFunc1 and myFunc2.
And in the calling code you would use:
var m = require('mymodule');
m.myFunc1();
where the last line shows how the result of require is (usually) just a plain object whose properties may be accessed.
NB: if you overwrite exports then it will no longer refer to module.exports. So if you wish to assign a new object (or a function reference) to exports then you should also assign that new object to module.exports
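For example, assigning both keeps the alias intact:
// Overwriting `exports` alone would be lost when the module is required;
// assign the new object to module.exports as well:
module.exports = exports = {
  myFunc1: myFunc1,
  myFunc2: myFunc2
};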
It's worth noting that the name added to the exports object does not have to be the same as the module's internally scoped name for the value that you're adding, so you could have:
var myVeryLongInternalName = function() { ... };
exports.shortName = myVeryLongInternalName;
// add other objects, functions, as required
followed by:
var m = require('mymodule');
m.shortName(); // invokes the module's internally scoped myVeryLongInternalName
Sorry if the question title is a tad ambiguous, but I'm not entirely sure how to word it.
I'm writing an NPM module that talks to a JSON-RPC API; this is the current setup.
// The module
function MyModule(config) {
  // do some connection stuff here
  connected = true;
}

MyModule.prototype.sendCommand = function() {
  if (connected) {
    // do command
  } else {
    // output an error
  }
};

module.exports = MyModule;
// The script interacting with the module
var MyModule = require('./MyModule');

var config = {
  // config stuff
};

var mod = new MyModule(config);
mod.sendCommand();
The command won't send, as at this point the module hasn't connected. I assume this is due to Node.js' asynchronous, non-blocking architecture, and that I perhaps need to use promises to wait for a response from the API. Where would I implement this: in my module, or in the script interacting with the module?
You will need to use a callback, a promise, or something similar to indicate when the connection is complete, so that further code that uses the connection can be started from that callback.
Though it is generally not considered best practice to do asynchronous work in a constructor, it can be done:
function MyModule(config, completionCallback) {
  // do some connection stuff here
  connected = true;
  completionCallback(this);
}

var mod = new MyModule(config, function(mod) {
  // object has finished connecting
  // further code can run here that uses the connection
  mod.sendCommand(...);
});
A more common design pattern is to not put the connecting in the constructor, but to add a method just for that:
function MyModule(config) {
}

MyModule.prototype.connect = function(fn) {
  // code here that does the connection and calls
  // the fn callback when connected
};

var mod = new MyModule(config);
mod.connect(function() {
  // object has finished connecting
  // further code can run here that uses the connection
  mod.sendCommand(...);
});
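If you do prefer promises, the same connect step can return one instead of taking a callback; a minimal sketch under the same assumptions:
MyModule.prototype.connect = function() {
  return new Promise(function(resolve, reject) {
    // connection logic goes here; call resolve() when
    // connected and reject(err) if the connection fails
  });
};

var mod = new MyModule(config);
mod.connect().then(function() {
  mod.sendCommand(...);
});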
Don't use promises; use Node's programming model, where you don't "call functions" but "call functions with a result handler for dealing with the data once it's actually available":
MyModule.prototype.sendCommand = function(handler) {
  if (connected) {
    // run stuff, obtain results, send that on:
    handler(false, result);
  } else {
    // output an error, although really we should
    // just try to connect if we're not, and say
    // there's an error only when it actually fails.
    handler(new Error("ohonoes"));
  }
};
and then you call the function as:
var MyModule = require('./MyModule');
var mod = ...

mod.sendCommand(function(err, result) {
  // we'll eventually get here, at which point:
  if (err) { return console.error(err); }
  run();
  more();
  code();
  withResult(result);
});