Concatenating API responses in the correct sequence asynchronously - node.js

So I'm building a simple wrapper around an API to fetch all results of a particular entity. The API method can only return up to 500 results at a time, but it's possible to retrieve all results using the skip parameter, which specifies the index to start retrieving results from. The API also has a method which returns the total number of results that exist.
I've spent some time battling with the request package, trying to come up with a way to concatenate all the results in order and then execute a callback which passes all the results through.
This is my code currently:
Donedone.prototype.getAllActiveIssues = function(callback){
var url = this.url;
request(url + `/issues/all_active.json?take=500`, function (error, response, body) {
if (!error && response.statusCode == 200) {
var data = JSON.parse(body);
var totalIssues = data.total_issues;
var issues = [];
for (let i=0; i < totalIssues; i+=500){
request(url + `/issues/all_active.json?skip=${i}&take=500`, function (error, response, body){
if (!error && response.statusCode == 200) {
console.log(JSON.parse(body).issues.length);
issues.concat(JSON.parse(body).issues);
console.log(issues); // returns [] on all occasions
//callback(issues);
} else{
console.log("AGHR");
}
});
}
} else {
console.log("ERROR IN GET ALL ACTIVE ISSUES");
}
});
};
So I'm starting off with an empty array, issues. I iterate through a for loop, each time increasing i by 500 and passing that as the skip param. As you can see, I'm logging how many issues each response contains before concatenating them with the main issues variable.
The output, from a total of 869 results, is this:
369
[]
500
[]
Why is my issues variable empty when I log it out? There are clearly results to concatenate with it.
A more general question: is this approach the best way to go about what I'm trying to achieve? I figured that even if my code did work, the nature of asynchronicity means it's entirely possible for the results to be concatenated in the wrong order.
Should I just use a synchronous request library?

Why is my issues variable empty when I log it out? There are clearly
results to concatenate with it.
The main problem here is that .concat() returns a new array; it doesn't add items onto the existing array.
You can change this:
issues.concat(JSON.parse(body).issues);
to this:
issues = issues.concat(JSON.parse(body).issues);
to make sure you are retaining the new concatenated array. This is a very common mistake.
You also potentially have sequencing issues, because the for loop starts a whole bunch of requests at the same time and the results may or may not arrive back in the order they were requested. You will still end up with the proper total number of issues, but they may not be in the requested order. I don't know whether that is a problem for you; if it is, a fix is sketched below.
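If the order does matter and you want to stay with plain callbacks, one option (a rough sketch built on your own url, totalIssues and callback variables, not tested against your API) is to drop each response into a slot keyed by its request index and only flatten once every request has finished:
var chunks = [];                              // chunks[k] holds the issues from request k
var remaining = Math.ceil(totalIssues / 500); // number of requests we are about to fire
for (let i = 0; i < totalIssues; i += 500) {
    let k = i / 500;                          // position of this chunk in the final result
    request(url + `/issues/all_active.json?skip=${i}&take=500`, function (error, response, body) {
        if (!error && response.statusCode == 200) {
            chunks[k] = JSON.parse(body).issues;
        } else {
            chunks[k] = [];                   // or record the error and bail out
        }
        if (--remaining === 0) {
            // every request has answered; flatten in request order
            callback([].concat.apply([], chunks));
        }
    });
}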
A more general question: is this approach the best way to go about
what I'm trying to achieve? I figured that even if my code did work,
the nature of asynchronicity means it's entirely possible for the
results to be concatenated in the wrong order.
Except for the ordering issue which can also be fixed, this is a reasonable way to do things. We would have to know more about your API to know if this is the most efficient way to use the API to get your results. Usually, you want to avoid making N repeated API calls to the same server and you'd rather make one API call to get all the results.
Should I just use a synchronous request library?
Absolutely not. node.js requires learning how to do asynchronous programming. It is a learning step for most people, but it is how you get the best performance from node.js, so it is worth learning and using.
Here's a way to collect all the results in reliable order using promises for synchronization and error propagation (which is hugely useful for async processing in node.js):
// promisify the request() function so it returns a promise
// whose fulfilled value is the request result
function requestP(url) {
return new Promise(function(resolve, reject) {
request(url, function(err, response, body) {
if (err || response.statusCode !== 200) {
reject({err: err, response: response});
} else {
resolve({response: response, body: body});
}
});
});
}
Donedone.prototype.getAllActiveIssues = function() {
var url = this.url;
return requestP(url + `/issues/all_active.json?take=500`).then(function(results) {
var data = JSON.parse(results.body);
var totalIssues = data.total_issues;
var promises = [];
for (let i = 0; i < totalIssues; i+= 500) {
promises.push(requestP(url + `/issues/all_active.json?skip=${i}&take=500`).then(function(results) {
return JSON.parse(results.body).issues;
}));
}
return Promise.all(promises).then(function(results) {
// results is an array of each chunk (which is itself an array) so we have an array of arrays
// now concat all results in order
return Array.prototype.concat.apply([], results);
})
});
}
xxx.getAllActiveIssues().then(function(issues) {
// process issues here
}, function(err) {
// process error here
})

Related

Node.js: async.map getting slower

Hello,
I use Node.js to provide an API for storing data on a MongoDB database.
I ran multiple tests on a read method, which takes ids and returns the corresponding documents. The point is that I must return these documents in the specified order. To ensure that, I use the following code:
// Sequentially fetch every element
function read(ids, callback) {
var i = 0;
var results = [];
function next() {
db.findOne(ids[i], function (err, doc) {
results.push(err ? null : doc);
if (ids.length > ++i) {
return next();
}
callback(results);
});
}
next();
}
This way, documents are fetched one-by-one, in the right order. It takes about 11s on my laptop to retrieve 27k documents.
However, I thought that it was possible to improve this method:
// Asynchronously map the whole array
var async = require('async');
function read(ids, callback) {
async.map(ids, db.findOne.bind(db), callback);
}
After running a single test, I was quite satisfied seeing that the 27k documents were retrieved in only 8s using simpler code.
The problem happens when I repeat the same request: the response time keeps growing (proportionally to the number of elements retrieved): 9s 10s 11s 12s.... This problem does not happen in the sequential version.
I tried two versions of Node.js, v6.2.0 and v0.10.29. The problem is the same. What causes this latency and how could I suppress it?
Try using async.mapLimit to prevent overload; you will need some testing to tune the limit value for your environment (a sketch is at the end of this answer).
But find({_id: {$in: list}}) is always better, because it is a single database request instead of many.
I suggest you restore the original order client-side.
Something like this:
function read(ids, cb) {
db.find(
{_id: {$in: ids.map(id => mongoose.Types.ObjectId(id))}},
process
);
function process(err, docs) {
if (err) return cb(err);
return cb(null, docs.sort(ordering))
}
function ordering(a, b) {
return ids.indexOf(a._id.toString()) - ids.indexOf(b._id.toString());
}
}
The find query may need adjusting; I can't know which exact mongodb driver you use.
This code is a first try; more careful manual sorting can improve performance a lot, and [].indexOf is heavy too (O(n)).
But I'm almost sure that, even as-is, it will work much faster.
Possible ordering replacement:
var idHash = {};
for(var i = 0; i < ids.length; i++)
idHash[ids[i]] = i;
function ordering(a, b) {
return idHash[a._id.toString()] - idHash[b._id.toString()];
}
Sorting is O(n log n) in general, but we already know the final position of each found document, so we can restore the original order in O(n):
var idHash = ids.reduce((c, id, i) => (c[id] = i, c), {});
function process(err, docs) {
if (err) return cb(err);
return cb(null,
docs.reduce(
(c, doc) => (c[idHash[doc._id.toString()]] = doc, c),
ids.map(id => null))) //fill not_found docs by null
}
Functional style makes the code more flexible. For example, it could easily be modified to use async.reduce to block less.
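For reference, the mapLimit variant suggested at the start of this answer would look roughly like this (the limit of 10 is arbitrary; tune it for your environment):
var async = require('async');

function read(ids, callback) {
    // at most 10 findOne calls in flight at any time
    async.mapLimit(ids, 10, db.findOne.bind(db), callback);
}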

How do I make a large but unknown number of REST http calls in nodejs?

I have an orientdb database. I want to use nodejs with RESTful calls to create a large number of records. I need to get the #rid of each for some later processing.
My pseudo code is:
for each record
write.to.db(record)
when the async of write.to.db() finishes
process based on #rid
carryon()
I have landed in serious callback hell from this. The version that was closest used a tail recursion in the .then function to write the next record to the db. However, I couldn't carry on with the rest of the processing.
A final constraint is that I am behind a corporate proxy and cannot use any other packages without going through the network administrator, so using the native nodejs packages is essential.
Any suggestions?
With a completion callback, the general design pattern for this type of problem makes use of a local function for doing each write:
var records = ....; // array of records to write
var index = 0;
function writeNext(r) {
write.to.db(r, function(err) {
if (err) {
// error handling
} else {
++index;
if (index < records.length) {
writeNext(records[index]);
}
}
});
}
writeNext(records[0]);
The key here is that you can't use synchronous iterators like .forEach(), because they won't wait for each asynchronous write to complete before moving on. Instead, you do your own iteration.
If your write function returns a promise, you can use the .reduce() pattern that is common for iterating an array.
var records = ...; // some array of records to write
records.reduce(function(p, r) {
return p.then(function() {
return write.to.db(r);
});
}, Promise.resolve()).then(function() {
// all done here
}, function(err) {
// error here
});
This solution chains promises together, waiting for each one to resolve before executing the next save.
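Since you also need the #rid of each record for later processing, you can collect the resolved values as the chain progresses. A sketch along those lines, assuming write.to.db() resolves with the new #rid:
var records = ...; // some array of records to write
var rids = [];     // collected #rid values, in the same order as records

records.reduce(function(p, r) {
    return p.then(function() {
        return write.to.db(r).then(function(rid) {
            rids.push(rid); // assumes the promise resolves with the new #rid
        });
    });
}, Promise.resolve()).then(function() {
    // all writes done; carry on with the rest of the processing
    carryon(rids);
}, function(err) {
    // error here
});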
It's kinda hard to tell which function would be best for your scenario w/o more detail, but I almost always use asyncjs for this kind of thing.
From what you say, one way to do it would be with async.map:
var recordsToCreate = [...];
function functionThatCallsTheApi(record, cb){
// do the api call, then call cb(null, rid)
}
async.map(recordsToCreate, functionThatCallsTheApi, function(err, results){
// here, err will be if anything failed in any function
// results will be an array of the rids
});
You can also check out the other ones to enable throttling, which is probably a good idea.
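For example, swapping async.map for async.mapLimit caps how many API calls are in flight at once (the limit of 5 here is arbitrary):
// at most 5 API calls running at the same time
async.mapLimit(recordsToCreate, 5, functionThatCallsTheApi, function(err, results){
    // same err/results behaviour as async.map above
});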

Meteor: How do I stream and parse a large file to an async Node function?

I'm using the job-collection package to do the following:
Download a large file with a bunch of metadata about webpages
Create a stream from the file metadata that is split by a regex using the NPM event-stream package
Check if there is a match of the metadata in a collection (I've been attempting to stream each webpage's metadata to another function to do this)
The file is too large to buffer, so streaming is required. Here is a small file with a few examples of the metadata if you wish to try this.
Each job from the job-collection package is already inside an async function:
var request = Npm.require('request');
var zlib = Npm.require('zlib');
var EventStream = Meteor.npmRequire('event-stream');
function (job, callback) {
//This download is much too long to block
request({url: job.fileURL, encoding: null}, function (error, response, body) {
if (error) console.error('Error downloading File');
if (response.statusCode !== 200) console.error(response.statusCode, 'Status not 200');
var responseEncoding = response.headers['content-type'];
console.log('response encoding is %s', responseEncoding);
if (responseEncoding === 'application/octet-stream' || 'binary/octet-stream') {
console.log('Received binary/octet-stream');
var regexSplit = /WARC\/1\./;
response.pipe(zlib.createGunzip()
.pipe(EventStream.split(regexSplit))
.pipe(EventStream.map(function (webpageMetaData) {
/* Need parse the metaData or pass each webpageMetaData to function
* This next function could block if it had to */
searchPageMetaData(webpageMetaData); // pass each metadatum to this function to update a collection - this function can be synchronous
}));
} else {
console.error('Wrong encoding');
}
});
}
function searchWebPageMetaData(metaData) {
// Parse JSON and search collection for match
}
Are there better ways to structure this? Am I on the right track?
Where to put Meteor.bindEnvironment? - do I bind the environment each time I pass to searchWebPageMetaData()? Do I need to expressly use fibers here?
The stream stops when running this if I run it to process.stdout. Am I supposed to put the stream into one of Meteor's wrap
I'm aware of Meteor.wrapAsync. Do I want to wrap the innermost searchWebPageMetaData() function in Meteor.wrapAsync? (think I'm answering this yes as I type)
Will the stream slow to compensate for the slowness of the DB calls? My guess is no but how do I deal with that?
I've spent quite a while learning about Meteor's wrapAsync and bindEnvironment, but I'm having trouble bringing it all together and understanding where to use them.
SUPPLEMENT 1
Just to clarify, the steps are:
Download file;
Create stream;
unzip it;
split it into individual webPages - EventStream handles this
send it to a function - don't need return values; this could be blocking, it's just some searching and database call
I was trying to do something like this, except the core code I need help with was in a function in a different file. The following code has most of #electric-jesus' answer in there.
processJobs('parseWatFile', {
concurrency: 1,
cargo: 1,
pollInterval: 1000,
prefetch: 1
}, function (job, callback) {
if (job.data.watZipFileLink) {
queue.pause();
console.log('queue should be paused now');
var watFileUrl = 'https://s3.amazonaws.com/ja-common-crawl/exampleWatFile.wat.gz';
function searchPageMetaData(webpageMetaData, callback) {
console.log(webpageMetaData); // Would be nice to just get this function logging each webPageMetaData
future.return(callback(webpageMetaData)); //I don't need this to return any value - do I have to return something?
}
if (!watFile)
console.error('No watFile passed to downloadAndSearchWatFileForEntity ');
var future = new Future(); // Doc Brown would be proud.
if(typeof callback !== 'function') future.throw('callbacks are supposed to be functions.');
request({url: watFile, encoding: null}, function (error, response, body) {
if (error) future.throw('Error Downloading File');
if (response.statusCode !== 200) future.throw('Expected status 200, got ' + response.statusCode + '.');
var responseEncoding = response.headers['content-type'];
if (responseEncoding === 'application/octet-stream' || 'binary/octet-stream') {
var regexSplit = /WARC\/1\./;
response.pipe(zlib.createGunzip()
.pipe(EventStream.split(regexSplit))
.pipe(EventStream.map(function (webpageMetaData) {
searchPageMetaData(webpageMetaData, callback);
})
));
} else {
future.throw('Wrong encoding');
}
});
return future.wait();
} else {
console.log('No watZipFileLink for this job');
job.log('ERROR: NO watZipFileLink from commonCrawlJob collection');
}
queue.resume();
job.done;
callback();
}
Interesting, looks alright. I've never worked with job-collection, but it seems to be just a Mongo-driven task queue, so I am assuming it works like a regular queue. For anything with callbacks I almost always use the Future pattern, e.g.:
var request = Npm.require('request');
var zlib = Npm.require('zlib');
var EventStream = Meteor.npmRequire('event-stream');
var Future = Npm.require('fibers/future');
var searchWebPageMetaData = function (metaData) {
// Parse JSON and search collection for match
// make it return something
var result = /droids/ig.test(metaData);
return result;
}
var processJob = function (job, callback) {
var future = new Future(); // Doc Brown would be proud.
if(typeof callback !== 'function') future.throw("Oops, you forgot that callbacks are supposed to be functions.. not undefined or whatever.");
//This download is much too long to block
request({url: job.fileURL, encoding: null}, function (error, response, body) {
if (error) future.throw("Error Downloading File");
if (response.statusCode !== 200) future.throw("Expected status 200, got " + downloadResponse.statusCode + ".");
var responseEncoding = response.headers['content-type'];
if (responseEncoding === 'application/octet-stream' || responseEncoding === 'binary/octet-stream') {
var regexSplit = /WARC\/1\./;
response.pipe(zlib.createGunzip()
.pipe(EventStream.split(regexSplit))
.pipe(EventStream.map(function (webpageMetaData) {
/* Need parse the metaData or pass each webpageMetaData to function
* This next function could block if it had to */
// pass each metadatum to this function to update a collection - this function can be synchronous
future.return(callback(webpageMetaData)); // this way, processJob returns whatever we find in the completed webpage, via callback.
}));
} else {
future.throw('Wrong encoding');
}
});
return future.wait();
}
Example usage:
Whenever you assign the result like this:
var currentJob = processJob(myjob, searchWebPageMetaData);
the async work gets done and the result is delivered to you just in time, even though it looks like a plain synchronous assignment.
To answer your questions,
Where to put Meteor.bindEnvironment? - do I I bind the environment for each time I pass to searchWebPageMetaData()? Do I need to expressly use fibers here?
Not really; I believe the explicit use of fibers/future already takes care of this.
The stream stops when running this if I run it to process.stdout. Am I supposed to put the stream into one of Meteor's wrap
How do you mean? I vaguely remember that process.stdout is blocking, which might be the cause. Again, wrapping the result in a future should take care of this.
I'm aware of Meteor.wrapAsync. Do I want to wrap the innermost searchWebPageMetaData() function in Meteor.wrapAsync? (think I'm answering this yes as I type)
Take a look at the Meteor.wrapAsync helper code. It's basically a future resolution applied; you can use it, or you can explicitly use fibers/future on its own with no problem.
Will the stream slow to compensate for the slowness of the DB calls? My guess is no but how do I deal with that?
Not really sure what you mean here, but since we're trying to use asynchronous fibers, my guess is no as well. I've yet to see any slowness from fibers. Only if multiple jobs are launched and running concurrently at once will you get a performance issue, in terms of memory usage. Keep the concurrency of the queue low, as fibers can be quite powerful at running things at the same time; you only have one core to process it all, which is a sad fact, as node can't use multiple cores :(
This is quite tricky if you want to handle all errors correctly. So you should ask yourself what to do if your code throws an exception, or if an error event handler is called. You want errors to propagate correctly, that is, to be thrown as an exception in the fiber calling the streaming code. I implemented something like this for one of our job-collection tasks, for extracting tar files.
First you need some helper functions:
bindWithFuture = (futures, mainFuture, fun, self) ->
wrapped = (args...) ->
future = new Future()
if mainFuture
future.resolve (error, value) ->
# To resolve mainFuture early when an exception occurs
mainFuture.throw error if error and not mainFuture.isResolved()
# We ignore the value
args.push future.resolver()
try
futures.list.push future
fun.apply (self or @), args
catch error
future.throw error
# This waiting does not really do much because we are
# probably in a new fiber created by Meteor.bindEnvironment,
# but we can still try to wait
Future.wait future
Meteor.bindEnvironment wrapped, null, self
wait = (futures) ->
while futures.list.length
Future.wait futures.list
# Some elements could be added in meantime to futures,
# so let's remove resolved ones and retry
futures.list = _.reject futures.list, (f) ->
if f.isResolved()
# We get to throw an exception if there was an exception.
# This should not really be needed because exception should
# be already thrown through mainFuture and we should not even
# get here, but let's check for every case.
f.get()
true # And to remove resolved
And then you can run something like:
mainFuture = new Future()
# To be able to override list with a new value in wait we wrap it in an object
futures =
list: []
bindWithOnException = (f) =>
Meteor.bindEnvironment f, (error) =>
mainFuture.throw error unless mainFuture.isResolved()
onWebpageMetaData = (metaData, callback) =>
return callback null if mainFuture.isResolved()
# Do whatever you want here.
# Call callback(null) when you finish.
# Call callback(error) if there is an error.
# If you want to call into a Meteor code inside some other callback for async code you use,
# use bindWithOnException to wrap a function and stay inside a Meteor environment and fiber.
MeteorCollection.insert
metaData: metaData
callback null
requestFuture = new Future()
request
url: job.fileURL
encoding: null
,
(error, response, body) ->
return requestFuture.throw error if error
return requestFuture.throw new Error "Expected status 200, got #{ response.statusCode }." unless response.statusCode is 200
requestFuture.return response
response = requestFuture.wait()
responseEncoding = response.headers['content-type']
throw new Error "Wrong encoding" unless responseEncoding in ['application/octet-stream', 'binary/octet-stream']
regexSplit = /WARC\/1\./
response.pipe(
zlib.createGunzip()
).pipe(
EventStream.split regexSplit
).pipe(
EventStream.map bindWithFuture futures, mainFuture, onWebpageMetaData
).on('end', =>
# It could already be resolved by an exception from bindWithFuture or bindWithOnException
mainFuture.return() unless mainFuture.isResolved()
).on('error', (error) =>
# It could already be resolved by an exception from bindWithFuture or bindWithOnException
mainFuture.throw error unless mainFuture.isResolved()
)
mainFuture.wait()
wait futures

Iterate through Array, update/create Objects asynchronously, when everything is done call callback

I have a problem, but I have no idea how one would go about solving it.
I'm using loopback, but I think I would've faced the same problem with mongodb sooner or later. Let me explain what I am doing:
I fetch entries from another REST service, then I prepare the entries for my API response (the entries are not ready yet, because they don't have ids from my database).
Before I send the response I want to check whether each entry exists in the database (determined by source_id):
If it doesn't: create it.
If it does: use it and update it to the newer version.
Then send the response with the entries (which now have database ids assigned to them).
This seems okay and easy to implement, but it's beyond what I know how to do. I will try to explain further in code:
//This will not work since there are many async call, and fixedResults will be empty at the end
var fixedResults = [];
//results is array of entries
results.forEach(function(item) {
Entry.findOne({where: {source_id: item.source_id}}, function(err, res) {
//Did we find it in database?
if(res === null) {
//Create object, another async call here
fixedResults.push(newObj);
} else {
//Update object, another async call here
fixedResults.push(updatedObj);
}
});
});
callback(null, fixedResults);
Note: I left some of the code out, but I think it's pretty self-explanatory if you read through it.
So I want to iterate through all objects, create or update them in database, then when all are updated/created, use them. How would I do this?
You can use promises. A promise represents the eventual result of an asynchronous operation, and you attach callbacks that are invoked once it completes. Here's an example of chaining promises together: https://coderwall.com/p/ijy61g.
The q library is a good one - https://github.com/kriskowal/q
This question, how to use q.js promises to work with multiple asynchronous operations, gives a nice code example of how you might build these up.
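To make that concrete for your case, a rough sketch with q might look like this (createObj and updateObj are placeholders for your own create/update logic and are assumed to return promises):
var Q = require('q');

function fixResults(results, callback) {
    var promises = results.map(function (item) {
        // Q.ninvoke turns the node-style Entry.findOne call into a promise
        return Q.ninvoke(Entry, 'findOne', {where: {source_id: item.source_id}})
            .then(function (res) {
                // create the entry if it is missing, otherwise update it
                return res === null ? createObj(item) : updateObj(res, item);
            });
    });
    Q.all(promises).then(function (fixedResults) {
        callback(null, fixedResults); // same order as the input array
    }, function (err) {
        callback(err);
    });
}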
This pattern is generically called an 'async map'
var fixedResults = [];
var outstanding = 0;
//results is array of entries
results.forEach(function(item, i) {
outstanding++; // count this request before it starts, while the loop is still synchronous
Entry.findOne({where: {source_id: item.source_id}}, function(err, res) {
//Did we find it in database?
if(res === null) {
//Create object, another async call here
DoCreateObject(function (err, result) {
if (err) return callback(err);
fixedResults[i] = result;
if (--outstanding === 0) callback (null, fixedResults);
});
} else {
//Update object, another async call here
DoOtherCall(function (err, result) {
if (err) return callback(err);
fixedResults[i] = result;
if (--outstanding === 0) callback (null, fixedResults);
});
}
});
});
You could use async.map for this. For each element of the array, the iterator function does what you need to do to that element and then calls its callback with the result (instead of fixedResults.push); the final map callback fires when all of them are done. Each iteration and database call then runs in parallel, as sketched below.
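A sketch of that approach, reusing your Entry.findOne call (again with createObj and updateObj standing in for your own create/update calls, here taking node-style callbacks):
var async = require('async');

async.map(results, function (item, cb) {
    Entry.findOne({where: {source_id: item.source_id}}, function (err, res) {
        if (err) return cb(err);
        if (res === null) {
            createObj(item, cb);      // cb(err, newObj)
        } else {
            updateObj(res, item, cb); // cb(err, updatedObj)
        }
    });
}, function (err, fixedResults) {
    // fixedResults is in the same order as results
    callback(err, fixedResults);
});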
Mongo has a feature called upsert.
http://docs.mongodb.org/manual/reference/method/db.collection.update/
It does exactly what you ask for without needing the checks. You can fire all the requests async and just validate that the result comes back as true. No need for additional processing.
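With the plain MongoDB driver that looks roughly like this (the entries collection name is made up):
// update the document matching source_id, or insert it if it does not exist yet
db.collection('entries').update(
    {source_id: item.source_id},
    {$set: item},
    {upsert: true},
    function (err, result) {
        // result reports whether an update or an insert (upsert) happened
    }
);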

Trying to understand how the node.js programming model works

I've been reading about node.js recently (like many others). I find it interesting for some use cases, but I'm struggling a bit to understand the inner workings, specifically the interaction between closure functions and the process flow of the code.
Let's say I have a function which accepts a key-value array. The function must check that the values follow certain data-quality guidelines (for example some keys must have a value, other keys must have numbers as values etc) before storing the data somewhere (for the purpose of this question let's assume data validation has to be done in the application itself).
In "regular" developments models I'd write something like this:
resultName = validateName(data.name)
resultAddress = validateAddress(data.address)
resultID = validateID(data.id)
if (resultName && resultAddress && resultID) {
store(data)
} else {
sendErrorToUser(data)
}
Get the results of the validations, and either explain the error(s) to the user or store data and return some kind of confirmation. The flow is very clear.
The way I understand node.js, the way to do this would be to delegate the validations to a different function (to avoid waiting for each validation to finish), and supply two callback functions to the functions which validate the chunks of data:
* a callback to call when validation is successful
* a callback to call when validation fails
It's easy to now return to the user with a "please wait" message, but I have to wait for all validations to clear (or fail) before storing the data or explaining the problem to the user. As a simple way to figure out if all the validations are done I thought of using a variable that counts the number of functions that called the callback, and emitting a "validation complete" event to store the validated data (or get back to the user with any errors). Or, alternatively, emit an event after each validation is complete and in that event's code check if all validations are complete before emitting the "store" / "error" events.
My question is -- am I approaching this correctly? Or is there a more suitable way to do these kinds of things with node.js (or similar event-based systems).
Thank you!
Alon
Are your validations asynchronous? If they are not you can use the code you posted, the "regular" one.
If the validations are asynchronous (checking uniqueness of an email for instance), you need to provide callbacks:
var validateUniqueEmail = function (data, callback) {
db.find({email: data.email}, function (err, result) {
callback(err, result === null);
})
};
var validateAndStore = function (data, callback) {
asyncValidation(data, function (err, is_valid) {
if (err) {
callback(err, null);
} else if (!is_valid) {
callback('Email is not unique', null);
} else {
db.store(data, callback);
}
});
}
The code above can be simplified a lot by using one of the validator or ORM modules that already exist, for example the mongolia validator module.
Let's go. Basically, what you want to do is something along the lines of:
function validate(data, cb){
var allOk = true;
for(var key in data){
allOk = allOk && validate[key](data[key]); // validator depends on the key
}
if (allOk) cb(null, data); else cb(new Error("bleh"));
}
This could be done the following way (note how we pass the failed keys as the first (error) argument to the callback):
function validate(data, cb){
var status = {true:[], false:[]},
total = Object.keys(data).length,
done = 0;
for (var key in data)
(function(key){
validate[key](data[key], function(ok){
status[ok].push(key);
if (++done == total){
status[false].length ? cb(status[false]) : cb(null);
}
});
})(key);
}
Which you can use this way :
validate(data, function(failures){
if (failures){
// tell the user the input does not validate on the keys stored in failures
} else {
// all ok
store(data);
}
});
Correct me if I'm wrong, but I think what you're asking is how to handle the response from multiple asynchronous calls.
Here's how I do it (using your validation example):
var result = {};
function isAllDataAvailable() {
return result.name !== undefined
&& result.address !== undefined
&& result.id !== undefined;
}
function callback(error) {
if (error) {
showError(error);
// terminate here (?)
return;
}
if (isAllDataAvailable()) {
showOutput();
}
}
validateName(data, callback);
validateAddress(data, callback);
validateID(data, callback);
The key here is the result object, which starts out as empty. As each field gets validated, it gets added to the result object (by the validation functions, which I've left out in the above snippet). I've used a single callback method, but you could have multiple, say callbackName, callbackAddress, etc. The validation results are processed only if and when the result object has been fully populated, which is checked in isAllDataAvailable.
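For completeness, one of the validation functions I left out might look something like this (the actual check is just an illustration):
function validateName(data, callback) {
    if (typeof data.name === 'string' && data.name.length > 0) {
        result.name = data.name;              // record the validated value
        callback();                           // no error
    } else {
        callback('name is missing or empty'); // reported via showError
    }
}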
Hope this helps.
Consider using: https://github.com/asaf/nodejs-model
It will make your life much easier when dealing with validators.
