RangeError maximum call stack size exceeded - node.js

For a school project I'm working on a Node.js application.
I need to submit some locations of random Pokémon. At the moment I'm using dummy data, but I'm already getting errors.
I submit at most 10 locations to MongoDB using Mongoose, but I get the following error:
RangeError: Maximum call stack size exceeded
The data is valid according to the Mongoose schema. The error also appears when I submit just one location (on its own, not wrapped in an array):
{ _id: 5702bcc7698a36c82833a943,
  name: 'Pikachu',
  pid: 0,
  lng: 5.298302246195419,
  lat: 51.68774273818562 }
My code looks like this:
var startDate = new Date();
if (!req.query.lat || !req.query.lng){
    res.status(400).json("Your location has not been included");
}
var lat = parseFloat(req.query.lat);
var lng = parseFloat(req.query.lng);
location.find({}).exec(function(err, locations){
    if (err){ return next(err); }
    var filteredlocations = [];
    locations.forEach(function(fetchedLocation) {
        distance = calcDistance(fetchedLocation.lat, fetchedLocation.lng, lat, lng);
        if (distance < 1000){
            filteredlocations.push(fetchedLocation);
        }
    });
    if (filteredlocations.length < 10) {
        var toAdd = 1 - filteredlocations.length;
        var newlocations = [];
        for (var i = 0; i < toAdd; i++){
            var randomlocation = randomLocation(lat, lng);
            var newlocation = new location({
                "lat": randomlocation.lat,
                "lng": randomlocation.lng,
                "pid": 0,
                "name": "Pikachu",
            });
            newlocations.push(newlocation);
        }
        console.log(newlocations[0]);
        location.collection.insert(newlocations[0], onInsert);
        function onInsert(err, docs){
            if (err){
                console.log("err: " + err);
            } else {
                filteredlocations.push(docs);
                res.json(filteredlocations);
            }
        }
    }
});
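A likely culprit, though not confirmed above: location.collection.insert bypasses Mongoose and hands the native driver full Mongoose document objects, whose internal (circular) state can overflow the call stack during BSON serialization. A minimal sketch of the usual fix, reusing the model from the question:

// Let Mongoose perform the insert so plain data is serialized,
// instead of passing Mongoose documents to the native driver.
location.create(newlocations, function(err, docs) {
    if (err) { return next(err); }
    filteredlocations = filteredlocations.concat(docs);
    res.json(filteredlocations);
});

// Alternatively, keep the native driver but strip the documents first:
// location.collection.insert(newlocations.map(function(doc) {
//     return doc.toObject();
// }), onInsert);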

Related

My save data to MongoDB not functioning, is it because my searching is wrong?

I'm trying to write a comparison function: if the value I search for does not exist, the new value should be saved to the database. But all I get is "0 new data found.", so the system decides not to save the data. Is my searching wrong?
This is my code:
var count = 0;
for (var t = 1; t < 151; t++) {
    var searching = JobsSchema.find({ jobName: name[t], company: company[t] })
    if (searching == null) {
        count = count + 1;
        var newJobs = new JobsSchema({
            "jobName": name[t],
            "country": country[t],
            "company": company[t],
            "jobType": type[t],
            "salary": salary[t],
            "skills": skills[t],
            "jobDesc": desc[t],
            "jobReq": req[t],
            "jobResponsibility": resp[t],
            "industry": industry[t],
        })
        newJobs.save(function (err, result) {
            if (err) {
                console.log(err);
            }
        })
    }
}
console.log(count + " new data found.");
You should await your find call: find() returns a Mongoose Query object, not a result, so the comparison searching == null is never true.
Also, change it to findOne so it resolves to a single JobsSchema document (or null), and await the save call as well.
Finally, you will need to wrap the code in an async function:
const saveData = async () => {
    var count = 0;
    for (var t = 1; t < 151; t++) {
        var searching = await JobsSchema.findOne({
            jobName: name[t],
            company: company[t],
        });
        if (!searching) {
            count = count + 1;
            var newJobs = new JobsSchema({
                jobName: name[t],
                country: country[t],
                company: company[t],
                jobType: type[t],
                salary: salary[t],
                skills: skills[t],
                jobDesc: desc[t],
                jobReq: req[t],
                jobResponsibility: resp[t],
                industry: industry[t],
            });
            await newJobs.save();
        }
    }
    console.log(count + ' new data found.');
};
saveData();
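One optional addition, not part of the original answer: a top-level call to an async function swallows any rejection silently, so it is worth attaching a handler:

saveData().catch(console.error);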

"too much data for sort()" error in meteor sitemap generator method

I used the meteor sitemaps package to generate a sitemap with the code below:
Meteor.methods({
    sitemapsGenerator: function (generateAnyway) {
        var setting = Settings.findOne({ title: 'sitemapsLastGenerateDate'});
        if (generateAnyway || (new Date(setting.lastModified.getTime() + (24*60*60*1000)) < new Date)) {
            console.log("sitemap generator called");
            var generalSitemapUrls = [];
            for (var i = 0; i <= Math.round(MyPosts.find().count() / 10000); i++) {
                sitemaps.add('/sitemap' + i + '.xml', (function (i) {
                    var out = [];
                    MyPosts.find({}, {
                        fields: {title: 1, postDate: 1},
                        sort: {postDate: 1}, limit: 10000, skip: i * 10000
                    }).forEach(function (post) {
                        console.log("sitemap called" + i);
                        out.push({
                            page: "/posts/" + post.title + "/" + post._id,
                            lastmod: post.postDate,
                            changefreq: 'weekly'
                        });
                    });
                    return out;
                })(i));
            }
            Settings.update({title: "sitemapsLastGenerateDate"}, {$set: {lastModified: new Date} });
        }
    }
});
Now, two months later, my collection has grown, and I recently got the error below when trying to generate the sitemap:
MongoError: too much data for sort() with no index. add an index or
specify a smaller limit.
What is the solution to this error?
If it can be fixed by adding an index to a collection field, how do I do that in Meteor?
On your collection object you use ensureIndex. Typically you do this on the server side, on startup.
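A minimal sketch of what that can look like, assuming the MyPosts collection from the question (_ensureIndex is Meteor's server-side wrapper around index creation; on newer Meteor versions rawCollection().createIndex is the supported route):

// Server-only code
Meteor.startup(function () {
    // Index postDate so the sitemap query can sort on it without
    // an in-memory sort over the whole collection.
    MyPosts._ensureIndex({ postDate: 1 });
});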

How to perform mass inserts into mongodb using NodeJS

I have to insert about 1,000,000 documents into MongoDB using Node.js.
I'm generating these documents in a for loop, storing them in an array, and finally inserting the array into MongoDB:
var codeArray = new Array();
for (var i = 0; i < 1000000; i++){
    var token = strNpm.generate();
    var now = moment().format('YYYYMMDD hhmmss');
    var doc1 = {
        id: token,
        Discount_strId: "pending",
        Promotion_strCode: token,
        Promotion_strStatus: "I",
        Promotion_dtmGeneratedDate: now,
        User_strLogin: "test",
        Promotion_strMode: "S",
        Promotion_dtmValidFrom: "pending",
        Promotion_dtmValidTill: "pending",
        LastModified_dtmStamp: now
    };
    codeArray.push(doc1);
}
db.collection('ClPromoCodeMaster').insert(codeArray, function (err, result) {
    if (err){
        console.log(err);
    } else {
        console.log('Inserted Records - ', result.ops.length);
    }
});
The problem I'm facing is that MongoDB has an insert limit of 16 MB, so I can't insert the entire array at once.
Please suggest the most efficient solution.
The main problem is the request size, not the document size, but it amounts to the same limitation. Bulk operations and the async library's async.whilst will handle this:
var bulk = db.collection('ClPromoCodeMaster').initializeOrderedBulkOp(),
    i = 0;

async.whilst(
    function() { return i < 1000000; },
    function(callback) {
        var token = strNpm.generate();
        var now = moment().format('YYYYMMDD hhmmss');
        var doc = {
            id: token,
            Discount_strId: "pending",
            Promotion_strCode: token,
            Promotion_strStatus: "I",
            Promotion_dtmGeneratedDate: now,
            User_strLogin: "test",
            Promotion_strMode: "S",
            Promotion_dtmValidFrom: "pending",
            Promotion_dtmValidTill: "pending",
            LastModified_dtmStamp: now
        };
        bulk.insert(doc);
        i++;
        // Drain every 1000
        if ( i % 1000 == 0 ) {
            bulk.execute(function(err, response){
                bulk = db.collection('ClPromoCodeMaster').initializeOrderedBulkOp();
                callback(err);
            });
        } else {
            callback();
        }
    },
    function(err) {
        if (err) throw err;
        console.log("done");
    }
);
I should note that regardless, there is an internal limit on bulk operations of 1000 operations per batch. You can submit larger sizes, but the driver will just break them up and still submit in batches of 1000.
1000 is a good number to stay at, though, since it is already in line with how the request will be handled, and it is a reasonable number of things to hold in memory before draining the request queue and sending to the server.
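For comparison, a sketch of the same drain-every-1000 idea without the async library, using the driver's insertMany with async/await (makeDoc stands in for the document generation above and is not part of the original answer):

// Hypothetical sketch: insert 1,000,000 generated docs in batches of 1000.
async function massInsert(db, makeDoc) {
    const collection = db.collection('ClPromoCodeMaster');
    let batch = [];
    for (let i = 0; i < 1000000; i++) {
        batch.push(makeDoc(i));
        if (batch.length === 1000) {   // drain every 1000, as above
            await collection.insertMany(batch, { ordered: false });
            batch = [];
        }
    }
    if (batch.length > 0) {            // flush any remainder
        await collection.insertMany(batch, { ordered: false });
    }
}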
For inserting millions of records at a time, create a Node.js child process fork together with the MongoDB bulk API.
Child process creation (index.js):
const {fork} = require("child_process");

let counter = 1;

function createProcess(data){
    const worker = fork("./dbOperation");
    worker.send(data);
    worker.on("message", (msg) => {
        console.log("Worker Message :", counter, msg);
        counter++;
    });
}

function bulkSaveUser(records) {
    const singleBatchCount = 10000; // Save 10,000 records per hit
    const noOfProcess = Math.ceil(records / singleBatchCount);
    let data = {};
    console.log("No of Process :", noOfProcess);
    for(let index = 1; index <= noOfProcess; index++) {
        data.startCount = (index == 1) ? index : (((index - 1) * singleBatchCount) + 1);
        data.endCount = index * singleBatchCount;
        createProcess(data);
    }
}

bulkSaveUser(1500000);
DB operation (dbOperation.js):
const MongoClient = require('mongodb').MongoClient;
// Collection Name
const collectionName = "";
// DB Connection String
const connString = "";

process.on("message", (msg) => {
    console.log("Initialize Child Process", msg);
    const {startCount, endCount} = msg;
    inputStudents(startCount, endCount);
});

function initConnection() {
    return new Promise(function(r, e) {
        MongoClient.connect(connString, function(err, db) {
            if (err) return e(err);
            r(db);
        });
    });
}

function inputStudents(startCount, endCount) {
    let bulkData = [];
    for(let index = startCount; index <= endCount; index++ ){
        var types = ['exam', 'quiz', 'homework', 'homework'];
        let scores = [];
        // and each class has 4 grades
        for (let j = 0; j < 4; j++) {
            scores.push({'type': types[j], 'score': Math.random() * 100});
        }
        // there are 500 different classes that they can take
        let class_id = Math.floor(Math.random() * 501); // get a class id between 0 and 500
        let record = {'student_id': index, 'scores': scores, 'class_id': class_id};
        bulkData.push({ insertOne : { "document" : record } });
    }
    initConnection()
        .then((db) => {
            const studentDb = db.db("student");
            const collection = studentDb.collection(collectionName);
            console.log("Bulk Data :", bulkData.length);
            collection.bulkWrite(bulkData, function(err, res) {
                if (err) throw err;
                //console.log("Connected Successfully", res);
                process.send("Saved Successfully");
                db.close();
            });
        })
        .catch((err) => { console.log("Err :", err); });
}
A sample project that inserts millions of records into MongoDB using a child process fork.
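One caveat worth adding, not from the original answer: bulkSaveUser(1500000) forks all 150 workers at once, which can overwhelm both the machine and the database. A hedged sketch that caps concurrency, reusing the async library already shown in this thread:

const async = require("async");
const {fork} = require("child_process");

// Hypothetical variant of bulkSaveUser that runs at most 4 workers at a time.
function bulkSaveUserLimited(records, done) {
    const singleBatchCount = 10000;
    const noOfProcess = Math.ceil(records / singleBatchCount);
    const batches = [];
    for (let index = 1; index <= noOfProcess; index++) {
        batches.push({
            startCount: (index == 1) ? index : (((index - 1) * singleBatchCount) + 1),
            endCount: index * singleBatchCount
        });
    }
    async.eachLimit(batches, 4, function(data, callback) {
        const worker = fork("./dbOperation");
        worker.send(data);
        worker.once("message", () => callback()); // batch finished
    }, done);
}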

Missing 80% of GET responses

Our company is planning to transition from Redis to Aerospike, but we are seeing some strange issues with missing GET requests (only 35% make it back to the callback function).
Here is the code we are testing with:
var cluster = require('cluster');
var numCPUs = require('os').cpus().length;

if (cluster.isMaster)
{
    for (var i = 0; i < numCPUs; i++)
    {
        var worker = cluster.fork();
    }
}
else
{
    var start = new Date().getTime();
    var requests = 0;
    var responses = 0;
    var aerospike = require('./node_modules/aerospike');
    var status = aerospike.status;
    var client = aerospike.client({
        hosts: [
            { addr: '127.0.0.1', port: 3000 }
        ]
    });

    function connect_cb(err, client) {
        if (err.code == status.AEROSPIKE_OK) {
            console.log("Aerospike Connection Success");
        }
    }

    client.connect(connect_cb);

    setInterval(function(){
        for(var i = 0; i < 50; i++)
        {
            var key = aerospike.key('dexi', 'toys', 'floor_' + i);
            requests++;
            client.get(key, function(err, rec, meta) {
                responses++;
                if ( err.code == status.AEROSPIKE_OK )
                {
                }
                else
                {
                    console.error('Get Error:', err);
                }
            });
        }
    }, 10);

    setInterval(function(){
        for(var i = 0; i < 50; i++)
        {
            var key = aerospike.key('dexi', 'toys', 'floor_' + i);
            var rec = {
                uid: 1000,                        // integer data stored in bin called "uid"
                name: "user_name",                // string data stored in bin called "user_name"
                dob: { mm: 12, dd: 29, yy: 1995}, // map data stored (msgpack format) in bin called "dob"
                friends: [1001, 1002, 1003]
            };
            var metadata = {
                ttl: 10000,
                gen: 0
            };
            client.put(key, rec, metadata, function(err) {
                switch ( err.code ) {
                    case status.AEROSPIKE_OK:
                        break;
                    default:
                        console.error("Put Error: " + err.message);
                        exitCode = 1;
                        break;
                }
            });
        }
    }, 10);

    setInterval(function(){
        var timeSpent = (new Date().getTime()) - start;
        console.log(requests, responses, timeSpent);
    }, 15000);
}
Below is the console output we are seeing:
34400 9306 15098
34150 9250 15080
35050 9330 15087
34150 9235 15092
33250 9310 15120
33950 9249 15090
34650 9298 15101
35000 9400 15102
34700 9300 15166
33150 9399 15181
34500 9300 15193
33850 9292 15207
34400 9250 15162
34100 9360 15212
34050 9250 15171
34100 9348 15159
33800 9250 15118
34300 9309 15189
34050 9300 15152
34250 9405 15181
As you can see, on average, for every ~35k GET requests we send, only a small percentage actually come back. Our Aerospike dashboard also reflects the discrepancy (it only sees 35% of the gets we send), as its throughput reflects the responses we are getting back.

Is it possible to build a dynamic task list in nodejs Async (waterfall, series, etc...)

I am pulling information from some collections in Mongo that contain node and edge data. First I must get the node so I can grab its edges. Once I have a list of edges, I go back out and grab more nodes (and so on, based on a depth value). The following code is a loose example of how I am attempting to use async.waterfall and the task list.
Initially I have only a single task, but once I make my first query I add to the task array. Unfortunately this does not seem to register with async, and it does not continue to process the tasks I am adding.
Is there a better way to do this?
var async = require('async')
var mongoose = require('mongoose')
var _ = require('underscore')

var Client = this.Mongo.connect(/*path to mongo*/)

var Node = mongoose.Schema({
    id : String,
    graph_id : String
})

var Edge = mongoose.Schema({
    id : String,
    source_id : String,
    destination_id : String
})

var Nodes = Client.model('myNode', Node)
var Edges = Client.model('myEdge', Edge)

var funcs = []
var round = 1
var depth = 2

var query = {
    node : {
        id : '12345'
    },
    edge : {
        id : '12345'
    }
}

var addTask = function(Nodes, Edges, query, round, depth) {
    return function(callback) {
        queryData(Nodes, Edges, query, function(err, node_list) {
            if(depth > round) {
                round++
                function_array.push(addTask(Nodes, Edges, query, round, depth))
            }
        })
    }
}

var queryData = function(Nodes, Edges, query, cb) {
    async.waterfall([
        function(callback) {
            Nodes.find(query.node, function(err, nodes) {
                var node_keys = _.map(nodes, function(node) {
                    return node.id
                })
                callback(null, nodes, node_keys)
            })
        },
        function(nodes, node_keys, callback) {
            query.edge.$or = [ {'source_id' : {$in: node_keys}}, {'destination_id' : {$in: node_keys}} ]
            Edges.find(query.edge, function(err, edges) {
                var edge_keys = _.map(edges, function(edge) {
                    if(edge['_doc']['source_id'] != query.node.id) {
                        return edge['_doc']['source_id']
                    } else {
                        return edge['_doc']['destination_id']
                    }
                })
                callback(null, nodes, edges, node_keys, edge_keys)
            })
        }
    ], function(err, nodes, edges, node_keys, edge_keys) {
        // update the results object then...
        cb(null, _.uniq(edge_keys))
    })
}

var function_array = []

function_array.push(addTask(Nodes, Edges, query, round, depth))

async.waterfall(function_array, function(err) {
    Client.disconnect()
    //this should have run more than just the initial task but does not
})
--------------------- UPDATE ---------------------------
So after playing around with trying to get async.waterfall (or series) to do this by adding trailing functions, I decided to switch to async.whilst, and I am now happy with the solution.
function GraphObject() {
    this.function_array = []
}

GraphObject.prototype.doStuff = function() {
    this.function_array.push(this.buildFunction(100))
    this.runTasks(function(err) {
        console.log('done with all tasks')
    })
}

GraphObject.prototype.buildFunction = function(times) {
    return function(cb) {
        if(times != 0) {
            this.function_array.push(this.buildFunction(times - 1))
        }
        cb(null)
    }
}

GraphObject.prototype.runTasks = function(cb) {
    var tasks_run = 0
    async.whilst(
        function(){
            return this.function_array.length > 0
        }.bind(this),
        function(callback) {
            var func = this.function_array.shift()
            func.call(this, function(err) {
                tasks_run++
                callback(err)
            })
        }.bind(this),
        function(err) {
            console.log('runTasks ran ' + tasks_run + ' tasks')
            if(err) {
                return cb(500)
            }
            cb(null)
        }.bind(this)
    )
}
A task in your function_array can only add a new task to the array provided it is NOT the last task in the array.
In your case, your function_array contained only one task. That task cannot add additional tasks, since it is the last task.
The solution is to have two tasks in the array: a startTask to bootstrap the process, and a finalTask that is more of a dummy task. In that case:
function_array = [startTask, finalTask];
Then startTask adds taskA, taskA adds taskB, taskB adds taskC, and eventually:
function_array = [startTask, taskA, taskB, taskC, finalTask];
The sample code below illustrates the concept.
var async = require('async');

var max = 6;

var nodeTask = function(taskId, value, callback){
    var r = Math.floor(Math.random() * 20) + 1;
    console.log("From Node Task %d: %d", taskId, r);

    // add an edge task
    if (taskId < max) {
        function_array.splice(function_array.length - 1, 0, edgeTask);
    }
    callback(null, taskId + 1, value + r);
};

var edgeTask = function(taskId, value, callback){
    var r = Math.floor(Math.random() * 20) + 1;
    console.log("From Edge Task %d: %d", taskId, r);

    // add a node task
    if (taskId < max) {
        function_array.splice(function_array.length - 1, 0, nodeTask);
    }
    callback(null, taskId + 1, value + r);
};

var startTask = function(callback) {
    function_array.splice(function_array.length - 1, 0, nodeTask);
    callback(null, 1, 0);
};

var finalTask = function(taskId, value, callback) {
    callback(null, value);
};

var function_array = [startTask, finalTask];

async.waterfall(function_array, function (err, result) {
    console.log("Sum is ", result);
});
