ENOTSUP errnoException in NodeJS using mssql in cluster worker - node.js

I am trying to connect to an SQL Server database within a cluster worker. I am using node-mssql and Tedious as the driver. Here is my test code.
'use strict';
var os = require('os');
var numCPUs = os.cpus().length;
var cluster = require('cluster');
var mssql = (cluster.isMaster == false ? require('mssql') : null);

function workerLog(msg) {
    process.send({msg: "CLUSTER-NODE-" + cluster.worker.id + ": " + msg});
}

if (cluster.isMaster == true) {
    console.log("MASTER: " + "SPAWNING " + numCPUs + " CLUSTER NODES");
    for (var i = 0; i < numCPUs; i++) {
        var worker = cluster.fork();
        worker.on('message', function(msg) {
            console.log(msg.msg);
        });
    }
    var timeOuts = [];
    var workerError = function(workerId) {
        console.log("MASTER: " + "AN EXECUTION ISSUE OCCURRED WITH CLUSTER NODE " + workerId);
    };
    cluster.on('fork', function(worker) {
        timeOuts[worker.id] = setTimeout(workerError, 10000, worker.id);
        console.log("MASTER: " + "SPAWNED CLUSTER NODE " + worker.id);
    });
    cluster.on('online', function(worker) {
        console.log("MASTER: " + "CLUSTER NODE " + worker.id + " COMING ONLINE");
    });
    cluster.on('listening', function(worker, address) {
        clearTimeout(timeOuts[worker.id]);
        console.log("MASTER: " + "CLUSTER NODE " + worker.id + " IS LISTENING");
    });
    cluster.on('disconnect', function(worker) {
        console.log("MASTER: " + "CLUSTER NODE " + worker.id + " HAS DISCONNECTED");
    });
    cluster.on('exit', function(worker, code, signal) {
        clearTimeout(timeOuts[worker.id]);
        console.log("MASTER: " + "CLUSTER NODE " + worker.id + " HAS EXITED");
    });
} else {
    var dbName = "Prefs";
    var cfg = {
        driver: 'tedious',
        user: 'webmi',
        password: 'webmi01',
        server: "localhost\\SQLEXPRESS",
        database: dbName,
        options: {
            useColumnNames: true,
            isolationLevel: 1,           // READ_UNCOMMITTED
            connectionIsolationLevel: 1  // READ_UNCOMMITTED
        }
    };
    var dbConn = new mssql.Connection(cfg);
    workerLog("CONNECT TO " + dbName);
    dbConn.connect().then(function() {
        if (dbConn.connected) {
            workerLog("CONNECTION TO " + dbName + " EXISTS");
        } else {
            workerLog("NOT CONNECTED TO " + dbName + " BUT NO ERROR DETECTED");
        }
    }).catch(function(error) {
        workerLog("CANNOT CONNECT TO DATABASE\n" + error.stack);
    });
}
And here is what I get when I run it:
D:\proj\CTech\9.2\Bin\Node>node test.js
MASTER: SPAWNING 2 CLUSTER NODES
MASTER: SPAWNED CLUSTER NODE 1
MASTER: SPAWNED CLUSTER NODE 2
MASTER: CLUSTER NODE 1 COMING ONLINE
MASTER: CLUSTER NODE 2 COMING ONLINE
CLUSTER-NODE-2: CONNECT TO Prefs
CLUSTER-NODE-1: CONNECT TO Prefs
events.js:85
throw er; // Unhandled 'error' event
^
Error: write ENOTSUP
at exports._errnoException (util.js:746:11)
at ChildProcess.target._send (child_process.js:484:28)
at ChildProcess.target.send (child_process.js:416:12)
at sendHelper (cluster.js:676:8)
at send (cluster.js:512:5)
at cluster.js:488:7
at SharedHandle.add (cluster.js:99:3)
at queryServer (cluster.js:480:12)
at Worker.onmessage (cluster.js:438:7)
at ChildProcess.<anonymous> (cluster.js:692:8)
D:\proj\CTech\9.2\Bin\Node>
Looking at child_process.js, it appears that the worker is trying to send the connection handle back to the master (for sharing purposes?), but I may be wrong about that. In any case, the worker faults on the send attempt.
Is there a way to prevent this fault? Or is there a way to have the worker NOT attempt to share this handle with the master?
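For what it's worth, the frames in the trace (Worker.onmessage, queryServer, sendHelper, SharedHandle.add) are on the master side, and SharedHandle is what the cluster module uses for handles it cannot distribute round-robin, UDP sockets in particular. Tedious resolves an instance name such as SQLEXPRESS by querying the SQL Server Browser service over UDP, so one guess is that it is this lookup socket the cluster module is trying to share, which fails on Windows. Under that assumption, a possible workaround is to skip instance-name resolution and connect to an explicit TCP port; this is only a sketch, and the port value must match whatever static port the SQLEXPRESS instance actually listens on (1433 here is an assumption, not something from the question):
// Hypothetical worker config: connect by host + port instead of an instance
// name, so Tedious never opens the UDP lookup socket that the cluster module
// would then try to share between master and worker.
var cfg = {
    driver: 'tedious',
    user: 'webmi',
    password: 'webmi01',
    server: 'localhost',   // no "\\SQLEXPRESS" instance suffix
    port: 1433,            // assumption: instance pinned to a static TCP port
    database: dbName,
    options: {
        useColumnNames: true,
        isolationLevel: 1,
        connectionIsolationLevel: 1
    }
};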

Related

socket.io-redis adapter timeout reached while waiting for clients response error

In my project I'm using socket.io 2.3.0 and socket.io-redis 5.2.0 to broadcast data between servers. In that setup I hit a Redis timeout error from time to time and I'm not sure why. On my server I run a single node process, and I use Redis to store data and share it with other connections. Could it matter that I use about 5 of the 15 Redis databases? In production I've encountered this problem more than ten times in a single day. Please help me resolve this issue.
Stack Trace:
Error: timeout reached while waiting for clients response
at Timeout._onTimeout (/var/www/html/project/node_modules/socket.io-redis/index.js:485:48)
at listOnTimeout (internal/timers.js:555:17)
at processTimers (internal/timers.js:498:7)
Here is my node entry point.
var port = 12300;
var io = require('socket.io')(port);
var redis = require('redis');
const redis_adap = require("socket.io-redis");

io.adapter(redis_adap({
    host: '127.0.0.1',
    port: 6379,
    requestsTimeout: 5000 // tried values up to 20000, but the issue still occurs
}));

io.on('connection', function(socket) {
    var player = new Player();
    var Redis = new redis();
    var roomId;
    player.id = socket.id;
    socket.on('ping', function() {
        socket.emit('pong');
    });
    socket.on('message', function(data) {
        let _messageIndex = data.e;
        let _id = player.id;
        let returnData = {
            i: _id,
            e: _messageIndex
        };
        socket.broadcast.to(player.roomid).emit('message', returnData);
    });
    socket.on('UpdateHorn', function() {
        let _id = player.id;
        let returnData = {
            i: _id
        };
        socket.broadcast.to(player.roomid).emit('UpdateHorn', returnData);
    });
    socket.on('UpdateTiles', function(data) {
        let returnData = {
            t: data.t
        };
        socket.broadcast.to(player.roomid).emit('UpdateTiles', returnData);
    });
    socket.on('getLobbyDetails', function() {
        socket.emit('getLobbyDetails', { lobby: CONFIG.getLobbyDetails() });
    });
});
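No accepted answer is recorded here, but the error appears to come from the adapter's cross-node requests (clients/rooms queries) timing out when a subscribed node never replies, which often traces back to an unhealthy Redis connection. A common first step is to create the pub/sub clients explicitly and watch them for errors and reconnects; the sketch below only illustrates the pubClient/subClient options that socket.io-redis accepts, with the host, port and timeout values taken from the question:
var redis = require('redis');
const redis_adap = require("socket.io-redis");

// Create the clients yourself so connection problems become visible.
const pubClient = redis.createClient({ host: '127.0.0.1', port: 6379 });
const subClient = redis.createClient({ host: '127.0.0.1', port: 6379 });

pubClient.on('error', function(err) { console.error('redis pub error', err); });
subClient.on('error', function(err) { console.error('redis sub error', err); });
subClient.on('reconnecting', function() { console.warn('redis sub reconnecting'); });

io.adapter(redis_adap({
    pubClient: pubClient,
    subClient: subClient,
    requestsTimeout: 5000
}));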

Nodejs cluster unable to kill worker running infinite loop

Is it possible to kill a cluster worker that is running an infinite loop? I've tried, but I am unable to kill the worker. I guess the kill command cannot get onto the worker's JS event loop. Any ideas on how else I can do this? When the master receives a "start" message, I want it to fork a worker. When the master receives a "stop" message, I want it to kill the worker.
const cluster = require('cluster');
const numCPUs = require('os').cpus().length;
const process = require('process');
const { nrpin, nrpout } = require("./helpers/PubSub");
const chalk = require('chalk');

//https://leanpub.com/thenodejsclustermodule/read
//https://gist.github.com/jpoehls/2232358

const arrWorkers = [];

if (cluster.isMaster) {
    masterProcess();
} else {
    childProcess();
}

function masterProcess() {
    console.log(chalk.blueBright(`Master ${process.pid} is running`));
    nrpin.on("start", async (bot) => {
        console.log("Start:", bot._id);
        if (arrWorkers.length == numCPUs) {
            console.log(chalk.yellowBright("No CPUs available to create worker!"));
        }
        const worker = cluster.fork();
        arrWorkers.push({
            workerId: worker.id,
            botId: bot._id
        });
    });
    nrpin.on("stop", async (bot) => {
        console.log("Stop:", bot._id);
        const worker = arrWorkers.find(x => x.botId == bot._id);
        if (worker) {
            console.log("killing worker:", worker.workerId);
            cluster.workers[worker.workerId].kill();
        }
    });
    // Be notified when workers die
    cluster.on('exit', function(worker, code, signal) {
        if (worker.isDead()) {
            console.info(`${chalk.redBright('worker dead pid')}: ${worker.process.pid}`);
        }
    });
}

function childProcess() {
    console.log(chalk.green(`Worker ${process.pid} started...`));
    while (true) {
        console.log(Date.now());
    }
}
Never mind, I solved this using process.kill
let process_id = cluster.workers[worker.workerId].process.pid;
process.kill(process_id);
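For context, a sketch of how that fix might slot into the stop handler above (all names taken from the question's code): worker.kill() first attempts a graceful IPC disconnect, which the blocked worker can never process, whereas process.kill(pid) delivers an OS signal directly to the child process.
nrpin.on("stop", async (bot) => {
    const entry = arrWorkers.find(x => x.botId == bot._id);
    if (entry) {
        // Signal the child process directly instead of using cluster's
        // worker.kill(), which waits for a disconnect that the busy loop
        // will never get around to handling.
        const pid = cluster.workers[entry.workerId].process.pid;
        process.kill(pid); // SIGTERM by default; process.kill(pid, 'SIGKILL') if it still won't die
    }
});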

pm2 can't fork a nodejs app which contains native cluster code

My app uses the native nodejs cluster module internally and runs fine with node ./dist/main.js,
but it goes wrong when I use pm2 start ./dist/main.js
//my code
import cluster from 'cluster'
import http from 'http'

if (cluster.isMaster) {
    (async () => {
        const master = await (await import('./master'))
        async function onMsg(pid: number, type?: string, num?: number, data?: { db: any, apiName: any, args: Array<any> }) {
            console.log(`master receiving message from cluster ${pid}`)
            try {
                let result = await master.publishMission(type, data)
                // console.log(`${type} finish mission and send back to koa cluster ${pid}`)
                cluster.workers[pid].send({ num: num, status: true, data: result })
            } catch (err) {
                cluster.workers[pid].send({ num: num, err })
            }
        }
        //cluster nums
        for (let i = 0; i < 1; i++) {
            cluster.fork()
        }
        cluster.on('message', (worker, msg) => {
            onMsg(worker.id, ...msg)
        })
        cluster.on('exit', (worker, code, signal) => {
            console.log('worker %d died (%s). restarting...', worker.process.pid, signal || code);
            cluster.fork();
        });
    })()
} else {
    (async () => {
        const app = await (await import('./app')).app
        try {
            http.createServer(app).listen(5000)
            console.log("fork new koa server", process.pid)
        } catch (err) {
            console.log(err)
        }
    })()
}
//error log
TypeError: Found non-callable @@iterator
    at EventEmitter.<anonymous> (C:\Users\yany\project\Jmrh_Warehouse\src\main.ts:22:13)
    at EventEmitter.emit (events.js:315:20)
    at Worker.<anonymous> (internal/cluster/master.js:174:13)
    at Worker.emit (events.js:315:20)
    at ChildProcess.<anonymous> (internal/cluster/worker.js:32:12)
    at ChildProcess.emit (events.js:315:20)
    at emit (internal/child_process.js:903:12)
    at processTicksAndRejections (internal/process/task_queues.js:81:21)
TypeError: Found non-callable @@iterator
    at EventEmitter.<anonymous> (C:\Users\yany\project\Jmrh_Warehouse\src\main.ts:22:13)
    at EventEmitter.emit (events.js:315:20)
    at Worker.<anonymous> (internal/cluster/master.js:174:13)
    at Worker.emit (events.js:315:20)
    at ChildProcess.<anonymous> (internal/cluster/worker.js:32:12)
    at ChildProcess.emit (events.js:315:20)
    at emit (internal/child_process.js:903:12)
    at processTicksAndRejections (internal/process/task_queues.js:81:21)
pm2 implements clustering internally, so there may be a problem with the port sharing. Where dist/main.js is:
(async () => {
    const app = await (await import('./app')).app
    try {
        http.createServer(app).listen(5000)
        console.log("fork new koa server", process.pid)
    } catch (err) {
        console.log(err)
    }
})()
pm2 start dist/main.js -i max might work. -i starts pm2 in cluster mode.
https://pm2.keymetrics.io/docs/usage/cluster-mode/
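The same thing can be kept in the repo as an ecosystem file instead of command-line flags; this is only a sketch using pm2's documented instances/exec_mode options, with the script path taken from the question and the app name made up:
// ecosystem.config.js
module.exports = {
    apps: [{
        name: 'main',                // hypothetical app name
        script: './dist/main.js',
        instances: 'max',            // or a fixed number, e.g. 1
        exec_mode: 'cluster'         // pm2 manages the workers instead of your own cluster.fork()
    }]
};
Then start it with: pm2 start ecosystem.config.js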
Using pm2 start dist/main.js -i 1 makes this error go away, so when pm2 forks the process it may disable the cluster message passing inside. But a new problem came up:
const master = await (await import('./master'))
This line doesn't work. It's fine when I use node dist/main.js but it doesn't work under pm2. Here is ./master:
import * as cl from 'child_process'
import { EventEmitter } from 'events'

const event = new EventEmitter()
const autoReboot = false
var count = 0
console.log("fine")

interface masterStore {
    [typeOfChild: string]: {
        childGroup?: Array<cl.ChildProcess>,
        workIndex: number,
        maxNum?: number,
    }
}

var master: masterStore = {}

//child process of database
master['mongodb'] = {
    childGroup: [],
    workIndex: 0,
    maxNum: 1
}
for (let i = 0; i < master['mongodb'].maxNum; i++) {
    forkNewDB()
}
function forkNewDB() {
    var db = cl.fork(`${__dirname}/data/index.js`)
    master['mongodb'].childGroup.push(db)
    db.on('exit', (signal) => {
        console.log(`db process ${db.pid} exit with code ${signal}`)
        let index = master['mongodb'].childGroup.findIndex(el => el.pid == db.pid)
        master['mongodb'].childGroup.splice(index, 1)
        if (autoReboot) {
            forkNewDB()
        }
    })
    db.on('message', async (msg: { des: string, type: string, ev: string, result?: any, err?: any, data?: any, num?: any }) => {
        if (msg.des == "res") {
            event.emit(msg.ev, { msg })
        } else {
            try {
                let result = await publishMission(msg.type, msg.data)
                db.send({ des: "res", num: msg.num, status: true, data: result })
            } catch (err) {
                db.send({ des: "res", num: msg.num, err })
            }
        }
    })
}

//child process of rabbitmq
master['rabbitmq'] = {
    childGroup: [],
    workIndex: 0,
    maxNum: 1
}
function forkNewMQ() {
    var mq = cl.fork(`${__dirname}/mq/index.js`)
    master['rabbitmq'].childGroup.push(mq)
    mq.on('exit', (signal) => {
        console.log(`mq process ${mq.pid} exit with code ${signal}`)
        master['rabbitmq'].childGroup.splice(0, 1)
        if (autoReboot) {
            forkNewMQ()
        }
    })
    mq.on('message', async (msg: { des: string, type: string, ev: string, result?: any, err?: any, data?: any, num?: any }) => {
        if (msg.des == "res") {
            event.emit(msg.ev, { msg })
        } else {
            try {
                let result = await publishMission(msg.type, msg.data)
                mq.send({ des: "res", num: msg.num, status: true, data: result })
            } catch (err) {
                mq.send({ des: "res", num: msg.num, err })
            }
        }
    })
}
forkNewMQ()

//child process of redis
master['redis'] = {
    childGroup: [],
    workIndex: 0,
    maxNum: 1
}
function forkNewRedis() {
    var redis = cl.fork(`${__dirname}/redis/index.js`)
    master['redis'].childGroup.push(redis)
    redis.on('exit', (signal) => {
        console.log(`redis process ${redis.pid} exit with code ${signal}`)
        let index = master['redis'].childGroup.findIndex(el => el.pid == redis.pid)
        master['redis'].childGroup.splice(index, 1)
        if (autoReboot) {
            forkNewRedis()
        }
    })
    redis.on('message', async (msg: { des: string, type: string, ev: string, result?: any, err?: any, data?: any, num?: any }) => {
        if (msg.des == "res") {
            event.emit(msg.ev, { msg })
        } else {
            try {
                let result = await publishMission(msg.type, msg.data)
                redis.send({ des: "res", num: msg.num, status: true, data: result })
            } catch (err) {
                redis.send({ des: "res", num: msg.num, err })
            }
        }
    })
}
forkNewRedis()

//init message publish
function publishMission(type?: string, data?: { db: any, apiName: any, args: Array<any> }): Promise<any> {
    return new Promise(async (res, rej) => {
        try {
            let tmp = master[type].workIndex
            master[type].workIndex++
            master[type].workIndex = master[type].workIndex % master[type].maxNum
            let ev = count + ""
            // console.log(`master publish mission to ${type}`)
            master[type].childGroup[tmp].send({ data, ev, des: "req" })
            count++
            event.once(ev, (result) => {
                if (!result.msg.err) {
                    res(result.msg.result)
                } else {
                    rej(result.msg.err)
                }
                event.removeListener(ev, () => {})
            })
        } catch (err) {
            rej(err)
        }
    })
}

export { publishMission }
The log from node dist/main.js should be:
fine
child process 10116 rabbitmq is ready
fork new koa server 15948
child process 10008 create DB thread 1

NodeJS Cluster how share object array across workers

So I have set up a simple nodejs cluster game; I am new to nodejs. Basically players connect to my worker using socket.io, then a Player object is created for them and added to my PlayerManager.LIST array. This causes me some issues because PlayerManager.LIST exists separately in each worker and the copies are not synced.
So my question is: is there a better way of doing this, so that if I connect to worker 2 I see the same player list as on worker 1?
Structure at the moment:
app.js
-> worker
->-> PlayerManager (Contains List)
->->-> Player
Git Repo: https://github.com/mrhid6/game_app_v2
Node.js clusters are based on Node.js child processes. With child processes you can send data between the parent (the master in a cluster) and the child (a worker in a cluster) via messages over an IPC channel. You can do the same with clusters using message events:
var cluster = require('cluster');
var _ = require('lodash');
var http = require('http');

var workers = [];
var workerCount = 4;

if (cluster.isMaster) {
    for (var i = 0; i < workerCount; i++) {
        var worker = cluster.fork();
        worker.on('message', function(msg) {
            if (msg.task === 'sync') {
                syncPlayerList(msg.data);
            }
        });
        workers.push(worker);
    }
} else {
    var worker = new Worker();
    process.on('message', function(msg) {
        if (msg.task === 'sync') {
            worker.playerList = msg.data;
        }
    });
}

// master: broadcast the latest list to every worker
function syncPlayerList(playerList) {
    _.forEach(workers, function(worker) {
        worker.send({
            task: 'sync',
            data: playerList
        });
    });
}

// worker class
function Worker() {
    this.playerList = [];
}

Worker.prototype.sendSyncEvent = function() {
    process.send({
        task: 'sync',
        data: this.playerList
    });
};
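To illustrate how the two halves connect (assuming a socket.io connection handler inside the worker, which is not shown above): when a worker changes its local list it calls sendSyncEvent(), the master's 'message' handler picks that up and rebroadcasts the list to every worker via syncPlayerList().
// Hypothetical worker-side usage: keep the shared list updated on join/leave.
io.on('connection', function(socket) {
    worker.playerList.push({ id: socket.id });
    worker.sendSyncEvent(); // master rebroadcasts the new list to all workers

    socket.on('disconnect', function() {
        worker.playerList = worker.playerList.filter(function(p) {
            return p.id !== socket.id;
        });
        worker.sendSyncEvent();
    });
});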

On AWS node socketcluster can't connect more than 1000 connections

I am running a simple socketcluster node.js server and connecting to it from a node.js websocket client.
Running the server on my local Ubuntu 14.04 machine, I could connect more than 10,000 clients to the server. But on an AWS EC2 (c3.large) Ubuntu 14.04 instance, the same code accepts fewer than 1000 connections.
I modified /etc/security/limits.conf on the EC2 instance and set the "soft nofile" and "hard nofile" limits to 65535.
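For reference, the change described above would look roughly like this in /etc/security/limits.conf (the wildcard domain is an assumption; the question doesn't say which account runs node):
# /etc/security/limits.conf on the EC2 instance
*   soft   nofile   65535
*   hard   nofile   65535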
The posix soft limit suggested in Node.js maxing out at 1000 concurrent connections is not helping either.
Other sysctl parameters don't differ much between my local Ubuntu machine and the EC2 instance.
Latency is probably not the issue, because I tried connecting to the server from multiple client machines and the number of connections still stayed below 1000.
Is there any AWS environment setting that could affect performance?
Could the number of messages to and from EC2 be a limitation?
var posix = require('posix');
posix.setrlimit('nofile', { soft: 10000 });

var SocketCluster = require('socketcluster').SocketCluster;
var numCPUs = require('os').cpus().length;
var numWorkers = numCPUs;
var start = Date.now();

console.log("..... Starting Server....");

process.on('uncaughtException', function(err) {
    console.log("***** SEVERE ERROR OCCURED!!! *****");
    console.log(err);
});

var socketCluster = new SocketCluster({
    balancers: 1,
    workers: numWorkers,
    stores: 1,
    port: 7000,
    appName: 'mysimapp',
    workerController: __dirname + '/sim_server.js',
    addressSocketLimit: 0,
    socketEventLimit: 100,
    rebootWorkerOnCrash: true,
    useSmartBalancing: true
});
--sim_server.js--
module.exports.run = function(worker) {
    var posix = require('posix');
    posix.setrlimit('nofile', { soft: 10000 });

    var connection_db = {};
    var opencount = 0;
    var closecount = 0;
    var msgcount = 0;

    function status() {
        console.log('open: ' + opencount);
        console.log('close: ' + closecount);
        //console.log('receive: ' + msgcount);
        setTimeout(function() {
            status();
        }, 10000);
    }
    status();

    websocket_server = worker.getSCServer();
    websocket_server.on('connection', function(socket) {
        var mac;
        socket.on('mac-id', function(data) {
            opencount++;
            mac = data;
            connection_db[mac] = socket;
        });
        socket.on('message', function(data) {
            msgcount++;
        });
        socket.on('close', function() {
            delete connection_db[mac];
            closecount++;
        });
    });

    process.once('SIGINT', function() {
        process.exit(0);
    });
}
My bad, there wasn't any issue with the code or with AWS.
In my setup, the switch/ISP connection used to reach the AWS instance could not handle that many connections.
