If I make a Node cluster application with 4 workers (4 instances of my application), should I use a MySQL pool or a MySQL pool cluster? If I use a pool, it will create one pool per application instance, but if I use a pool cluster, it will create 4 pools in each instance (16 total). Is this a good implementation, or will it actually slow the performance?
First, let's make the configuration file for the mysql database (important is the last section of this file where I make the db workers):
DBconfig.js
'use strict'
// MySQL pool-cluster configuration, shared by every application worker.
const mysql = require('mysql');
const cluster = require('cluster'); // NOTE(review): not used in this file — confirm before removing
// process.env values are strings; coerce so the loop bound below is numeric.
const workers = Number(process.env.WORKERS) || require('os').cpus().length;

// Local Database Settings (one shared config object reused for every pool node).
const local_settings = {
  user: 'user',
  host: '127.0.0.1',
  password: 'pass',
  database: 'dbname',
  debug: false,
  dateStrings: true,
  connectionLimit: 10,
  defaultSelector: 'RR',        // round-robin selection across pool nodes
  multipleStatements: true,
  removeNodeErrorCount: 1,      // drop a node from rotation after a single error
};

// Export the pool cluster itself so callers can getConnection() from it.
const poolCluster = module.exports = mysql.createPoolCluster(local_settings);

// Register one db pool node per app worker, namespaced by this process's pid
// so cluster.js can remove `DBWORKER_<pid>_*` when a worker disconnects.
for (let i = 0; i < workers; i++) {
  poolCluster.add(`DBWORKER_${process.pid}_${i}`, local_settings);
}
Then I create a global Library to use in my application where I have my database connection function
globalLib.js
'use strict'
// Global DB helper: a promisified query against the shared pool cluster.
const path = require('path');
const poolCluster = require(path.join('path', 'to_the', 'DBconfig.js'));

/**
 * Run a query through the pool cluster.
 * @param {{query: string, params: Array}} obj - SQL text and bound parameters.
 * @returns {Promise<Array>} resolves with the result rows, rejects on any error.
 */
global.db = obj => {
  return new Promise((resolve, reject) => {
    poolCluster.getConnection((err, connection) => {
      if (err) { reject(err); return; }
      // BUG FIX: the query callback must declare (err, rows) — the original
      // omitted both, so `rows` was a ReferenceError and the (null) outer
      // `err` was the one being tested.
      connection.query(obj.query, obj.params, (err, rows) => {
        // Release the connection back to the pool before settling.
        connection.release();
        if (err) reject(err);
        else resolve(rows);
      });
    }); // BUG FIX: the original never closed the getConnection(...) call, and
        // `connection.on('error'), function(err){...})` was a syntax error
        // (comma operator + stray paren); that dead listener is removed.
  });
};
app.js
'use strict'
// Express application bootstrap — exported so cluster.js can require() it.
const express = require('express');
const path = require('path');

const app = express();
module.exports = app;

// All routing is delegated to an express.Router() defined in index.js.
const index = require(path.join('path', 'to_my', 'index.js'));
app.use('/', index)
and in the end I have a cluster file from where I start the server (everything here is important):
cluster.js
'use strict'
// Master/worker bootstrap: fork one worker per CPU, restart on disconnect.
const path = require('path');
const cluster = require('cluster');
const sockets = require(path.join('path_to', 'sockets.js'));
const app = require(path.join('path_to', 'app.js'));
const pclust = require(path.join('path_to', 'DBconfig.js'));
// process.env values are strings; coerce so the fork-loop bound is numeric.
const workers = Number(process.env.WORKERS) || require('os').cpus().length;

if (cluster.isMaster) {
  for (let i = 0; i < workers; ++i) {
    // The returned worker handle was assigned but never used in the original.
    cluster.fork();
  }
  cluster.on('disconnect', (worker) => {
    // Remove all pool nodes registered under the dead worker's pid.
    // NOTE(review): this pclust instance lives in the master process while
    // each worker builds its own via require — confirm this removal actually
    // targets the pools the worker was using.
    pclust.remove(`DBWORKER_${worker.process.pid}_*`);
    cluster.fork(); // replace the lost worker
  });
} else {
  // Worker: start the HTTP server, then attach socket handling to it.
  const server = app.listen(8080, '127.0.0.1', function () {
    console.log('yeeeeey');
  });
  // initialize socket
  sockets.initialize(server, app);
}
Let's assume that I have 4 cpus, so there will be 4 instances of my app.js. Therefore I create 4 database workers one for each cpu in each application so in the end I have:
cpuID server instances db workers
1 1 4
2 1 4
3 1 4
4 1 4
4 4 16 ( TOTAL )
So that leaves me with a lot of db workers ... The question is, is this a good implementation of nodeJS cluster and mySQL pool cluster? If not which is the right way?
Once you have MySQL up and running on your computer, you can access it by using Node.js. To access a MySQL database with Node.js, you need a MySQL driver.
First, initialize the Node.js project in a folder on your machine, then download the mysql module into the project folder. After this, create a connection to the database with the createConnection() method of the mysql module.
The Node.js driver supports connection pooling. Connection pooling allows your application to reuse existing connections by automatically saving the connection to a pool so it can be reused, rather than repeatedly creating a new connection to the SAP HANA database server.
I am also curious about this answer. I am assuming that with few requests a single process would generally perform better, but under heavy traffic it would be better to spread the work across the available processors.
In the app.js file I am using these packages within app.js:
// Module requires for the benchmark app — none of these bindings are
// reassigned, so declare them all with const (original used let/var).
const cluster = require('cluster');
const http = require('http');
const osu = require('node-os-utils'); // gets the number of CPUs
const cpu = osu.cpu;
I have also installed autocannon.
if (cluster.isMaster) {
  // Master: fork one worker per available CPU.
  const availableCpus = cpu.count();
  console.log('clustering to', availableCpus, 'processes');
  for (let i = 0; i < availableCpus; i++) {
    cluster.fork();
  }
  cluster.on('online', (worker) => {
    console.log('worker:', worker.process.pid, 'is online');
  });
  cluster.on('exit', (worker, code, signal) => {
    // Only replace workers that died unexpectedly (non-zero exit that was
    // not a deliberate disconnect).
    if (code !== 0 && !worker.exitedAfterDisconnect) {
      // BUG FIX: the original wrote `worker.process.pid, + 'signal:'` — the
      // stray `+` applied unary plus to the string and logged NaN.
      console.log('Starting new worker:', worker.process.pid, 'signal:', signal);
      cluster.fork();
    }
  });
} else {
  // Worker: a CPU-bound busy loop so the clustering benchmark has real work.
  app.get('/', function (req, res) {
    let i = 100000000;
    while (i > 0) { i--; }
    res.send();
  });
  app.listen(port, () => console.log("APP TEST: port %s", port));
}
Using autocannon without cluster:
Using autocannon with cluster and 8 workers:
Also, without node.js cluster: 44k requests. With node.js cluster and 8 workers: 40k requests.
I would find it helpful if anyone could give an explanation on this.
If you found this helpful, you can donate to us via PayPal or buy us a coffee so we can maintain and grow. Thank you!
Donate Us With