I'm using knex with pg.
I have a project similar to the following.
dbClient.js
const dbClient = require('knex')({
  client: 'pg',
  connection: {
    host: '127.0.0.1',
    user: 'user',
    password: 'password',
    database: 'staging',
    port: '5431'
  }
})

module.exports = dbClient
libs.js
const knex = require('./dbClient.js')

async function doThis(email) {
  const last = await knex('users').where({email}).first('last_name').then(res => res.last_name)
  // knex.destroy()
  return last
}

async function doThat(email) {
  const first = await knex('users').where({email}).first('first_name').then(res => res.first_name)
  // knex.destroy()
  return first
}

module.exports = {
  doThat,
  doThis
}
test01.js
const {doThis, doThat} = require('./libs.js');
(async () => {
  try {
    const res1 = await doThis('[email protected]')
    console.log(res1)
    const res2 = await doThat('[email protected]')
    console.log(res2)
  } catch (err) {
    console.log(err)
  }
})()
When knex.destroy() is commented out in libs.js as shown above, node test01 outputs res1 and res2, but the connection pool stays open indefinitely and the command never returns. If I uncomment knex.destroy() in libs.js, doThis executes, but the command hangs at doThat because there's no connection anymore: it was closed in doThis.
My question is: where is the best place to call knex.destroy()? Or is there another way to do it?
Thanks for your time!
Destroying the connection after every query is like packing your guitar up every time you play a note. Just pull it out at the beginning of the performance, play all the songs and put it away at the end.
Likewise, destroy the connection when you're done with it for the rest of the application, not after each query like this. In a web server, that point probably never comes, since you'll kill the process with a signal at some indeterminate time and an active connection is likely a necessity for the app until then.
For tests, you'll probably want to make use of the destroy function to avoid hanging. Similarly, in a (contrived?) one-shot script like you've shown, if the app gets stuck and hangs, destroy the connection a single time when you're done with it.
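Applied to your test01.js, that means one destroy in a finally block after all queries have finished. A minimal sketch of that pattern, reusing your existing modules:

const {doThis, doThat} = require('./libs.js');
const knex = require('./dbClient.js');

(async () => {
  try {
    console.log(await doThis('[email protected]'));
    console.log(await doThat('[email protected]'));
  } catch (err) {
    console.log(err);
  } finally {
    // Tear down the pool exactly once, after all queries are done,
    // so the process can exit cleanly.
    await knex.destroy();
  }
})();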
Here's an illustrative example for Mocha, which was mentioned in a comment and seems like a reasonable guess at what (or something similar to what) folks who wind up in this thread are using. The pattern of setting up before all tests, tearing down after all tests, and doing per-test-case setup and teardown is generic.
Relevant to your question, after(() => knex.destroy()); is the teardown call at the end of all tests. Without it, Mocha hangs. Note that we also shut down the HTTP server per test, so there are multiple candidates for hanging the test suite to look out for.
server.js
const express = require("express");

const createServer = (knex, port = 3000) => {
  const app = express();

  app.get("/users/:username", (request, response) => {
    knex
      .where("username", request.params.username)
      .select()
      .first()
      .table("users")
      .then(user => user ? response.json({data: user})
                         : response.sendStatus(404))
      .catch(err => response.sendStatus(500));
  });

  const server = app.listen(port, () =>
    console.log(`[server] listening on port ${port}`)
  );

  return {
    app,
    close: cb => server.close(() => {
      console.log("[server] closed");
      cb && cb();
    })
  };
};

module.exports = {createServer};
server.test.js
const chai = require("chai");
const chaiHttp = require("chai-http");
const {createServer} = require("./server");
const {expect} = chai;

chai.use(chaiHttp);
chai.config.truncateThreshold = 0;

describe("server", function () {
  this.timeout(3000);

  let knex;
  let server;
  let app;

  before(() => {
    knex = require("knex")({
      client: "pg",
      connection: "postgresql://postgres@localhost",
    });
  });

  beforeEach(done => {
    server = createServer(knex);
    app = server.app;
    knex
      .schema
      .dropTableIfExists("users")
      .then(() =>
        knex.schema.createTable("users", table => {
          table.increments();
          table.string("username");
        })
      )
      .then(() => knex("users").insert({
        username: "foo"
      }))
      .then(() => done())
      .catch(err => done(err));
  });

  afterEach(done => server.close(done));
  after(() => knex.destroy());

  it("should get user 'foo'", done => {
    chai
      .request(app)
      .get("/users/foo")
      .then(response => {
        expect(response.status).to.equal(200);
        expect(response).to.be.json;
        expect(response.body).to.be.instanceOf(Object);
        expect(response.body.data).to.be.instanceOf(Object);
        expect(response.body.data.username).to.eq("foo");
        done();
      })
      .catch(err => done(err));
  });
});
"knex": "0.21.6",
"express": "4.17.1",
"mocha": "8.0.1",
"pg": "8.3.0",
"node": "12.19.0"
Knex destroy() seems to be a one-time operation: after destroying the pool, you need a brand-new connection pool for the next operation.
The db client module you export is cached in the Node module cache, so a new connection pool is not created every time you require it.
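That caching is easy to observe: every require of the module returns the same instance, so all callers share one pool, and destroying it in one place destroys it everywhere. A minimal sketch:

// Both requires resolve to the same cached module instance,
// so they share a single connection pool.
const a = require('./dbClient.js');
const b = require('./dbClient.js');
console.log(a === b); // true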
This is the intended usage: the pool is supposed to be destroyed when the app exits or when all the tests are done. If you have a reason to create and destroy connections for every operation (as in a serverless environment), you should not reuse the destroyed client, but rather create a new instance every time.
Otherwise, it defeats the purpose of connection pooling.
Update about lambda/serverless environments:
Technically, a function and its resources are released after the lambda function has run, and that includes any connections it may have opened; this is necessary for truly stateless functions. It is therefore advisable to close the connection when the function is done. However, lots of functions opening and closing lots of connections may eventually make the DB server run out of connections (see this discussion for example). One solution is to put an intermediate pooler like PgBouncer or PgPool between the DB server and the Lambda functions to negotiate connections.
The other way is for the platform provider (AWS) to add special pooling capabilities to the lambda environment and let functions share long-lived resources.
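In the per-invocation style, a handler would create a fresh knex instance on entry and destroy it in a finally block. Here is a minimal sketch; the handler shape, the makeKnex helper, and the DATABASE_URL env var are illustrative assumptions, not part of the question:

// Hypothetical Lambda-style handler: one pool per invocation.
// This deliberately sacrifices pooling, which is why a PgBouncer/PgPool
// layer is often added in front of the database.
const makeKnex = () => require('knex')({
  client: 'pg',
  connection: process.env.DATABASE_URL, // assumed connection string
  pool: {min: 1, max: 1} // one connection is enough for one invocation
});

exports.handler = async (event) => {
  const knex = makeKnex();
  try {
    const user = await knex('users').where({email: event.email}).first();
    return {statusCode: 200, body: JSON.stringify(user)};
  } finally {
    await knex.destroy(); // never reuse this instance afterwards
  }
};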