pool-mysql
v2.2.10
Published
[![Codacy Badge](https://api.codacy.com/project/badge/Grade/928cce8dd2ba4fcaa4d857552209fd16)](https://app.codacy.com/gh/TaxiGo-tw/pool-mysql?utm_source=github.com&utm_medium=referral&utm_content=TaxiGo-tw/pool-mysql&utm_campaign=Badge_Grade_Dashboard)
Downloads
2,292
Readme
ReadME
This package depends on mysql and was made for migrating to the following features:
multiple connection pool
connection writer/reader
async/await
model.query
log print
events
See the test Examples
Installation
npm i pool-mysql --save
Usage
- pool-mysql loads settings from process.env. There is a helpful package for this, dotenv.
SQL_HOST={{writer}}
#reader is optional
SQL_HOST_READER={{reader}}
SQL_USER={{user}}
SQL_PASSWORD={{passwd}}
SQL_TABLE={{table name}}
- Require
pool-mysql
const pool = require('pool-mysql')
pool.query(sql, value, (err, data) => {
})
const options = {
writer: {
host: process.env.HOST2,
database: process.env.DB2
},
reader: {
host: process.env.HOST2,
database: process.env.DB2
},
forceWriter: true
}
const pool2 = pool.createPool({ options })
const connection = pool.connection()
//callback query
connection.query(sql, values, (err,data) => {
})
//support async/await
try {
const result = await connection.q(sql,value)
} catch(err) {
console.log(err)
}
pool of connection pool
limit max connection amount with same priority
// if equal or more than 5 connections which tagged `foo`, wait for releasing
const connection = pool.connection({ limit: 5 })
// higher priority to get connection than 0
const connection = pool.connection({ priority: 1 })
const Schema = require('pool-mysql').Schema
const Posts = class posts extends Schema {
get columns() {
return {
id: Schema.Types.PK,
user: require('./user') // one to one reference
//or
user2: {
ref: require('./user'), // one to one reference
column: 'user'
},
user3: {
type: Schema.Types.FK(require('./User.js'), 'id'),
required: true,
length: { min: 1, max: 20 },
},
user_type: {
type: Schema.Types.ENUM('A','B','C')
},
available_area: {
type: Schema.Types.Polygon
},
created_at: {
type: Schema.Types.DateTime
}
}
}
const User = class user extends Schema {
get columns() {
return {
id: Schema.Types.PK,
user: [require('./posts')] //one to many reference
}
}
await Posts
.SELECT() //default to columns()
.FROM()
.WHERE({id: 3}) //or you can use .WHERE('id = ?',3)
.POPULATE('user') //query reference
.PRINT() //print sql statement, query time, connection id and works on writer/reader
.WRITER //force query on writer
.exec()
// nest populate
const result = await Drivers
.SELECT()
.FROM()
.WHERE({ driver_id: 3925 })
.POPULATE({ trip_id: { driver_loc_FK_multiple: {} }})
.FIRST()
.exec()
const results = Trips.SELECT(Trips.KEYS, Users.KEYS)
.FROM()
.LEFTJOIN('user_info ON uid = trips.user_id')
.WHERE('trip_id = ?', 23890)
.AND('trip_id > 0')
.LIMIT()
.NESTTABLES()
.MAP(result => {
const trip = result.trips
trip.user = result.user_info
return trip
})
.FIRST()
.exec()
results.should.have.property('trip_id')
results.trip_id.should.equal(23890)
results.should.have.property('user_id')
results.should.have.property('user')
results.user.should.have.property('uid')
assert(results instanceof Trips)
for massive rows query
Replace exec() with stream()
Some modifiers will not work
highWaterMark
- set to 1 :
onValue
.rows will be an object
- set to 2 or greater :
onValue
.rows will be an array
- rows.length will be less than or equal to
highWaterMark
- rows.length will be less than or equal to
- set to 1 :
TableA
.SELECT()
.FROM()
.LEFTJOIN('tableB on tableB.id = tableA.id')
.LIMIT(25)
.NESTTABLES()
.MAP(data => {
const tableA = data.tableA
return { ...tableA, user: data.tableB }
})
.stream({
connection, //optional
highWaterMark: 5, //optional, default to 1
onValue: (rows, done) => {
assert.equal(rows.length, 5)
expect(rows[0]).haveOwnProperty('id')
expect(rows[0]).haveOwnProperty('user')
done()
},
onEnd: (error) => {
ok()
}
})
async / await
done
will be an empty function
.stream({
connection, //optional
highWaterMark: 1, // if set to 1, will be object in `onValue`
onValue: async (row,done) => {
await doSomething()
},
onEnd: async (error) => {
ok()
}
})
// single
await FOO.INSERT()
.INTO()
.SET(obj)
.exec(connection)
// multiple
await FOO.INSERT()
.INTO('table (`id`, `some_one_field`)')
.VALUES(array)
.exec(connection)
- return value after updated
const results = await Block
.UPDATE()
.SET('id = id')
.WHERE({ blocked: 3925 })
.UPDATED('id', 'blocker')
.AFFECTED_ROWS(1) //throw if affectedRows !== 1
.CHANGED_ROWS(1) //throw if changedRows !== 1
.ON_ERR('error message') // custom error message, can be string or callback
.exec()
for (const result of results) {
result.should.have.property('id')
result.should.have.property('blocker')
}
const redis = require('redis')
const bluebird = require('bluebird')
bluebird.promisifyAll(redis.RedisClient.prototype)
bluebird.promisifyAll(redis.Multi.prototype)
const client = redis.createClient({
host: ...,
port: ...,
db: ...
})
pool.redisClient = client
//...
const connection = pool.connection
await connection.q('SELECT id FROM user WHERE uid = ?', userID, {
key: `api:user:id:${userID}`, //optional , default to queryString
EX: process.env.NODE_ENV == 'production' ? 240 : 12, //default to 0 , it's required if need cache
isJSON: true, //default to true
})
await connection.q('SELECT id FROM user WHERE uid = ?', userID, { EX: 60})
User.SELECT().FROM().WHERE('uid = ?',id).EX(60, { forceUpdate: true }).exec()
await Trips.UPDATE('user_info')
.SET({ user_id: 31 })
.WHERE({ uid: 31 })
.CHANGED_ROWS(1)
.ON_ERR(errMessage) // string
.exec()
// or callback
await Trips.UPDATE('user_info')
.SET({ user_id: 31 })
.WHERE({ uid: 31 })
.CHANGED_ROWS(1)
.ON_ERR(err => {
return 'error value'
})
.exec()
- mass queries in the same time, combined queries will query once only (scope in instance)
Trips.SELECT().FROM().WHERE({ trip_id:1 }).COMBINE().exec().then().catch()
Trips.SELECT().FROM().WHERE({ trip_id:1 }).COMBINE().exec().then().catch()
Trips.SELECT().FROM().WHERE({ trip_id:1 }).COMBINE().exec().then().catch()
Trips.SELECT().FROM().WHERE({ trip_id:1 }).COMBINE().exec().then().catch()
// the second ... latest query will wait result from first one
Every 300 seconds, half of the reader & writer connections are freed,
but at least 10 reader & writer connections will always be kept.
log
logs — not suggested to subscribe
get
called when a connection is got from the pool
create
called when a connection is created
release
called when a connection is released
will_query
query
called when a connection queries
did_query
amount
called when the connection pool changes its amount
end
called when a connection ends
request
a connection is requested but capped by the connection limit
recycle
a free connection is back
warn
warning
err
error
pool.event.on('get', connection => {
console.log(connection.id)
})
Triggered on UPDATE()...SET(object) and INSERT()...SET(object)
values must be object
Variables
type: to limit type
required: default to false
- INSERT() checks all required
- UPDATE() checks SET()
length: limit something.length
// Custom Validator
class PlateNumber extends Scheme.Types.Base {
static validate(string) {
return string.match(/[0-9]+-[A-Z]+/)
}
}
module.exports = class driver_review_status extends Scheme {
get columns() {
return {
'uid': {
type: Scheme.Types.PK,
required: true
},
'first_name': {
type: Scheme.Types.String,
required: true,
},
'last_name': String,
'car_brand': {
type: Scheme.Types.JSONString
},
'model': {
type: String
},
'phone_number': {
type: Scheme.Types.String,
required: true,
length: 10
},
'plate_number': {
type: PlateNumber,
length: { min: 6 , max: 9 }
},
'email': {
type: Scheme.Types.Email,
required: true
}
}
}
}
every query returns the response from mock() and increases the index
assigning mock() to pool will reset the index to 0
- rollback after execute
await Table.INSERT().INTO().rollback()
all
print logs everywhere
error
print logs only on error
none
never print logs
default to error
pool.logger = 'error'
// [3] Reader 1ms: SELECT * FROM table
Custom Logger
pool._logger = (err, toPrint) => { }