Compare commits
5 Commits
Author | SHA1 | Date
---|---|---
 | e1fa2480d0 |
 | b8c64cc693 |
 | aa02f9f5b1 |
 | 9d1cc4bbec |
 | af44d48df1 |
.gitignore (vendored, 2 changes)
@@ -1,2 +1,4 @@
 *.sw?
 nimcache/
+nimble.develop
+nimble.paths
fiber_orm.nimble
@@ -1,6 +1,6 @@
 # Package

-version = "3.0.0"
+version = "4.0.0"
 author = "Jonathan Bernard"
 description = "Lightweight Postgres ORM for Nim."
 license = "GPL-3.0"
@@ -11,4 +11,4 @@ srcDir = "src"
 # Dependencies

 requires @["nim >= 1.4.0", "uuids"]
-requires "namespaced_logging >= 1.0.0"
+requires "namespaced_logging >= 2.0.2"
src/fiber_orm.nim
@@ -286,54 +286,29 @@
 ##
 import std/[json, macros, options, sequtils, strutils]
 import db_connector/db_common
-import namespaced_logging, uuids
+import uuids

 from std/unicode import capitalize

 import ./fiber_orm/db_common as fiber_db_common
-import ./fiber_orm/pool
-import ./fiber_orm/util
+import ./fiber_orm/[pool, util]
+import ./fiber_orm/private/logging

-export
-  pool,
-  util.columnNamesForModel,
-  util.dbFormat,
-  util.dbNameToIdent,
-  util.identNameToDb,
-  util.modelName,
-  util.rowToModel,
-  util.tableName
+export pool, util
+export logging.enableDbLogging

 type
-  PaginationParams* = object
-    pageSize*: int
-    offset*: int
-    orderBy*: Option[seq[string]]
-
   PagedRecords*[T] = object
     pagination*: Option[PaginationParams]
     records*: seq[T]
    totalRecords*: int

-  DbUpdateError* = object of CatchableError ##\
+  DbUpdateError* = object of CatchableError
     ## Error types raised when a DB modification fails.

-  NotFoundError* = object of CatchableError ##\
+  NotFoundError* = object of CatchableError
     ## Error type raised when no record matches a given ID

-var logService {.threadvar.}: Option[LogService]
-
-proc logQuery(methodName: string, sqlStmt: string, args: openArray[(string, string)] = []) =
-  # namespaced_logging would do this check for us, but we don't want to even
-  # build the log object if we're not actually logging
-  if logService.isNone: return
-  var log = %*{ "method": methodName, "sql": sqlStmt }
-  for (k, v) in args: log[k] = %v
-  logService.getLogger("fiber_orm/query").debug(log)
-
-proc enableDbLogging*(svc: LogService) =
-  logService = some(svc)
-
 proc newMutateClauses(): MutateClauses =
   return MutateClauses(
     columns: @[],
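Note: `PaginationParams` is not dropped here; it moves to `util.nim` later in this
diff and remains visible through the new wholesale `export pool, util`. A minimal
sketch of building pagination options against the new layout (variable and column
names are illustrative only):

    import std/options
    import fiber_orm

    # Fetch the third page of 10 records, ordered by the created_at column.
    let page = some(PaginationParams(
      pageSize: 10,
      offset: 20,
      orderBy: some(@["created_at"])))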
@@ -359,6 +334,8 @@ proc createRecord*[D: DbConnType, T](db: D, rec: T): T =
     " RETURNING " & columnNamesForModel(rec).join(",")

   logQuery("createRecord", sqlStmt)
+  debug(getLogger("query"), %*{ "values": mc.values })
+
   let newRow = db.getRow(sql(sqlStmt), mc.values)

   result = rowToModel(T, newRow)
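The next three hunks replace hand-rolled ORDER BY/LIMIT/OFFSET assembly with a
single helper, `getPagingClause`, added to `util.nim` at the end of this diff.
Behavior is unchanged; a sketch with hypothetical values:

    # PaginationParams(pageSize: 25, offset: 50, orderBy: none(seq[string]))
    # yields the clause " ORDER BY id LIMIT 25 OFFSET 50", so a fetch statement
    #   SELECT ... FROM todo_items WHERE owner_id = ?
    # becomes
    #   SELECT ... FROM todo_items WHERE owner_id = ? ORDER BY id LIMIT 25 OFFSET 50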
@@ -444,16 +421,7 @@ template findRecordsWhere*[D: DbConnType](
     "SELECT COUNT(*) FROM " & tableName(modelType) &
     " WHERE " & whereClause

-  if page.isSome:
-    let p = page.get
-    if p.orderBy.isSome:
-      let orderByClause = p.orderBy.get.map(identNameToDb).join(",")
-      fetchStmt &= " ORDER BY " & orderByClause
-    else:
-      fetchStmt &= " ORDER BY id"
-
-    fetchStmt &= " LIMIT " & $p.pageSize &
-      " OFFSET " & $p.offset
+  if page.isSome: fetchStmt &= getPagingClause(page.get)

   logQuery("findRecordsWhere", fetchStmt, [("values", values.join(", "))])
   let records = db.getAllRows(sql(fetchStmt), values).mapIt(rowToModel(modelType, it))
@@ -476,16 +444,7 @@ template getAllRecords*[D: DbConnType](

   var countStmt = "SELECT COUNT(*) FROM " & tableName(modelType)

-  if page.isSome:
-    let p = page.get
-    if p.orderBy.isSome:
-      let orderByClause = p.orderBy.get.map(identNameToDb).join(",")
-      fetchStmt &= " ORDER BY " & orderByClause
-    else:
-      fetchStmt &= " ORDER BY id"
-
-    fetchStmt &= " LIMIT " & $p.pageSize &
-      " OFFSET " & $p.offset
+  if page.isSome: fetchStmt &= getPagingClause(page.get)

   logQuery("getAllRecords", fetchStmt)
   let records = db.getAllRows(sql(fetchStmt)).mapIt(rowToModel(modelType, it))
@@ -516,16 +475,7 @@ template findRecordsBy*[D: DbConnType](
     "SELECT COUNT(*) FROM " & tableName(modelType) &
     " WHERE " & whereClause

-  if page.isSome:
-    let p = page.get
-    if p.orderBy.isSome:
-      let orderByClause = p.orderBy.get.map(identNameToDb).join(",")
-      fetchStmt &= " ORDER BY " & orderByClause
-    else:
-      fetchStmt &= " ORDER BY id"
-
-    fetchStmt &= " LIMIT " & $p.pageSize &
-      " OFFSET " & $p.offset
+  if page.isSome: fetchStmt &= getPagingClause(page.get)

   logQuery("findRecordsBy", fetchStmt, [("values", values.join(", "))])
   let records = db.getAllRows(sql(fetchStmt), values).mapIt(rowToModel(modelType, it))
@@ -538,6 +488,91 @@ template findRecordsBy*[D: DbConnType](
       else: db.getRow(sql(countStmt), values)[0].parseInt)


+template associate*[D: DbConnType, I, J](
+    db: D,
+    joinTableName: string,
+    rec1: I,
+    rec2: J): void =
+  ## Associate two records via a join table.
+
+  let insertStmt =
+    "INSERT INTO " & joinTableName &
+    " (" & tableName(I) & "_id, " & tableName(J) & "_id) " &
+    " VALUES (?, ?)"
+
+  logQuery("associate", insertStmt, [("id1", $rec1.id), ("id2", $rec2.id)])
+  db.exec(sql(insertStmt), [$rec1.id, $rec2.id])
+
+
+template findViaJoinTable*[D: DbConnType, L](
+    db: D,
+    joinTableName: string,
+    targetType: type,
+    rec: L,
+    page: Option[PaginationParams]): untyped =
+  ## Find all records of `targetType` that are associated with `rec` via a
+  ## join table.
+  let columns = columnNamesForModel(targetType).mapIt("t." & it).join(",")
+
+  var fetchStmt =
+    "SELECT " & columns &
+    " FROM " & tableName(targetType) & " AS t " &
+    " JOIN " & joinTableName & " AS jt " &
+    " ON t.id = jt." & tableName(targetType) & "_id " &
+    " WHERE jt." & tableName(rec) & "_id = ?"
+
+  var countStmt =
+    "SELECT COUNT(*) FROM " & joinTableName &
+    " WHERE " & tableName(rec) & "_id = ?"
+
+  if page.isSome: fetchStmt &= getPagingClause(page.get)
+
+  logQuery("findViaJoinTable", fetchStmt, [("id", $rec.id)])
+  let records = db.getAllRows(sql(fetchStmt), $rec.id)
+    .mapIt(rowToModel(targetType, it))
+
+  PagedRecords[targetType](
+    pagination: page,
+    records: records,
+    totalRecords:
+      if page.isNone: records.len
+      else: db.getRow(sql(countStmt), $rec.id)[0].parseInt)
+
+
+template findViaJoinTable*[D: DbConnType](
+    db: D,
+    joinTableName: string,
+    targetType: type,
+    lookupType: type,
+    id: typed,
+    page: Option[PaginationParams]): untyped =
+  ## Find all records of `targetType` that are associated with a record of
+  ## `lookupType` via a join table.
+  let columns = columnNamesForModel(targetType).mapIt("t." & it).join(",")
+
+  var fetchStmt =
+    "SELECT " & columns &
+    " FROM " & tableName(targetType) & " AS t " &
+    " JOIN " & joinTableName & " AS jt " &
+    " ON t.id = jt." & tableName(targetType) & "_id " &
+    " WHERE jt." & tableName(lookupType) & "_id = ?"
+
+  var countStmt =
+    "SELECT COUNT(*) FROM " & joinTableName &
+    " WHERE " & tableName(lookupType) & "_id = ?"
+
+  if page.isSome: fetchStmt &= getPagingClause(page.get)
+
+  logQuery("findViaJoinTable", fetchStmt, [("id", $id)])
+  let records = db.getAllRows(sql(fetchStmt), $id)
+    .mapIt(rowToModel(targetType, it))
+
+  PagedRecords[targetType](
+    pagination: page,
+    records: records,
+    totalRecords:
+      if page.isNone: records.len
+      else: db.getRow(sql(countStmt), $id)[0].parseInt)

 macro generateProcsForModels*(dbType: type, modelTypes: openarray[type]): untyped =
   ## Generate all standard access procedures for the given model types. For a
   ## `model class`_ named `TodoItem`, this will generate the following
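A sketch of using the new templates directly; most code will instead call the
procs generated by `generateJoinTableProcs` below. The model types, join table
name, and record variables here are hypothetical:

    # TodoItem and TimeEntry with join table "todo_items_time_entries"
    # (columns todo_item_id and time_entry_id).
    db.withConnection conn:
      associate(conn, "todo_items_time_entries", todoItem, timeEntry)
      let entries = findViaJoinTable(
        conn, "todo_items_time_entries", TimeEntry, todoItem,
        none[PaginationParams]())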
@@ -694,34 +729,102 @@ macro generateProcsForFieldLookups*(dbType: type, modelsAndFields: openarray[tup

     result.add procDefAST

-proc initPool*[D: DbConnType](
-    connect: proc(): D,
-    poolSize = 10,
-    hardCap = false,
-    healthCheckQuery = "SELECT 'true' AS alive"): DbConnPool[D] =
-
-  ## Initialize a new DbConnPool. See the `initDb` procedure in the `Example
-  ## Fiber ORM Usage`_ for an example
-  ##
-  ## * `connect` must be a factory which creates a new `DbConn`.
-  ## * `poolSize` sets the desired capacity of the connection pool.
-  ## * `hardCap` defaults to `false`.
-  ##   When `false`, the pool can grow beyond the configured capacity, but will
-  ##   release connections down to the its capacity (no less than `poolSize`).
-  ##
-  ##   When `true` the pool will not create more than its configured capacity.
-  ##   It a connection is requested, none are free, and the pool is at
-  ##   capacity, this will result in an Error being raised.
-  ## * `healthCheckQuery` should be a simple and fast SQL query that the pool
-  ##   can use to test the liveliness of pooled connections.
-  ##
-  ## .. _Example Fiber ORM Usage: #basic-usage-example-fiber-orm-usage
-
-  initDbConnPool(DbConnPoolConfig[D](
-    connect: connect,
-    poolSize: poolSize,
-    hardCap: hardCap,
-    healthCheckQuery: healthCheckQuery))
+macro generateJoinTableProcs*(
+    dbType, model1Type, model2Type: type,
+    joinTableName: string): untyped =
+  ## Generate lookup procedures for a pair of models with a join table. For
+  ## example, given the TODO database demonstrated above, where `TodoItem` and
+  ## `TimeEntry` have a many-to-many relationship, you might have a join table
+  ## `todo_items_time_entries` with columns `todo_item_id` and `time_entry_id`.
+  ## This macro will generate the following procedures:
+  ##
+  ## .. code-block:: Nim
+  ##    proc getTodoItemsByTimeEntry*(db: SampleDB, timeEntry: TimeEntry): PagedRecords[TodoItem]
+  ##    proc getTimeEntriesByTodoItem*(db: SampleDB, todoItem: TodoItem): PagedRecords[TimeEntry]
+  ##
+  ## `dbType` is expected to be some type that has a defined `withConnection`
+  ## procedure (see `Database Object`_ for details).
+  ##
+  ## .. _Database Object: #database-object
+  result = newStmtList()
+
+  if model1Type.getType[1].typeKind == ntyRef or
+     model2Type.getType[1].typeKind == ntyRef:
+    raise newException(ValueError,
+      "fiber_orm models must be objects, not refs")
+
+  let model1Name = $(model1Type.getType[1])
+  let model2Name = $(model2Type.getType[1])
+  let getModel1Name = ident("get" & pluralize(model1Name) & "By" & model2Name)
+  let getModel2Name = ident("get" & pluralize(model2Name) & "By" & model1Name)
+  let id1Type = typeOfColumn(model1Type, "id")
+  let id2Type = typeOfColumn(model2Type, "id")
+  let joinTableNameNode = newStrLitNode($joinTableName)
+
+  result.add quote do:
+    proc `getModel1Name`*(
+        db: `dbType`,
+        id: `id2Type`,
+        pagination = none[PaginationParams]()): PagedRecords[`model1Type`] =
+      db.withConnection conn:
+        result = findViaJoinTable(
+          conn,
+          `joinTableNameNode`,
+          `model1Type`,
+          `model2Type`,
+          id,
+          pagination)
+
+    proc `getModel1Name`*(
+        db: `dbType`,
+        rec: `model2Type`,
+        pagination = none[PaginationParams]()): PagedRecords[`model1Type`] =
+      db.withConnection conn:
+        result = findViaJoinTable(
+          conn,
+          `joinTableNameNode`,
+          `model1Type`,
+          rec,
+          pagination)
+
+    proc `getModel2Name`*(
+        db: `dbType`,
+        id: `id1Type`,
+        pagination = none[PaginationParams]()): PagedRecords[`model2Type`] =
+      db.withConnection conn:
+        result = findViaJoinTable(
+          conn,
+          `joinTableNameNode`,
+          `model2Type`,
+          `model1Type`,
+          id,
+          pagination)
+
+    proc `getModel2Name`*(
+        db: `dbType`,
+        rec: `model1Type`,
+        pagination = none[PaginationParams]()): PagedRecords[`model2Type`] =
+      db.withConnection conn:
+        result = findViaJoinTable(
+          conn,
+          `joinTableNameNode`,
+          `model2Type`,
+          rec,
+          pagination)
+
+    proc associate*(
+        db: `dbType`,
+        rec1: `model1Type`,
+        rec2: `model2Type`): void =
+      db.withConnection conn:
+        associate(conn, `joinTableNameNode`, rec1, rec2)
+
+    proc associate*(
+        db: `dbType`,
+        rec2: `model2Type`,
+        rec1: `model1Type`): void =
+      db.withConnection conn:
+        associate(conn, `joinTableNameNode`, rec1, rec2)

 template inTransaction*(db, body: untyped) =
   db.withConnection conn:
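Invoking the new macro for the docstring's TODO example would look like this
(`SampleDB`, `TodoItem`, and `TimeEntry` come from that example):

    generateJoinTableProcs(SampleDB, TodoItem, TimeEntry, "todo_items_time_entries")

    # Generates, among others:
    #   getTodoItemsByTimeEntry*(db, timeEntry, pagination = ...): PagedRecords[TodoItem]
    #   getTimeEntriesByTodoItem*(db, todoItem, pagination = ...): PagedRecords[TimeEntry]
    #   associate*(db, todoItem, timeEntry)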
src/fiber_orm/pool.nim
@@ -4,85 +4,77 @@

 ## Simple database connection pooling implementation compatible with Fiber ORM.

-import std/[sequtils, strutils, sugar]
+when (NimMajor, NimMinor, NimPatch) < (2, 0, 0):
+  when not defined(gcArc) and not defined(gcOrc):
+    {.error: "fiber_orm requires either --mm:arc or --mm:orc.".}
+
+import std/[deques, locks, sequtils, sugar]
 import db_connector/db_common

+from db_connector/db_sqlite import getRow, close
+from db_connector/db_postgres import getRow, close
+
 import ./db_common as fiber_db_common
+import ./private/logging

 type
-  DbConnPoolConfig*[D: DbConnType] = object
-    connect*: () -> D  ## Factory procedure to create a new DBConn
-    poolSize*: int     ## The pool capacity.
+  DbConnPool*[D: DbConnType] = ptr DbConnPoolObj[D]

-    hardCap*: bool     ## Is the pool capacity a hard cap?
-                       ##
-                       ## When `false`, the pool can grow beyond the
-                       ## configured capacity, but will release connections
-                       ## down to the its capacity (no less than `poolSize`).
-                       ##
-                       ## When `true` the pool will not create more than its
-                       ## configured capacity. It a connection is requested,
-                       ## none are free, and the pool is at capacity, this
-                       ## will result in an Error being raised.
-
-    healthCheckQuery*: string ## Should be a simple and fast SQL query that the
-                              ## pool can use to test the liveliness of pooled
-                              ## connections.
-
-  PooledDbConn[D: DbConnType] = ref object
-    conn: D
-    id: int
-    free: bool
-
-  DbConnPool*[D: DbConnType] = ref object
+  DbConnPoolObj[D: DbConnType] = object
     ## Database connection pool
-    conns: seq[PooledDbConn[D]]
-    cfg: DbConnPoolConfig[D]
-    lastId: int
+    connect: proc (): D {.raises: [DbError].}
+    healthCheckQuery: SqlQuery
+    entries: Deque[D]
+    cond: Cond
+    lock: Lock

-proc initDbConnPool*[D: DbConnType](cfg: DbConnPoolConfig[D]): DbConnPool[D] =
-  result = DbConnPool[D](
-    conns: @[],
-    cfg: cfg)
-
-proc newConn[D: DbConnType](pool: DbConnPool[D]): PooledDbConn[D] =
-  pool.lastId += 1
-  {.gcsafe.}:
-    let conn = pool.cfg.connect()
-  result = PooledDbConn[D](
-    conn: conn,
-    id: pool.lastId,
-    free: true)
-  pool.conns.add(result)
+proc close*[D: DbConnType](pool: DbConnPool[D]) =
+  ## Safely close all connections and release resources for the given pool.
+  getLogger("pool").debug("closing connection pool")
+  withLock(pool.lock):
+    while pool.entries.len > 0: close(pool.entries.popFirst())

-proc maintain[D: DbConnType](pool: DbConnPool[D]): void =
-  pool.conns.keepIf(proc (pc: PooledDbConn[D]): bool =
-    if not pc.free: return true
+  deinitLock(pool.lock)
+  deinitCond(pool.cond)
+  `=destroy`(pool[])
+  deallocShared(pool)

-    try:
-      discard getRow(pc.conn, sql(pool.cfg.healthCheckQuery), [])
-      return true
-    except:
-      try: pc.conn.close() # try to close the connection
-      except: discard ""
-      return false
-  )
-
-  let freeConns = pool.conns.filterIt(it.free)
-  if pool.conns.len > pool.cfg.poolSize and freeConns.len > 0:
-    let numToCull = min(freeConns.len, pool.conns.len - pool.cfg.poolSize)
-
-    if numToCull > 0:
-      let toCull = freeConns[0..numToCull]
-      pool.conns.keepIf((pc) => toCull.allIt(it.id != pc.id))
-      for culled in toCull:
-        try: culled.conn.close()
-        except: discard ""
+proc newDbConnPool*[D: DbConnType](
+    poolSize: int,
+    connectFunc: proc(): D {.raises: [DbError].},
+    healthCheckQuery = "SELECT 1;"): DbConnPool[D] =
+  ## Initialize a new DbConnPool. See the `initDb` procedure in the `Example
+  ## Fiber ORM Usage`_ for an example
+  ##
+  ## * `connect` must be a factory which creates a new `DbConn`.
+  ## * `poolSize` sets the desired capacity of the connection pool.
+  ## * `healthCheckQuery` should be a simple and fast SQL query that the pool
+  ##   can use to test the liveliness of pooled connections. By default it uses
+  ##   `SELECT 1;`
+  ##
+  ## .. _Example Fiber ORM Usage: ../fiber_orm.html#basic-usage-example-fiber-orm-usage
+
+  result = cast[DbConnPool[D]](allocShared0(sizeof(DbConnPoolObj[D])))
+  initCond(result.cond)
+  initLock(result.lock)
+  result.entries = initDeque[D](poolSize)
+  result.connect = connectFunc
+  result.healthCheckQuery = sql(healthCheckQuery)
+
+  try:
+    for _ in 0 ..< poolSize: result.entries.addLast(connectFunc())
+  except DbError as ex:
+    try: result.close()
+    except: discard
+    getLogger("pool").error(
+      msg = "unable to initialize connection pool",
+      err = ex)
+    raise ex
+
+
+proc take*[D: DbConnType](pool: DbConnPool[D]): D {.raises: [DbError], gcsafe.} =
   ## Request a connection from the pool. Returns a DbConn if the pool has free
   ## connections, or if it has the capacity to create a new connection. If the
   ## pool is configured with a hard capacity limit and is out of free
@@ -90,25 +82,33 @@ proc take*[D: DbConnType](pool: DbConnPool[D]): tuple[id: int, conn: D] =
   ##
   ## Connections taken must be returned via `release` when the caller is
   ## finished using them in order for them to be released back to the pool.
-  pool.maintain
-  let freeConns = pool.conns.filterIt(it.free)
+  withLock(pool.lock):
+    while pool.entries.len == 0: wait(pool.cond, pool.lock)
+    result = pool.entries.popFirst()

-  let reserved =
-    if freeConns.len > 0: freeConns[0]
-    else: pool.newConn()
+  # check that the connection is healthy
+  try: discard getRow(result, pool.healthCheckQuery, [])
+  except DbError:
+    {.gcsafe.}:
+      # if it's not, let's try to close it and create a new connection
+      try:
+        getLogger("pool").info(
+          "pooled connection failed health check, opening a new connection")
+        close(result)
+      except: discard
+      result = pool.connect()

-  reserved.free = false
-  return (id: reserved.id, conn: reserved.conn)
-
-proc release*[D: DbConnType](pool: DbConnPool[D], connId: int): void =
+proc release*[D: DbConnType](pool: DbConnPool[D], conn: D) {.raises: [], gcsafe.} =
   ## Release a connection back to the pool.
-  let foundConn = pool.conns.filterIt(it.id == connId)
-  if foundConn.len > 0: foundConn[0].free = true
+  withLock(pool.lock):
+    pool.entries.addLast(conn)
+    signal(pool.cond)

 template withConnection*[D: DbConnType](pool: DbConnPool[D], conn, stmt: untyped): untyped =
   ## Convenience template to provide a connection from the pool for use in a
   ## statement block, automatically releasing that connection when done.
   block:
-    let (connId, conn) = take(pool)
+    let conn = take(pool)
     try: stmt
-    finally: release(pool, connId)
+    finally: release(pool, conn)
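A sketch of constructing and using the new pool. The connection parameters are
placeholders, and using `db_postgres.open` as the connection factory (and its
raising only `DbError`) is an assumption:

    import db_connector/db_postgres
    import fiber_orm/pool

    proc connectToDb(): DbConn {.raises: [DbError].} =
      # Placeholder credentials; open raises DbError on failure.
      open("localhost", "postgres", "", "fiber_orm_test")

    let pool = newDbConnPool[DbConn](poolSize = 4, connectFunc = connectToDb)

    # take/release are now connection-based rather than id-based; withConnection
    # still wraps the take/release pair safely.
    pool.withConnection conn:
      discard conn.getRow(sql"SELECT 1;")

    pool.close()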
src/fiber_orm/private/logging.nim (new file, 34 lines)
@@ -0,0 +1,34 @@
+import std/[json, options]
+
+import namespaced_logging
+
+export namespaced_logging.log
+export namespaced_logging.debug
+export namespaced_logging.info
+export namespaced_logging.notice
+export namespaced_logging.warn
+export namespaced_logging.error
+export namespaced_logging.fatal
+
+var logService {.threadvar.}: Option[ThreadLocalLogService]
+var logger {.threadvar.}: Option[Logger]
+
+proc makeQueryLogEntry(
+    m: string,
+    sql: string,
+    args: openArray[(string, string)] = []): JsonNode =
+  result = %*{ "method": m, "sql": sql }
+  for (k, v) in args: result[k] = %v
+
+proc logQuery*(methodName: string, sqlStmt: string, args: openArray[(string, string)] = []) =
+  # namespaced_logging would do this check for us, but we don't want to even
+  # build the log object if we're not actually logging
+  if logService.isNone: return
+  if logger.isNone: logger = logService.getLogger("fiber_orm/query")
+  logger.debug(makeQueryLogEntry(methodName, sqlStmt, args))
+
+proc enableDbLogging*(svc: ThreadLocalLogService) =
+  logService = some(svc)
+
+proc getLogger*(scope: string): Option[Logger] =
+  logService.getLogger("fiber_orm/" & scope)
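Wiring the new logging module up might look like the following. The exact
constructor for a `ThreadLocalLogService` belongs to namespaced_logging and is
not shown in this diff, so the `initLogService()` call below is a placeholder
assumption:

    # let svc: ThreadLocalLogService = initLogService()  # placeholder
    # enableDbLogging(svc)

    # Once enabled, each generated query produces a JSON debug entry such as:
    #   {"method": "findRecordsWhere", "sql": "SELECT ...", "values": "..."}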
src/fiber_orm/util.nim
@@ -9,6 +9,11 @@ import uuids
 import std/nre except toSeq

 type
+  PaginationParams* = object
+    pageSize*: int
+    offset*: int
+    orderBy*: Option[seq[string]]
+
   MutateClauses* = object
     ## Data structure to hold information about the clauses that should be
     ## added to a query. How these clauses are used will depend on the query.
@@ -22,9 +27,11 @@ const ISO_8601_FORMATS = @[
   "yyyy-MM-dd'T'HH:mm:ssz",
   "yyyy-MM-dd'T'HH:mm:sszzz",
   "yyyy-MM-dd'T'HH:mm:ss'.'fffzzz",
+  "yyyy-MM-dd'T'HH:mm:ss'.'ffffzzz",
   "yyyy-MM-dd HH:mm:ssz",
   "yyyy-MM-dd HH:mm:sszzz",
-  "yyyy-MM-dd HH:mm:ss'.'fffzzz"
+  "yyyy-MM-dd HH:mm:ss'.'fffzzz",
+  "yyyy-MM-dd HH:mm:ss'.'ffffzzz"
 ]

 proc parseIso8601(val: string): DateTime =
@@ -126,18 +133,20 @@ proc parsePGDatetime*(val: string): DateTime =

   var correctedVal = val;

-  # PostgreSQL will truncate any trailing 0's in the millisecond value leading
-  # to values like `2020-01-01 16:42.3+00`. This cannot currently be parsed by
-  # the standard times format as it expects exactly three digits for
-  # millisecond values. So we have to detect this and pad out the millisecond
-  # value to 3 digits.
-  let PG_PARTIAL_FORMAT_REGEX = re"(\d{4}-\d{2}-\d{2}( |'T')\d{2}:\d{2}:\d{2}\.)(\d{1,2})(\S+)?"
+  # The Nim `times#format` function only recognizes 3-digit millisecond values,
+  # but PostgreSQL will sometimes send 1-2 digits, truncating any trailing 0's,
+  # or sometimes provide more than three digits of precision in the millisecond
+  # value, leading to values like `2020-01-01 16:42.3+00` or
+  # `2025-01-06 00:56:00.9007+00`. This cannot currently be parsed by the
+  # standard times format as it expects exactly three digits for millisecond
+  # values. So we have to detect this and coerce the millisecond value to
+  # exactly 3 digits.
+  let PG_PARTIAL_FORMAT_REGEX = re"(\d{4}-\d{2}-\d{2}( |'T')\d{2}:\d{2}:\d{2}\.)(\d+)(\S+)?"

   let match = val.match(PG_PARTIAL_FORMAT_REGEX)

   if match.isSome:
     let c = match.get.captures
-    if c.toSeq.len == 2: correctedVal = c[0] & alignLeft(c[2], 3, '0')
-    else: correctedVal = c[0] & alignLeft(c[2], 3, '0') & c[3]
+    if c.toSeq.len == 2: correctedVal = c[0] & alignLeft(c[2], 3, '0')[0..2]
+    else: correctedVal = c[0] & alignLeft(c[2], 3, '0')[0..2] & c[3]

   var errStr = ""
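The padding-and-truncation step coerces any fractional-second run to exactly
three digits; a self-contained check of the two documented cases:

    import std/strutils

    # "3" (from "...16:42:00.3+00") pads right to "300";
    # "9007" (from "...00:56:00.9007+00") truncates to "900".
    assert alignLeft("3", 3, '0')[0..2] == "300"
    assert alignLeft("9007", 3, '0')[0..2] == "900"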
@@ -146,7 +155,7 @@ proc parsePGDatetime*(val: string): DateTime =
     try: return correctedVal.parse(df)
     except: errStr &= "\n\t" & getCurrentExceptionMsg()

-  raise newException(ValueError, "Cannot parse PG date. Tried:" & errStr)
+  raise newException(ValueError, "Cannot parse PG date '" & correctedVal & "'. Tried:" & errStr)

 proc parseDbArray*(val: string): seq[string] =
   ## Parse a Postgres array column into a Nim seq[string]
@@ -208,7 +217,7 @@ proc parseDbArray*(val: string): seq[string] =
   result.add(curStr)

 func createParseStmt*(t, value: NimNode): NimNode =
-  ## Utility method to create the Nim cod required to parse a value coming from
+  ## Utility method to create the Nim code required to parse a value coming from
   ## a database query. This is used by functions like `rowToModel` to parse
   ## the database columns into the Nim object fields.

@@ -231,7 +240,7 @@ func createParseStmt*(t, value: NimNode): NimNode =
     elif t.getType == DateTime.getType:
       result = quote do: parsePGDatetime(`value`)

-    else: error "Unknown value object type: " & $t.getTypeInst
+    else: error "Cannot parse column with unknown object type: " & $t.getTypeInst

   elif t.typeKind == ntyGenericInst:
@@ -245,7 +254,7 @@ func createParseStmt*(t, value: NimNode): NimNode =
       if `value`.len == 0: none[`innerType`]()
       else: some(`parseStmt`)

-    else: error "Unknown generic instance type: " & $t.getTypeInst
+    else: error "Cannot parse column with unknown generic instance type: " & $t.getTypeInst

   elif t.typeKind == ntyRef:
@@ -253,7 +262,7 @@ func createParseStmt*(t, value: NimNode): NimNode =
       result = quote do: parseJson(`value`)

     else:
-      error "Unknown ref type: " & $t.getTypeInst
+      error "Cannot parse column with unknown ref type: " & $t.getTypeInst

   elif t.typeKind == ntySequence:
     let innerType = t[1]
@@ -272,14 +281,14 @@ func createParseStmt*(t, value: NimNode): NimNode =
     result = quote do: parseFloat(`value`)

   elif t.typeKind == ntyBool:
-    result = quote do: "true".startsWith(`value`.toLower)
+    result = quote do: "true".startsWith(`value`.toLower) or `value` == "1"

   elif t.typeKind == ntyEnum:
     let innerType = t.getTypeInst
     result = quote do: parseEnum[`innerType`](`value`)

   else:
-    error "Unknown value type: " & $t.typeKind
+    error "Cannot parse column with unknown value type: " & $t.typeKind

 func fields(t: NimNode): seq[tuple[fieldIdent: NimNode, fieldType: NimNode]] =
   #[
@@ -447,6 +456,19 @@ macro populateMutateClauses*(t: typed, newRecord: bool, mc: var MutateClauses):
       `mc`.placeholders.add("?")
       `mc`.values.add(dbFormat(`t`.`fieldIdent`))

+
+proc getPagingClause*(page: PaginationParams): string =
+  ## Given a `PaginationParams` object, return the SQL clause necessary to
+  ## limit the number of records returned by a query.
+  result = ""
+  if page.orderBy.isSome:
+    let orderByClause = page.orderBy.get.map(identNameToDb).join(",")
+    result &= " ORDER BY " & orderByClause
+  else:
+    result &= " ORDER BY id"
+
+  result &= " LIMIT " & $page.pageSize & " OFFSET " & $page.offset
+
 ## .. _model class: ../fiber_orm.html#objectminusrelational-modeling-model-class
 ## .. _rules for name mapping: ../fiber_orm.html
 ## .. _table name: ../fiber_orm.html
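The extracted helper in use (values hypothetical):

    import std/options

    let clause = getPagingClause(PaginationParams(
      pageSize: 10,
      offset: 30,
      orderBy: some(@["dueDate"])))
    # clause == " ORDER BY due_date LIMIT 10 OFFSET 30"
    # (assuming identNameToDb maps dueDate -> due_date per the name-mapping rules)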