Compare commits


No commits in common. "main" and "2.0.0" have entirely different histories.
main ... 2.0.0

6 changed files with 197 additions and 282 deletions

View File

@ -57,7 +57,7 @@ Models may be defined as:
.. code-block:: Nim
# models.nim
import std/[options, times]
import std/options, std/times
import uuids
type
@ -82,8 +82,6 @@ Using Fiber ORM we can generate a data access layer with:
.. code-block:: Nim
# db.nim
import std/[options]
import db_connectors/db_postgres
import fiber_orm
import ./models.nim
@ -104,7 +102,6 @@ This will generate the following procedures:
.. code-block:: Nim
proc getTodoItem*(db: TodoDB, id: UUID): TodoItem;
proc getTodoItemIfItExists*(db: TodoDB, id: UUID): Option[TodoItem];
proc getAllTodoItems*(db: TodoDB): seq[TodoItem];
proc createTodoItem*(db: TodoDB, rec: TodoItem): TodoItem;
proc updateTodoItem*(db: TodoDB, rec: TodoItem): bool;
@ -115,7 +112,6 @@ This will generate the following procedures:
values: varargs[string, dbFormat]): seq[TodoItem];
proc getTimeEntry*(db: TodoDB, id: UUID): TimeEntry;
proc getTimeEntryIfItExists*(db: TodoDB, id: UUID): Option[TimeEntry];
proc getAllTimeEntries*(db: TodoDB): seq[TimeEntry];
proc createTimeEntry*(db: TodoDB, rec: TimeEntry): TimeEntry;
proc updateTimeEntry*(db: TodoDB, rec: TimeEntry): bool;
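With this generated data access layer in place, application code can call the procedures directly. A minimal sketch follows (the `initDb` helper and the `summary` field are illustrative assumptions, not taken from the files above):

.. code-block:: Nim
  # app.nim (illustrative sketch only)
  import std/options
  import uuids
  import ./db, ./models

  let db = initDb()                      # hypothetical constructor wrapping the pool
  let created = db.createTodoItem(
    TodoItem(summary: "write the docs")) # `summary` is an assumed field
  let found = db.getTodoItemIfItExists(created.id)
  if found.isSome:
    echo found.get.summary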

View File

@ -1,6 +1,6 @@
# Package
version = "3.1.1"
version = "2.0.0"
author = "Jonathan Bernard"
description = "Lightweight Postgres ORM for Nim."
license = "GPL-3.0"
@ -11,4 +11,4 @@ srcDir = "src"
# Dependencies
requires @["nim >= 1.4.0", "uuids"]
requires "namespaced_logging >= 1.0.0"
requires "https://git.jdb-software.com/jdb/nim-namespaced-logging.git"

View File

@ -1,6 +1,6 @@
# Fiber ORM
#
# Copyright 2019-2024 Jonathan Bernard <jonathan@jdbernard.com>
# Copyright 2019 Jonathan Bernard <jonathan@jdbernard.com>
## Lightweight ORM supporting the `Postgres`_ and `SQLite`_ databases in Nim.
## It supports a simple, opinionated model mapper to generate SQL queries based
@ -107,7 +107,6 @@
##
## proc createTodoItem*(db: TodoDB, rec: TodoItem): TodoItem;
## proc updateTodoItem*(db: TodoDB, rec: TodoItem): bool;
## proc createOrUpdateTodoItem*(db: TodoDB, rec: TodoItem): bool;
## proc deleteTodoItem*(db: TodoDB, rec: TodoItem): bool;
## proc deleteTodoItem*(db: TodoDB, id: UUID): bool;
##
@ -264,64 +263,62 @@
## In the example above the `pool.DbConnPool`_ object is used as database
## object type (aliased as `TodoDB`). This is the intended usage pattern, but
## anything can be passed as the database object type so long as there is a
## defined `withConnection` template that provides a `conn: DbConn` object
## defined `withConn` template that provides an injected `conn: DbConn` object
## to the provided statement body.
##
## For example, a valid database object implementation that opens a new
## connection for every request might look like this:
##
## .. code-block:: Nim
## import db_connector/db_postgres
## import std/db_postgres
##
## type TodoDB* = object
## connString: string
##
## template withConnection*(db: TodoDB, stmt: untyped): untyped =
## block:
## let conn = open("", "", "", db.connString)
## try: stmt
## finally: close(conn)
## template withConn*(db: TodoDB, stmt: untyped): untyped =
## let conn {.inject.} = open("", "", "", db.connString)
## try: stmt
## finally: close(conn)
##
## .. _pool.DbConnPool: fiber_orm/pool.html#DbConnPool
##
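For reference, a hand-written sketch of what one generated procedure looks like when it uses the 2.0.0-side `withConn` form (this mirrors the macro output shown further down in this module):

.. code-block:: Nim
  proc getTodoItem*(db: TodoDB, id: UUID): TodoItem =
    db.withConn:
      result = getRecord(conn, TodoItem, id)  # `conn` is injected by withConn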
import std/[json, macros, options, sequtils, strutils]
import db_connector/db_common
import std/db_postgres, std/macros, std/options, std/sequtils, std/strutils
import namespaced_logging, uuids
from std/unicode import capitalize
import ./fiber_orm/db_common as fiber_db_common
import ./fiber_orm/pool
import ./fiber_orm/util
export pool, util
export
pool,
util.columnNamesForModel,
util.dbFormat,
util.dbNameToIdent,
util.identNameToDb,
util.modelName,
util.rowToModel,
util.tableName
type
PaginationParams* = object
pageSize*: int
offset*: int
orderBy*: Option[string]
PagedRecords*[T] = object
pagination*: Option[PaginationParams]
records*: seq[T]
totalRecords*: int
DbUpdateError* = object of CatchableError ##\
## Error types raised when a DB modification fails.
NotFoundError* = object of CatchableError ##\
## Error type raised when no record matches a given ID
var logService {.threadvar.}: Option[LogService]
var logger {.threadvar.}: Option[Logger]
var logNs {.threadvar.}: LoggingNamespace
proc logQuery*(methodName: string, sqlStmt: string, args: openArray[(string, string)] = []) =
# namespaced_logging would do this check for us, but we don't want to even
# build the log object if we're not actually logging
if logService.isNone: return
if logger.isNone: logger = logService.getLogger("fiber_orm/query")
var log = %*{ "method": methodName, "sql": sqlStmt }
for (k, v) in args: log[k] = %v
logger.debug(log)
proc enableDbLogging*(svc: LogService) =
logService = some(svc)
template log(): untyped =
if logNs.isNil: logNs = initLoggingNamespace(name = "fiber_orm", level = lvlNotice)
logNs
proc newMutateClauses(): MutateClauses =
return MutateClauses(
@ -329,7 +326,7 @@ proc newMutateClauses(): MutateClauses =
placeholders: @[],
values: @[])
proc createRecord*[D: DbConnType, T](db: D, rec: T): T =
proc createRecord*[T](db: DbConn, rec: T): T =
## Create a new record. `rec` is expected to be a `model class`_. The `id`
## field is only set if it is non-empty (see `ID Field`_ for details).
##
@ -347,12 +344,12 @@ proc createRecord*[D: DbConnType, T](db: D, rec: T): T =
" VALUES (" & mc.placeholders.join(",") & ") " &
" RETURNING " & columnNamesForModel(rec).join(",")
logQuery("createRecord", sqlStmt)
log().debug "createRecord: [" & sqlStmt & "]"
let newRow = db.getRow(sql(sqlStmt), mc.values)
result = rowToModel(T, newRow)
proc updateRecord*[D: DbConnType, T](db: D, rec: T): bool =
proc updateRecord*[T](db: DbConn, rec: T): bool =
## Update a record by id. `rec` is expected to be a `model class`_.
var mc = newMutateClauses()
populateMutateClauses(rec, false, mc)
@ -363,51 +360,33 @@ proc updateRecord*[D: DbConnType, T](db: D, rec: T): bool =
" SET " & setClause &
" WHERE id = ? "
logQuery("updateRecord", sqlStmt, [("id", $rec.id)])
log().debug "updateRecord: [" & sqlStmt & "] id: " & $rec.id
let numRowsUpdated = db.execAffectedRows(sql(sqlStmt), mc.values.concat(@[$rec.id]))
return numRowsUpdated > 0;
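These procs can also be exercised against a raw connection. A sketch, assuming a Postgres `DbConn` and the illustrative `TodoItem` model from the module docs (the `summary` field is an assumption):

.. code-block:: Nim
  let conn = open("", "", "", "host=localhost dbname=todo")  # placeholder params
  var item = TodoItem(summary: "write the docs")
  item = conn.createRecord(item)       # INSERT ... RETURNING, id populated by the DB
  if not conn.updateRecord(item):
    echo "no row updated for id ", item.id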
proc createOrUpdateRecord*[D: DbConnType, T](db: D, rec: T): T =
## Create or update a record. `rec` is expected to be a `model class`_. If
## the `id` field is unset, or if there is no existing record with the given
## id, a new record is inserted. Otherwise, the existing record is updated.
##
## Note that this does not perform partial updates, all fields are updated.
let findRecordStmt = "SELECT id FROM " & tableName(rec) & " WHERE id = ?"
logQuery("createOrUpdateRecord", findRecordStmt, [("id", $rec.id)])
let rows = db.getAllRows(sql(findRecordStmt), [$rec.id])
if rows.len == 0: result = createRecord(db, rec)
else:
result = rec
if not updateRecord(db, rec):
raise newException(DbUpdateError,
"unable to update " & modelName(rec) & " for id " & $rec.id)
template deleteRecord*[D: DbConnType](db: D, modelType: type, id: typed): untyped =
template deleteRecord*(db: DbConn, modelType: type, id: typed): untyped =
## Delete a record by id.
let sqlStmt = "DELETE FROM " & tableName(modelType) & " WHERE id = ?"
logQuery("deleteRecord", sqlStmt, [("id", $id)])
log().debug "deleteRecord: [" & sqlStmt & "] id: " & $id
db.tryExec(sql(sqlStmt), $id)
proc deleteRecord*[D: DbConnType, T](db: D, rec: T): bool =
proc deleteRecord*[T](db: DbConn, rec: T): bool =
## Delete a record by `id`_.
##
## .. _id: #model-class-id-field
let sqlStmt = "DELETE FROM " & tableName(rec) & " WHERE id = ?"
logQuery("deleteRecord", sqlStmt, [("id", $rec.id)])
log().debug "deleteRecord: [" & sqlStmt & "] id: " & $rec.id
return db.tryExec(sql(sqlStmt), $rec.id)
template getRecord*[D: DbConnType](db: D, modelType: type, id: typed): untyped =
template getRecord*(db: DbConn, modelType: type, id: typed): untyped =
## Fetch a record by id.
let sqlStmt =
"SELECT " & columnNamesForModel(modelType).join(",") &
" FROM " & tableName(modelType) &
" WHERE id = ?"
logQuery("getRecord", sqlStmt, [("id", $id)])
log().debug "getRecord: [" & sqlStmt & "] id: " & $id
let row = db.getRow(sql(sqlStmt), @[$id])
if allIt(row, it.len == 0):
@ -415,8 +394,8 @@ template getRecord*[D: DbConnType](db: D, modelType: type, id: typed): untyped =
rowToModel(modelType, row)
template findRecordsWhere*[D: DbConnType](
db: D,
template findRecordsWhere*(
db: DbConn,
modelType: type,
whereClause: string,
values: varargs[string, dbFormat],
@ -433,9 +412,17 @@ template findRecordsWhere*[D: DbConnType](
"SELECT COUNT(*) FROM " & tableName(modelType) &
" WHERE " & whereClause
if page.isSome: fetchStmt &= getPagingClause(page.get)
if page.isSome:
let p = page.get
if p.orderBy.isSome:
fetchStmt &= " ORDER BY " & p.orderBy.get
else:
fetchStmt &= " ORDER BY id"
logQuery("findRecordsWhere", fetchStmt, [("values", values.join(", "))])
fetchStmt &= " LIMIT " & $p.pageSize &
" OFFSET " & $p.offset
log().debug "findRecordsWhere: [" & fetchStmt & "] values: (" & values.join(", ") & ")"
let records = db.getAllRows(sql(fetchStmt), values).mapIt(rowToModel(modelType, it))
PagedRecords[modelType](
@ -445,8 +432,8 @@ template findRecordsWhere*[D: DbConnType](
if page.isNone: records.len
else: db.getRow(sql(countStmt), values)[0].parseInt)
template getAllRecords*[D: DbConnType](
db: D,
template getAllRecords*(
db: DbConn,
modelType: type,
page: Option[PaginationParams]): untyped =
## Fetch all records of the given type.
@ -456,9 +443,17 @@ template getAllRecords*[D: DbConnType](
var countStmt = "SELECT COUNT(*) FROM " & tableName(modelType)
if page.isSome: fetchStmt &= getPagingClause(page.get)
if page.isSome:
let p = page.get
if p.orderBy.isSome:
fetchStmt &= " ORDER BY " & p.orderBy.get
else:
fetchStmt &= " ORDER BY id"
logQuery("getAllRecords", fetchStmt)
fetchStmt &= " LIMIT " & $p.pageSize &
" OFFSET " & $p.offset
log().debug "getAllRecords: [" & fetchStmt & "]"
let records = db.getAllRows(sql(fetchStmt)).mapIt(rowToModel(modelType, it))
PagedRecords[modelType](
@ -469,8 +464,8 @@ template getAllRecords*[D: DbConnType](
else: db.getRow(sql(countStmt))[0].parseInt)
template findRecordsBy*[D: DbConnType](
db: D,
template findRecordsBy*(
db: DbConn,
modelType: type,
lookups: seq[tuple[field: string, value: string]],
page: Option[PaginationParams]): untyped =
@ -487,9 +482,17 @@ template findRecordsBy*[D: DbConnType](
"SELECT COUNT(*) FROM " & tableName(modelType) &
" WHERE " & whereClause
if page.isSome: fetchStmt &= getPagingClause(page.get)
if page.isSome:
let p = page.get
if p.orderBy.isSome:
fetchStmt &= " ORDER BY " & p.orderBy.get
else:
fetchStmt &= " ORDER BY id"
logQuery("findRecordsBy", fetchStmt, [("values", values.join(", "))])
fetchStmt &= " LIMIT " & $p.pageSize &
" OFFSET " & $p.offset
log().debug "findRecordsBy: [" & fetchStmt & "] values (" & values.join(", ") & ")"
let records = db.getAllRows(sql(fetchStmt), values).mapIt(rowToModel(modelType, it))
PagedRecords[modelType](
@ -512,66 +515,51 @@ macro generateProcsForModels*(dbType: type, modelTypes: openarray[type]): untype
## proc deleteTodoItem*(db: TodoDB, rec: TodoItem): bool;
## proc deleteTodoItem*(db: TodoDB, id: idType): bool;
## proc updateTodoItem*(db: TodoDB, rec: TodoItem): bool;
## proc createOrUpdateTodoItem*(db: TodoDB, rec: TodoItem): bool;
##
## proc findTodoItemsWhere*(
## db: TodoDB, whereClause: string, values: varargs[string]): TodoItem;
##
## `dbType` is expected to be some type that has a defined `withConnection`
## `dbType` is expected to be some type that has a defined `withConn`
## procedure (see `Database Object`_ for details).
##
## .. _Database Object: #database-object
result = newStmtList()
for t in modelTypes:
if t.getType[1].typeKind == ntyRef:
raise newException(ValueError,
"fiber_orm model object must be objects, not refs")
let modelName = $(t.getType[1])
let getName = ident("get" & modelName)
let getIfExistsName = ident("get" & modelName & "IfItExists")
let getAllName = ident("getAll" & pluralize(modelName))
let findWhereName = ident("find" & pluralize(modelName) & "Where")
let createName = ident("create" & modelName)
let updateName = ident("update" & modelName)
let createOrUpdateName = ident("createOrUpdate" & modelName)
let deleteName = ident("delete" & modelName)
let idType = typeOfColumn(t, "id")
result.add quote do:
proc `getName`*(db: `dbType`, id: `idType`): `t` =
db.withConnection conn: result = getRecord(conn, `t`, id)
proc `getIfExistsName`*(db: `dbType`, id: `idType`): Option[`t`] =
db.withConnection conn:
try: result = some(getRecord(conn, `t`, id))
except NotFoundError: result = none[`t`]()
db.withConn: result = getRecord(conn, `t`, id)
proc `getAllName`*(db: `dbType`, pagination = none[PaginationParams]()): PagedRecords[`t`] =
db.withConnection conn: result = getAllRecords(conn, `t`, pagination)
db.withConn: result = getAllRecords(conn, `t`, pagination)
proc `findWhereName`*(
db: `dbType`,
whereClause: string,
values: varargs[string, dbFormat],
pagination = none[PaginationParams]()): PagedRecords[`t`] =
db.withConnection conn:
db.withConn:
result = findRecordsWhere(conn, `t`, whereClause, values, pagination)
proc `createName`*(db: `dbType`, rec: `t`): `t` =
db.withConnection conn: result = createRecord(conn, rec)
db.withConn: result = createRecord(conn, rec)
proc `updateName`*(db: `dbType`, rec: `t`): bool =
db.withConnection conn: result = updateRecord(conn, rec)
proc `createOrUpdateName`*(db: `dbType`, rec: `t`): `t` =
db.inTransaction: result = createOrUpdateRecord(conn, rec)
db.withConn: result = updateRecord(conn, rec)
proc `deleteName`*(db: `dbType`, rec: `t`): bool =
db.withConnection conn: result = deleteRecord(conn, rec)
db.withConn: result = deleteRecord(conn, rec)
proc `deleteName`*(db: `dbType`, id: `idType`): bool =
db.withConnection conn: result = deleteRecord(conn, `t`, id)
db.withConn: result = deleteRecord(conn, `t`, id)
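Putting the macro to use, the README's `db.nim` reduces to roughly this sketch (module names are assumptions; on the 2.0.0 side `DbConnPool` is non-generic):

.. code-block:: Nim
  # db.nim (sketch)
  import fiber_orm
  import ./models

  type TodoDB* = DbConnPool
  generateProcsForModels(TodoDB, [TodoItem, TimeEntry])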
macro generateLookup*(dbType: type, modelType: type, fields: seq[string]): untyped =
## Create a lookup procedure for a given set of field names. For example,
@ -591,7 +579,7 @@ macro generateLookup*(dbType: type, modelType: type, fields: seq[string]): untyp
# Create proc skeleton
result = quote do:
proc `procName`*(db: `dbType`): PagedRecords[`modelType`] =
db.withConnection conn: result = findRecordsBy(conn, `modelType`)
db.withConn: result = findRecordsBy(conn, `modelType`)
var callParams = quote do: @[]
@ -615,11 +603,11 @@ macro generateLookup*(dbType: type, modelType: type, fields: seq[string]): untyp
# Add the call params to the inner procedure call
# result[6][0][1][0][1] is
# ProcDef -> [6]: StmtList (body) -> [0]: Command ->
# [2]: StmtList (withConnection body) -> [0]: Asgn (result =) ->
# ProcDef -> [6]: StmtList (body) -> [0]: Call ->
# [1]: StmtList (withConn body) -> [0]: Asgn (result =) ->
# [1]: Call (inner findRecords invocation)
result[6][0][2][0][1].add(callParams)
result[6][0][2][0][1].add(quote do: pagination)
result[6][0][1][0][1].add(callParams)
result[6][0][1][0][1].add(quote do: pagination)
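An invocation sketch for `generateLookup` (the `owner` field is hypothetical):

.. code-block:: Nim
  # Roughly generates:
  #   proc findTodoItemsByOwner*(db: TodoDB, owner: string,
  #     pagination = none[PaginationParams]()): PagedRecords[TodoItem]
  generateLookup(TodoDB, TodoItem, @["owner"])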
macro generateProcsForFieldLookups*(dbType: type, modelsAndFields: openarray[tuple[t: type, fields: seq[string]]]): untyped =
result = newStmtList()
@ -633,7 +621,7 @@ macro generateProcsForFieldLookups*(dbType: type, modelsAndFields: openarray[tup
# Create proc skeleton
let procDefAST = quote do:
proc `procName`*(db: `dbType`): PagedRecords[`modelType`] =
db.withConnection conn: result = findRecordsBy(conn, `modelType`)
db.withConn: result = findRecordsBy(conn, `modelType`)
var callParams = quote do: @[]
@ -656,12 +644,11 @@ macro generateProcsForFieldLookups*(dbType: type, modelsAndFields: openarray[tup
result.add procDefAST
proc initPool*[D: DbConnType](
connect: proc(): D,
proc initPool*(
connect: proc(): DbConn,
poolSize = 10,
hardCap = false,
healthCheckQuery = "SELECT 'true' AS alive"): DbConnPool[D] =
healthCheckQuery = "SELECT 'true' AS alive"): DbConnPool =
## Initialize a new DbConnPool. See the `initDb` procedure in the `Example
## Fiber ORM Usage`_ for an example
##
@ -679,14 +666,14 @@ proc initPool*[D: DbConnType](
##
## .. _Example Fiber ORM Usage: #basic-usage-example-fiber-orm-usage
initDbConnPool(DbConnPoolConfig[D](
initDbConnPool(DbConnPoolConfig(
connect: connect,
poolSize: poolSize,
hardCap: hardCap,
healthCheckQuery: healthCheckQuery))
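The `initDb` procedure referenced above might look like this sketch on the 2.0.0 side (connection parameters are placeholders):

.. code-block:: Nim
  proc initDb*(connString: string): TodoDB =
    initPool(
      connect = proc(): DbConn = open("", "", "", connString),
      poolSize = 20,
      hardCap = false)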
template inTransaction*(db, body: untyped) =
db.withConnection conn:
template inTransaction*(db: DbConnPool, body: untyped) =
pool.withConn(db):
conn.exec(sql"BEGIN TRANSACTION")
try:
body
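A usage sketch for `inTransaction` (`conn` is injected by the pool's `withConn` on the 2.0.0 side; `itemA` and `itemB` are illustrative records):

.. code-block:: Nim
  db.inTransaction:
    discard conn.createRecord(itemA)
    discard conn.createRecord(itemB)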

View File

@ -1,3 +0,0 @@
import db_connector/[db_postgres, db_sqlite]
type DbConnType* = db_postgres.DbConn or db_sqlite.DbConn

View File

@ -4,62 +4,65 @@
## Simple database connection pooling implementation compatible with Fiber ORM.
import std/[sequtils, strutils, sugar]
import db_connector/db_common
import std/db_postgres, std/sequtils, std/strutils, std/sugar
from db_connector/db_sqlite import getRow, close
from db_connector/db_postgres import getRow, close
import namespaced_logging
import ./db_common as fiber_db_common
type
DbConnPoolConfig*[D: DbConnType] = object
connect*: () -> D ## Factory procedure to create a new DBConn
poolSize*: int ## The pool capacity.
hardCap*: bool ## Is the pool capacity a hard cap?
##
## When `false`, the pool can grow beyond the
## configured capacity, but will release connections
## down to its capacity (no less than `poolSize`).
##
## When `true` the pool will not create more than its
## configured capacity. If a connection is requested,
## none are free, and the pool is at capacity, this
## will result in an Error being raised.
DbConnPoolConfig* = object
connect*: () -> DbConn ## Factory procedure to create a new DBConn
poolSize*: int ## The pool capacity.
hardCap*: bool ## Is the pool capacity a hard cap?
##
## When `false`, the pool can grow beyond the configured
## capacity, but will release connections down to its
## capacity (no less than `poolSize`).
##
## When `true` the pool will not create more than its
## configured capacity. If a connection is requested, none
## are free, and the pool is at capacity, this will result
## in an Error being raised.
healthCheckQuery*: string ## Should be a simple and fast SQL query that the
## pool can use to test the liveliness of pooled
## connections.
PooledDbConn[D: DbConnType] = ref object
conn: D
PooledDbConn = ref object
conn: DbConn
id: int
free: bool
DbConnPool*[D: DbConnType] = ref object
DbConnPool* = ref object
## Database connection pool
conns: seq[PooledDbConn[D]]
cfg: DbConnPoolConfig[D]
conns: seq[PooledDbConn]
cfg: DbConnPoolConfig
lastId: int
proc initDbConnPool*[D: DbConnType](cfg: DbConnPoolConfig[D]): DbConnPool[D] =
result = DbConnPool[D](
var logNs {.threadvar.}: LoggingNamespace
template log(): untyped =
if logNs.isNil: logNs = initLoggingNamespace(name = "fiber_orm/pool", level = lvlNotice)
logNs
proc initDbConnPool*(cfg: DbConnPoolConfig): DbConnPool =
log().debug("Initializing new pool (size: " & $cfg.poolSize)
result = DbConnPool(
conns: @[],
cfg: cfg)
proc newConn[D: DbConnType](pool: DbConnPool[D]): PooledDbConn[D] =
proc newConn(pool: DbConnPool): PooledDbConn =
log().debug("Creating a new connection to add to the pool.")
pool.lastId += 1
{.gcsafe.}:
let conn = pool.cfg.connect()
result = PooledDbConn[D](
conn: conn,
id: pool.lastId,
free: true)
pool.conns.add(result)
let conn = pool.cfg.connect()
result = PooledDbConn(
conn: conn,
id: pool.lastId,
free: true)
pool.conns.add(result)
proc maintain[D: DbConnType](pool: DbConnPool[D]): void =
pool.conns.keepIf(proc (pc: PooledDbConn[D]): bool =
proc maintain(pool: DbConnPool): void =
log().debug("Maintaining pool. $# connections." % [$pool.conns.len])
pool.conns.keepIf(proc (pc: PooledDbConn): bool =
if not pc.free: return true
try:
@ -70,6 +73,9 @@ proc maintain[D: DbConnType](pool: DbConnPool[D]): void =
except: discard ""
return false
)
log().debug(
"Pruned dead connections. $# connections remaining." %
[$pool.conns.len])
let freeConns = pool.conns.filterIt(it.free)
if pool.conns.len > pool.cfg.poolSize and freeConns.len > 0:
@ -81,8 +87,11 @@ proc maintain[D: DbConnType](pool: DbConnPool[D]): void =
for culled in toCull:
try: culled.conn.close()
except: discard ""
log().debug(
"Trimming pool size. Culled $# free connections. $# connections remaining." %
[$toCull.len, $pool.conns.len])
proc take*[D: DbConnType](pool: DbConnPool[D]): tuple[id: int, conn: D] =
proc take*(pool: DbConnPool): tuple[id: int, conn: DbConn] =
## Request a connection from the pool. Returns a DbConn if the pool has free
## connections, or if it has the capacity to create a new connection. If the
## pool is configured with a hard capacity limit and is out of free
@ -93,22 +102,29 @@ proc take*[D: DbConnType](pool: DbConnPool[D]): tuple[id: int, conn: D] =
pool.maintain
let freeConns = pool.conns.filterIt(it.free)
log().debug(
"Providing a new connection ($# currently free)." % [$freeConns.len])
let reserved =
if freeConns.len > 0: freeConns[0]
else: pool.newConn()
reserved.free = false
log().debug("Reserve connection $#" % [$reserved.id])
return (id: reserved.id, conn: reserved.conn)
proc release*[D: DbConnType](pool: DbConnPool[D], connId: int): void =
proc release*(pool: DbConnPool, connId: int): void =
## Release a connection back to the pool.
log().debug("Reclaiming released connaction $#" % [$connId])
let foundConn = pool.conns.filterIt(it.id == connId)
if foundConn.len > 0: foundConn[0].free = true
template withConnection*[D: DbConnType](pool: DbConnPool[D], conn, stmt: untyped): untyped =
template withConn*(pool: DbConnPool, stmt: untyped): untyped =
## Convenience template to provide a connection from the pool for use in a
## statement block, automatically releasing that connection when done.
block:
let (connId, conn) = take(pool)
try: stmt
finally: release(pool, connId)
##
## The provided connection is injected as the variable `conn` in the
## statement body.
let (connId, conn {.inject.}) = take(pool)
try: stmt
finally: release(pool, connId)
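A standalone sketch of the pool in use (connection string is a placeholder; on the main side the imports come from `db_connector` instead):

.. code-block:: Nim
  import std/db_postgres
  import fiber_orm/pool

  let pool = initDbConnPool(DbConnPoolConfig(
    connect: proc(): DbConn = open("", "", "", "host=localhost dbname=todo"),
    poolSize: 5,
    hardCap: false,
    healthCheckQuery: "SELECT 'true' AS alive"))

  pool.withConn:
    echo conn.getRow(sql"SELECT now()")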

View File

@ -3,17 +3,12 @@
# Copyright 2019 Jonathan Bernard <jonathan@jdbernard.com>
## Utility methods used internally by Fiber ORM.
import std/[json, macros, options, sequtils, strutils, times, unicode]
import uuids
import json, macros, options, sequtils, strutils, times, unicode,
uuids
import std/nre except toSeq
import nre except toSeq
type
PaginationParams* = object
pageSize*: int
offset*: int
orderBy*: Option[seq[string]]
MutateClauses* = object
## Data structure to hold information about the clauses that should be
## added to a query. How these clauses are used will depend on the query.
@ -27,11 +22,9 @@ const ISO_8601_FORMATS = @[
"yyyy-MM-dd'T'HH:mm:ssz",
"yyyy-MM-dd'T'HH:mm:sszzz",
"yyyy-MM-dd'T'HH:mm:ss'.'fffzzz",
"yyyy-MM-dd'T'HH:mm:ss'.'ffffzzz",
"yyyy-MM-dd HH:mm:ssz",
"yyyy-MM-dd HH:mm:sszzz",
"yyyy-MM-dd HH:mm:ss'.'fffzzz",
"yyyy-MM-dd HH:mm:ss'.'ffffzzz"
"yyyy-MM-dd HH:mm:ss'.'fffzzz"
]
proc parseIso8601(val: string): DateTime =
@ -109,7 +102,7 @@ proc dbFormat*[T](list: seq[T]): string =
proc dbFormat*[T](item: T): string =
## For all other types, fall back on a defined `$` function to create a
## string version of the value we can include in an SQL query.
## string version of the value we can include in an SQL query>
return $item
type DbArrayParseState = enum
@ -133,20 +126,18 @@ proc parsePGDatetime*(val: string): DateTime =
var correctedVal = val;
# The Nim `times#format` function only recognizes 3-digit millisecond values
# but PostgreSQL will sometimes send 1-2 digits, truncating any trailing 0's,
# or sometimes provide more than three digits of precision in the millisecond value, leading
# to values like `2020-01-01 16:42.3+00` or `2025-01-06 00:56:00.9007+00`.
# This cannot currently be parsed by the standard times format as it expects
# exactly three digits for millisecond values. So we have to detect this and
# coerce the millisecond value to exactly 3 digits.
let PG_PARTIAL_FORMAT_REGEX = re"(\d{4}-\d{2}-\d{2}( |'T')\d{2}:\d{2}:\d{2}\.)(\d+)(\S+)?"
# PostgreSQL will truncate any trailing 0's in the millisecond value leading
# to values like `2020-01-01 16:42.3+00`. This cannot currently be parsed by
# the standard times format as it expects exactly three digits for
# millisecond values. So we have to detect this and pad out the millisecond
# value to 3 digits.
let PG_PARTIAL_FORMAT_REGEX = re"(\d{4}-\d{2}-\d{2}( |'T')\d{2}:\d{2}:\d{2}\.)(\d{1,2})(\S+)?"
let match = val.match(PG_PARTIAL_FORMAT_REGEX)
if match.isSome:
let c = match.get.captures
if c.toSeq.len == 2: correctedVal = c[0] & alignLeft(c[2], 3, '0')[0..2]
else: correctedVal = c[0] & alignLeft(c[2], 3, '0')[0..2] & c[3]
if c.toSeq.len == 2: correctedVal = c[0] & alignLeft(c[2], 3, '0')
else: correctedVal = c[0] & alignLeft(c[2], 3, '0') & c[3]
var errStr = ""
@ -155,7 +146,7 @@ proc parsePGDatetime*(val: string): DateTime =
try: return correctedVal.parse(df)
except: errStr &= "\n\t" & getCurrentExceptionMsg()
raise newException(ValueError, "Cannot parse PG date '" & correctedVal & "'. Tried:" & errStr)
raise newException(ValueError, "Cannot parse PG date. Tried:" & errStr)
proc parseDbArray*(val: string): seq[string] =
## Parse a Postgres array column into a Nim seq[string]
@ -216,14 +207,21 @@ proc parseDbArray*(val: string): seq[string] =
if not (parseState == inQuote) and curStr.len > 0:
result.add(curStr)
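An illustrative call (the quoting rules are only partially visible in this hunk, so the expected output is a best guess):

.. code-block:: Nim
  import fiber_orm/util
  echo parseDbArray("{apple,banana}")   # likely @["apple", "banana"]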
func createParseStmt*(t, value: NimNode): NimNode =
proc createParseStmt*(t, value: NimNode): NimNode =
## Utility method to create the Nim code required to parse a value coming from
## a database query. This is used by functions like `rowToModel` to parse
## the database columns into the Nim object fields.
#echo "Creating parse statment for ", t.treeRepr
if t.typeKind == ntyObject:
if t.getTypeInst == Option.getType:
if t.getType == UUID.getType:
result = quote do: parseUUID(`value`)
elif t.getType == DateTime.getType:
result = quote do: parsePGDatetime(`value`)
elif t.getTypeInst == Option.getType:
var innerType = t.getTypeImpl[2][0] # start at the first RecList
# If the value is a non-pointer type, there is another inner RecList
if innerType.kind == nnkRecList: innerType = innerType[0]
@ -234,28 +232,8 @@ func createParseStmt*(t, value: NimNode): NimNode =
if `value`.len == 0: none[`innerType`]()
else: some(`parseStmt`)
elif t.getType == UUID.getType:
result = quote do: parseUUID(`value`)
elif t.getType == DateTime.getType:
result = quote do: parsePGDatetime(`value`)
else: error "Unknown value object type: " & $t.getTypeInst
elif t.typeKind == ntyGenericInst:
if t.kind == nnkBracketExpr and
t.len > 0 and
t[0] == Option.getType:
var innerType = t.getTypeInst[1]
let parseStmt = createParseStmt(innerType, value)
result = quote do:
if `value`.len == 0: none[`innerType`]()
else: some(`parseStmt`)
else: error "Unknown generic instance type: " & $t.getTypeInst
elif t.typeKind == ntyRef:
if $t.getTypeInst == "JsonNode":
@ -290,72 +268,28 @@ func createParseStmt*(t, value: NimNode): NimNode =
else:
error "Unknown value type: " & $t.typeKind
func fields(t: NimNode): seq[tuple[fieldIdent: NimNode, fieldType: NimNode]] =
#[
debugEcho "T: " & t.treeRepr
debugEcho "T.kind: " & $t.kind
debugEcho "T.typeKind: " & $t.typeKind
debugEcho "T.GET_TYPE[1]: " & t.getType[1].treeRepr
debugEcho "T.GET_TYPE[1].kind: " & $t.getType[1].kind
debugEcho "T.GET_TYPE[1].typeKind: " & $t.getType[1].typeKind
debugEcho "T.GET_TYPE: " & t.getType.treeRepr
debugEcho "T.GET_TYPE[1].GET_TYPE: " & t.getType[1].getType.treeRepr
]#
# Get the object type AST, with base object (if present) and record list.
var objDefAst: NimNode
if t.typeKind == ntyObject: objDefAst = t.getType
elif t.typeKind == ntyTypeDesc:
# In this case we have a type AST that is like:
# BracketExpr
# Sym "typeDesc"
# Sym "ModelType"
objDefAst = t.
getType[1]. # get the Sym "ModelType"
getType # get the object definition type
if objDefAst.kind != nnkObjectTy:
error ("unable to enumerate the fields for model type '$#', " &
"tried to resolve the type of the provided symbol to an object " &
"definition (nnkObjectTy) but got a '$#'.\pAST:\p$#") % [
$t, $objDefAst.kind, objDefAst.treeRepr ]
else:
error ("unable to enumerate the fields for model type '$#', " &
"expected a symbol with type ntyTypeDesc but got a '$#'.\pAST:\p$#") % [
$t, $t.typeKind, t.treeRepr ]
# At this point objDefAst should look something like:
# ObjectTy
# Empty
# Sym "BaseObject"" | Empty
# RecList
# Sym "field1"
# Sym "field2"
# ...
if objDefAst[1].kind == nnkSym:
# We have a base class symbol, let's recurse and try and resolve the fields
# for the base class
for fieldDef in objDefAst[1].fields: result.add(fieldDef)
for fieldDef in objDefAst[2].children:
# objDefAst[2] is a RecList of
# ignore AST nodes that are not field definitions
if fieldDef.kind == nnkIdentDefs: result.add((fieldDef[0], fieldDef[1]))
elif fieldDef.kind == nnkSym: result.add((fieldDef, fieldDef.getTypeInst))
else: error "unknown object field definition AST: $#" % $fieldDef.kind
template walkFieldDefs*(t: NimNode, body: untyped) =
## Iterate over every field of the given Nim object, yielding and defining
## `fieldIdent` and `fieldType`, the name of the field as a Nim Ident node
## and the type of the field as a Nim Type node respectively.
for (fieldIdent {.inject.}, fieldType {.inject.}) in t.fields: body
let tTypeImpl = t.getTypeImpl
#[ TODO: replace walkFieldDefs with things like this:
func columnNamesForModel*(modelType: typedesc): seq[string] =
modelType.fields.mapIt(identNameToDb($it[0]))
]#
var nodeToItr: NimNode
if tTypeImpl.typeKind == ntyObject: nodeToItr = tTypeImpl[2]
elif tTypeImpl.typeKind == ntyTypeDesc: nodeToItr = tTypeImpl.getType[1].getType[2]
else: error $t & " is not an object or type desc (it's a " & $tTypeImpl.typeKind & ")."
for fieldDef {.inject.} in nodeToItr.children:
# ignore AST nodes that are not field definitions
if fieldDef.kind == nnkIdentDefs:
let fieldIdent {.inject.} = fieldDef[0]
let fieldType {.inject.} = fieldDef[1]
body
elif fieldDef.kind == nnkSym:
let fieldIdent {.inject.} = fieldDef
let fieldType {.inject.} = fieldDef.getType
body
macro columnNamesForModel*(modelType: typed): seq[string] =
## Return the column names corresponding to the fields of the given
@ -383,7 +317,6 @@ macro rowToModel*(modelType: typed, row: seq[string]): untyped =
createParseStmt(fieldType, itemLookup)))
idx += 1
#[
macro listFields*(t: typed): untyped =
var fields: seq[tuple[n: string, t: string]] = @[]
t.walkFieldDefs:
@ -391,7 +324,6 @@ macro listFields*(t: typed): untyped =
else: fields.add((n: $fieldIdent, t: $fieldType))
result = newLit(fields)
]#
proc typeOfColumn*(modelType: NimNode, colName: string): NimNode =
## Given a model type and a column name, return the Nim type for that column.
@ -438,8 +370,8 @@ macro populateMutateClauses*(t: typed, newRecord: bool, mc: var MutateClauses):
# if we're looking at an optional field, add logic to check for presence
elif fieldType.kind == nnkBracketExpr and
fieldType.len > 0 and
fieldType[0] == Option.getType:
fieldType.len > 0 and
fieldType[0] == Option.getType:
result.add quote do:
`mc`.columns.add(identNameToDb(`fieldName`))
@ -456,19 +388,6 @@ macro populateMutateClauses*(t: typed, newRecord: bool, mc: var MutateClauses):
`mc`.placeholders.add("?")
`mc`.values.add(dbFormat(`t`.`fieldIdent`))
proc getPagingClause*(page: PaginationParams): string =
## Given a `PaginationParams` object, return the SQL clause necessary to
## limit the number of records returned by a query.
result = ""
if page.orderBy.isSome:
let orderByClause = page.orderBy.get.map(identNameToDb).join(",")
result &= " ORDER BY " & orderByClause
else:
result &= " ORDER BY id"
result &= " LIMIT " & $page.pageSize & " OFFSET " & $page.offset
## .. _model class: ../fiber_orm.html#objectminusrelational-modeling-model-class
## .. _rules for name mapping: ../fiber_orm.html
## .. _table name: ../fiber_orm.html