Compare commits


8 Commits
2.0.0 ... main

Author SHA1 Message Date
9d1cc4bbec Cache logger instance. 2025-01-20 06:39:02 -06:00
af44d48df1 Extract pagination logic into a common, exported function. Fix PG date parsing (again). 2025-01-10 20:25:49 -06:00
2030fd4490 Use namespaced_logging 1.x for logging (optionally). 2025-01-05 02:06:57 -06:00
0599d41061 Support Nim 2.x, compatibility with waterpark.
- Nim 2.x has moved the DB connectors outside the standard library to
  the `db_connector` package.
- Refactor the pooling implementation and macro expectations to use the
  `withConnection` name instead of `withConn`. This change allows a
  caller to use a [waterpark](https://github.com/guzba/waterpark) pool
  instance instead of the builtin pool instance. Waterpark provides
  better support for multi-threaded environments. The builtin pooling
  mechanism may be deprecated in favor of waterpark in the future.
- Add the `getModelIfItExists` generated proc to the list of standard
  procs we generate. This is a flavour of `getModel` that returns an
  `Option` instead of raising an exception when there is no model for
  the given id.
- Change `PaginationParams#orderBy` to accept a `seq[string]` to allow
  for sorting on multiple fields.
2025-01-03 07:55:05 -06:00
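For illustration, a minimal sketch of what the `withConnection` rename enables: a waterpark pool can stand in for the builtin pool as the database object. The waterpark constructor, its parameter order, and the model/type names below are assumptions, not part of this change.

.. code-block:: Nim

    import waterpark/postgres   # assumed to provide PostgresPool and a `withConnection` template
    import fiber_orm
    import ./models.nim         # assumed to define TodoItem as in the README

    type TodoDB* = PostgresPool

    proc initDb*(): TodoDB =
      # parameters are placeholders; check waterpark's docs for the real signature
      newPostgresPool(10, "localhost", "postgres", "password", "todo")

    # generated procs expand to `db.withConnection conn: ...`, which a
    # waterpark pool satisfies directly
    generateProcsForModels(TodoDB, [TodoItem])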
fb74d84cb7 Map names to db ident names for columns passed for ordering in paginated queries. 2023-08-09 09:16:10 -05:00
fbd20de71f Add createOrUpdateRecord and record method generators.
`createOrUpdateRecord` implements upsert: update an existing record if
it exists or create a new record if not. A new error, `DbUpdateError`, was
added; it is raised when an existing record is found but cannot be
updated.
2023-08-09 09:13:12 -05:00
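A rough usage fragment of the generated upsert proc; it assumes a `db: TodoDB` and the `TodoItem` model from the README example further down, and the `summary` field name is illustrative.

.. code-block:: Nim

    import uuids

    let item = TodoItem(id: genUUID(), summary: "water the plants")
    try:
      discard db.createOrUpdateTodoItem(item)   # insert if missing, update otherwise
    except DbUpdateError:
      echo "record exists but could not be updated"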
540d0d2f67 Fix missing import in pooling implementation. 2023-02-04 19:04:50 -06:00
a05555ee67 WIP - Initial stab at making it generic to support db_sqlite. 2022-11-03 16:38:14 -05:00
6 changed files with 282 additions and 197 deletions

View File

@@ -57,7 +57,7 @@ Models may be defined as:
.. code-block:: Nim .. code-block:: Nim
# models.nim # models.nim
import std/options, std/times import std/[options, times]
import uuids import uuids
type type
@@ -82,6 +82,8 @@ Using Fiber ORM we can generate a data access layer with:
.. code-block:: Nim .. code-block:: Nim
# db.nim # db.nim
import std/[options]
import db_connector/db_postgres
import fiber_orm import fiber_orm
import ./models.nim import ./models.nim
@@ -102,6 +104,7 @@ This will generate the following procedures:
.. code-block:: Nim .. code-block:: Nim
proc getTodoItem*(db: TodoDB, id: UUID): TodoItem; proc getTodoItem*(db: TodoDB, id: UUID): TodoItem;
proc getTodoItemIfItExists*(db: TodoDB, id: UUID): Option[TodoItem];
proc getAllTodoItems*(db: TodoDB): seq[TodoItem]; proc getAllTodoItems*(db: TodoDB): seq[TodoItem];
proc createTodoItem*(db: TodoDB, rec: TodoItem): TodoItem; proc createTodoItem*(db: TodoDB, rec: TodoItem): TodoItem;
proc updateTodoItem*(db: TodoDB, rec: TodoItem): bool; proc updateTodoItem*(db: TodoDB, rec: TodoItem): bool;
@@ -112,6 +115,7 @@ This will generate the following procedures:
values: varargs[string, dbFormat]): seq[TodoItem]; values: varargs[string, dbFormat]): seq[TodoItem];
proc getTimeEntry*(db: TodoDB, id: UUID): TimeEntry; proc getTimeEntry*(db: TodoDB, id: UUID): TimeEntry;
proc getTimeEntryIfItExists*(db: TodoDB, id: UUID): Option[TimeEntry];
proc getAllTimeEntries*(db: TodoDB): seq[TimeEntry]; proc getAllTimeEntries*(db: TodoDB): seq[TimeEntry];
proc createTimeEntry*(db: TodoDB, rec: TimeEntry): TimeEntry; proc createTimeEntry*(db: TodoDB, rec: TimeEntry): TimeEntry;
proc updateTimeEntry*(db: TodoDB, rec: TimeEntry): bool; proc updateTimeEntry*(db: TodoDB, rec: TimeEntry): bool;
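As a hedged sketch of the new Option-returning variant listed above; it assumes an `initDb` helper defined in db.nim as in the full README.

.. code-block:: Nim

    import std/options
    import uuids
    import ./db, ./models

    let db = initDb()   # assumed helper from db.nim
    # Option-returning variant: no NotFoundError when the id is absent
    let item: Option[TodoItem] = db.getTodoItemIfItExists(genUUID())
    if item.isNone: echo "no such todo item"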

View File

@@ -1,6 +1,6 @@
# Package # Package
version = "2.0.0" version = "3.1.1"
author = "Jonathan Bernard" author = "Jonathan Bernard"
description = "Lightweight Postgres ORM for Nim." description = "Lightweight Postgres ORM for Nim."
license = "GPL-3.0" license = "GPL-3.0"
@@ -11,4 +11,4 @@ srcDir = "src"
# Dependencies # Dependencies
requires @["nim >= 1.4.0", "uuids"] requires @["nim >= 1.4.0", "uuids"]
requires "https://git.jdb-software.com/jdb/nim-namespaced-logging.git" requires "namespaced_logging >= 1.0.0"

View File

@@ -1,6 +1,6 @@
# Fiber ORM # Fiber ORM
# #
# Copyright 2019 Jonathan Bernard <jonathan@jdbernard.com> # Copyright 2019-2024 Jonathan Bernard <jonathan@jdbernard.com>
## Lightweight ORM supporting the `Postgres`_ and `SQLite`_ databases in Nim. ## Lightweight ORM supporting the `Postgres`_ and `SQLite`_ databases in Nim.
## It supports a simple, opinionated model mapper to generate SQL queries based ## It supports a simple, opinionated model mapper to generate SQL queries based
@@ -107,6 +107,7 @@
## ##
## proc createTodoItem*(db: TodoDB, rec: TodoItem): TodoItem; ## proc createTodoItem*(db: TodoDB, rec: TodoItem): TodoItem;
## proc updateTodoItem*(db: TodoDB, rec: TodoItem): bool; ## proc updateTodoItem*(db: TodoDB, rec: TodoItem): bool;
## proc createOrUpdateTodoItem*(db: TodoDB, rec: TodoItem): bool;
## proc deleteTodoItem*(db: TodoDB, rec: TodoItem): bool; ## proc deleteTodoItem*(db: TodoDB, rec: TodoItem): bool;
## proc deleteTodoItem*(db: TodoDB, id: UUID): bool; ## proc deleteTodoItem*(db: TodoDB, id: UUID): bool;
## ##
@@ -263,62 +264,64 @@
## In the example above the `pool.DbConnPool`_ object is used as database ## In the example above the `pool.DbConnPool`_ object is used as database
## object type (aliased as `TodoDB`). This is the intended usage pattern, but ## object type (aliased as `TodoDB`). This is the intended usage pattern, but
## anything can be passed as the database object type so long as there is a ## anything can be passed as the database object type so long as there is a
## defined `withConn` template that provides an injected `conn: DbConn` object ## defined `withConnection` template that provides a `conn: DbConn` object
## to the provided statement body. ## to the provided statement body.
## ##
## For example, a valid database object implementation that opens a new ## For example, a valid database object implementation that opens a new
## connection for every request might look like this: ## connection for every request might look like this:
## ##
## .. code-block:: Nim ## .. code-block:: Nim
## import std/db_postgres ## import db_connector/db_postgres
## ##
## type TodoDB* = object ## type TodoDB* = object
## connString: string ## connString: string
## ##
## template withConn*(db: TodoDB, stmt: untyped): untyped = ## template withConnection*(db: TodoDB, stmt: untyped): untyped =
## let conn {.inject.} = open("", "", "", db.connString) ## block:
## try: stmt ## let conn = open("", "", "", db.connString)
## finally: close(conn) ## try: stmt
## finally: close(conn)
## ##
## .. _pool.DbConnPool: fiber_orm/pool.html#DbConnPool ## .. _pool.DbConnPool: fiber_orm/pool.html#DbConnPool
## ##
import std/db_postgres, std/macros, std/options, std/sequtils, std/strutils import std/[json, macros, options, sequtils, strutils]
import db_connector/db_common
import namespaced_logging, uuids import namespaced_logging, uuids
from std/unicode import capitalize from std/unicode import capitalize
import ./fiber_orm/db_common as fiber_db_common
import ./fiber_orm/pool import ./fiber_orm/pool
import ./fiber_orm/util import ./fiber_orm/util
export export pool, util
pool,
util.columnNamesForModel,
util.dbFormat,
util.dbNameToIdent,
util.identNameToDb,
util.modelName,
util.rowToModel,
util.tableName
type type
PaginationParams* = object
pageSize*: int
offset*: int
orderBy*: Option[string]
PagedRecords*[T] = object PagedRecords*[T] = object
pagination*: Option[PaginationParams] pagination*: Option[PaginationParams]
records*: seq[T] records*: seq[T]
totalRecords*: int totalRecords*: int
DbUpdateError* = object of CatchableError ##\
## Error type raised when a DB modification fails.
NotFoundError* = object of CatchableError ##\ NotFoundError* = object of CatchableError ##\
## Error type raised when no record matches a given ID ## Error type raised when no record matches a given ID
var logNs {.threadvar.}: LoggingNamespace var logService {.threadvar.}: Option[LogService]
var logger {.threadvar.}: Option[Logger]
template log(): untyped = proc logQuery*(methodName: string, sqlStmt: string, args: openArray[(string, string)] = []) =
if logNs.isNil: logNs = initLoggingNamespace(name = "fiber_orm", level = lvlNotice) # namespaced_logging would do this check for us, but we don't want to even
logNs # build the log object if we're not actually logging
if logService.isNone: return
if logger.isNone: logger = logService.getLogger("fiber_orm/query")
var log = %*{ "method": methodName, "sql": sqlStmt }
for (k, v) in args: log[k] = %v
logger.debug(log)
proc enableDbLogging*(svc: LogService) =
logService = some(svc)
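A hedged sketch of turning this logging on from application code; `initLogService` is an assumed constructor name for namespaced_logging 1.x, so check that library's API before copying this.

.. code-block:: Nim

    import namespaced_logging
    import fiber_orm

    let svc = initLogService()       # assumed namespaced_logging constructor
    fiber_orm.enableDbLogging(svc)
    # from here on, generated procs emit structured debug events such as:
    #   {"method": "getTodoItem", "sql": "SELECT ... WHERE id = ?", "id": "..."}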
proc newMutateClauses(): MutateClauses = proc newMutateClauses(): MutateClauses =
return MutateClauses( return MutateClauses(
@@ -326,7 +329,7 @@ proc newMutateClauses(): MutateClauses =
placeholders: @[], placeholders: @[],
values: @[]) values: @[])
proc createRecord*[T](db: DbConn, rec: T): T = proc createRecord*[D: DbConnType, T](db: D, rec: T): T =
## Create a new record. `rec` is expected to be a `model class`_. The `id` ## Create a new record. `rec` is expected to be a `model class`_. The `id`
## field is only set if it is non-empty (see `ID Field`_ for details). ## field is only set if it is non-empty (see `ID Field`_ for details).
## ##
@@ -344,12 +347,12 @@ proc createRecord*[T](db: DbConn, rec: T): T =
" VALUES (" & mc.placeholders.join(",") & ") " & " VALUES (" & mc.placeholders.join(",") & ") " &
" RETURNING " & columnNamesForModel(rec).join(",") " RETURNING " & columnNamesForModel(rec).join(",")
log().debug "createRecord: [" & sqlStmt & "]" logQuery("createRecord", sqlStmt)
let newRow = db.getRow(sql(sqlStmt), mc.values) let newRow = db.getRow(sql(sqlStmt), mc.values)
result = rowToModel(T, newRow) result = rowToModel(T, newRow)
proc updateRecord*[T](db: DbConn, rec: T): bool = proc updateRecord*[D: DbConnType, T](db: D, rec: T): bool =
## Update a record by id. `rec` is expected to be a `model class`_. ## Update a record by id. `rec` is expected to be a `model class`_.
var mc = newMutateClauses() var mc = newMutateClauses()
populateMutateClauses(rec, false, mc) populateMutateClauses(rec, false, mc)
@@ -360,33 +363,51 @@ proc updateRecord*[T](db: DbConn, rec: T): bool =
" SET " & setClause & " SET " & setClause &
" WHERE id = ? " " WHERE id = ? "
log().debug "updateRecord: [" & sqlStmt & "] id: " & $rec.id logQuery("updateRecord", sqlStmt, [("id", $rec.id)])
let numRowsUpdated = db.execAffectedRows(sql(sqlStmt), mc.values.concat(@[$rec.id])) let numRowsUpdated = db.execAffectedRows(sql(sqlStmt), mc.values.concat(@[$rec.id]))
return numRowsUpdated > 0; return numRowsUpdated > 0;
template deleteRecord*(db: DbConn, modelType: type, id: typed): untyped = proc createOrUpdateRecord*[D: DbConnType, T](db: D, rec: T): T =
## Create or update a record. `rec` is expected to be a `model class`_. If
## the `id` field is unset, or if there is no existing record with the given
## id, a new record is inserted. Otherwise, the existing record is updated.
##
## Note that this does not perform partial updates; all fields are updated.
let findRecordStmt = "SELECT id FROM " & tableName(rec) & " WHERE id = ?"
logQuery("createOrUpdateRecord", findRecordStmt, [("id", $rec.id)])
let rows = db.getAllRows(sql(findRecordStmt), [$rec.id])
if rows.len == 0: result = createRecord(db, rec)
else:
result = rec
if not updateRecord(db, rec):
raise newException(DbUpdateError,
"unable to update " & modelName(rec) & " for id " & $rec.id)
template deleteRecord*[D: DbConnType](db: D, modelType: type, id: typed): untyped =
## Delete a record by id. ## Delete a record by id.
let sqlStmt = "DELETE FROM " & tableName(modelType) & " WHERE id = ?" let sqlStmt = "DELETE FROM " & tableName(modelType) & " WHERE id = ?"
log().debug "deleteRecord: [" & sqlStmt & "] id: " & $id logQuery("deleteRecord", sqlStmt, [("id", $id)])
db.tryExec(sql(sqlStmt), $id) db.tryExec(sql(sqlStmt), $id)
proc deleteRecord*[T](db: DbConn, rec: T): bool = proc deleteRecord*[D: DbConnType, T](db: D, rec: T): bool =
## Delete a record by `id`_. ## Delete a record by `id`_.
## ##
## .. _id: #model-class-id-field ## .. _id: #model-class-id-field
let sqlStmt = "DELETE FROM " & tableName(rec) & " WHERE id = ?" let sqlStmt = "DELETE FROM " & tableName(rec) & " WHERE id = ?"
log().debug "deleteRecord: [" & sqlStmt & "] id: " & $rec.id logQuery("deleteRecord", sqlStmt, [("id", $rec.id)])
return db.tryExec(sql(sqlStmt), $rec.id) return db.tryExec(sql(sqlStmt), $rec.id)
template getRecord*(db: DbConn, modelType: type, id: typed): untyped = template getRecord*[D: DbConnType](db: D, modelType: type, id: typed): untyped =
## Fetch a record by id. ## Fetch a record by id.
let sqlStmt = let sqlStmt =
"SELECT " & columnNamesForModel(modelType).join(",") & "SELECT " & columnNamesForModel(modelType).join(",") &
" FROM " & tableName(modelType) & " FROM " & tableName(modelType) &
" WHERE id = ?" " WHERE id = ?"
log().debug "getRecord: [" & sqlStmt & "] id: " & $id logQuery("getRecord", sqlStmt, [("id", $id)])
let row = db.getRow(sql(sqlStmt), @[$id]) let row = db.getRow(sql(sqlStmt), @[$id])
if allIt(row, it.len == 0): if allIt(row, it.len == 0):
@@ -394,8 +415,8 @@ template getRecord*(db: DbConn, modelType: type, id: typed): untyped =
rowToModel(modelType, row) rowToModel(modelType, row)
template findRecordsWhere*( template findRecordsWhere*[D: DbConnType](
db: DbConn, db: D,
modelType: type, modelType: type,
whereClause: string, whereClause: string,
values: varargs[string, dbFormat], values: varargs[string, dbFormat],
@@ -412,17 +433,9 @@ template findRecordsWhere*(
"SELECT COUNT(*) FROM " & tableName(modelType) & "SELECT COUNT(*) FROM " & tableName(modelType) &
" WHERE " & whereClause " WHERE " & whereClause
if page.isSome: if page.isSome: fetchStmt &= getPagingClause(page.get)
let p = page.get
if p.orderBy.isSome:
fetchStmt &= " ORDER BY " & p.orderBy.get
else:
fetchStmt &= " ORDER BY id"
fetchStmt &= " LIMIT " & $p.pageSize & logQuery("findRecordsWhere", fetchStmt, [("values", values.join(", "))])
" OFFSET " & $p.offset
log().debug "findRecordsWhere: [" & fetchStmt & "] values: (" & values.join(", ") & ")"
let records = db.getAllRows(sql(fetchStmt), values).mapIt(rowToModel(modelType, it)) let records = db.getAllRows(sql(fetchStmt), values).mapIt(rowToModel(modelType, it))
PagedRecords[modelType]( PagedRecords[modelType](
@@ -432,8 +445,8 @@ template findRecordsWhere*(
if page.isNone: records.len if page.isNone: records.len
else: db.getRow(sql(countStmt), values)[0].parseInt) else: db.getRow(sql(countStmt), values)[0].parseInt)
template getAllRecords*( template getAllRecords*[D: DbConnType](
db: DbConn, db: D,
modelType: type, modelType: type,
page: Option[PaginationParams]): untyped = page: Option[PaginationParams]): untyped =
## Fetch all records of the given type. ## Fetch all records of the given type.
@@ -443,17 +456,9 @@ template getAllRecords*(
var countStmt = "SELECT COUNT(*) FROM " & tableName(modelType) var countStmt = "SELECT COUNT(*) FROM " & tableName(modelType)
if page.isSome: if page.isSome: fetchStmt &= getPagingClause(page.get)
let p = page.get
if p.orderBy.isSome:
fetchStmt &= " ORDER BY " & p.orderBy.get
else:
fetchStmt &= " ORDER BY id"
fetchStmt &= " LIMIT " & $p.pageSize & logQuery("getAllRecords", fetchStmt)
" OFFSET " & $p.offset
log().debug "getAllRecords: [" & fetchStmt & "]"
let records = db.getAllRows(sql(fetchStmt)).mapIt(rowToModel(modelType, it)) let records = db.getAllRows(sql(fetchStmt)).mapIt(rowToModel(modelType, it))
PagedRecords[modelType]( PagedRecords[modelType](
@@ -464,8 +469,8 @@ template getAllRecords*(
else: db.getRow(sql(countStmt))[0].parseInt) else: db.getRow(sql(countStmt))[0].parseInt)
template findRecordsBy*( template findRecordsBy*[D: DbConnType](
db: DbConn, db: D,
modelType: type, modelType: type,
lookups: seq[tuple[field: string, value: string]], lookups: seq[tuple[field: string, value: string]],
page: Option[PaginationParams]): untyped = page: Option[PaginationParams]): untyped =
@@ -482,17 +487,9 @@ template findRecordsBy*(
"SELECT COUNT(*) FROM " & tableName(modelType) & "SELECT COUNT(*) FROM " & tableName(modelType) &
" WHERE " & whereClause " WHERE " & whereClause
if page.isSome: if page.isSome: fetchStmt &= getPagingClause(page.get)
let p = page.get
if p.orderBy.isSome:
fetchStmt &= " ORDER BY " & p.orderBy.get
else:
fetchStmt &= " ORDER BY id"
fetchStmt &= " LIMIT " & $p.pageSize & logQuery("findRecordsBy", fetchStmt, [("values", values.join(", "))])
" OFFSET " & $p.offset
log().debug "findRecordsBy: [" & fetchStmt & "] values (" & values.join(", ") & ")"
let records = db.getAllRows(sql(fetchStmt), values).mapIt(rowToModel(modelType, it)) let records = db.getAllRows(sql(fetchStmt), values).mapIt(rowToModel(modelType, it))
PagedRecords[modelType]( PagedRecords[modelType](
@@ -515,51 +512,66 @@ macro generateProcsForModels*(dbType: type, modelTypes: openarray[type]): untype
## proc deleteTodoItem*(db: TodoDB, rec: TodoItem): bool; ## proc deleteTodoItem*(db: TodoDB, rec: TodoItem): bool;
## proc deleteTodoItem*(db: TodoDB, id: idType): bool; ## proc deleteTodoItem*(db: TodoDB, id: idType): bool;
## proc updateTodoItem*(db: TodoDB, rec: TodoItem): bool; ## proc updateTodoItem*(db: TodoDB, rec: TodoItem): bool;
## proc createOrUpdateTodoItem*(db: TodoDB, rec: TodoItem): bool;
## ##
## proc findTodoItemsWhere*( ## proc findTodoItemsWhere*(
## db: TodoDB, whereClause: string, values: varargs[string]): TodoItem; ## db: TodoDB, whereClause: string, values: varargs[string]): TodoItem;
## ##
## `dbType` is expected to be some type that has a defined `withConn` ## `dbType` is expected to be some type that has a defined `withConnection`
## procedure (see `Database Object`_ for details). ## procedure (see `Database Object`_ for details).
## ##
## .. _Database Object: #database-object ## .. _Database Object: #database-object
result = newStmtList() result = newStmtList()
for t in modelTypes: for t in modelTypes:
if t.getType[1].typeKind == ntyRef:
raise newException(ValueError,
"fiber_orm model object must be objects, not refs")
let modelName = $(t.getType[1]) let modelName = $(t.getType[1])
let getName = ident("get" & modelName) let getName = ident("get" & modelName)
let getIfExistsName = ident("get" & modelName & "IfItExists")
let getAllName = ident("getAll" & pluralize(modelName)) let getAllName = ident("getAll" & pluralize(modelName))
let findWhereName = ident("find" & pluralize(modelName) & "Where") let findWhereName = ident("find" & pluralize(modelName) & "Where")
let createName = ident("create" & modelName) let createName = ident("create" & modelName)
let updateName = ident("update" & modelName) let updateName = ident("update" & modelName)
let createOrUpdateName = ident("createOrUpdate" & modelName)
let deleteName = ident("delete" & modelName) let deleteName = ident("delete" & modelName)
let idType = typeOfColumn(t, "id") let idType = typeOfColumn(t, "id")
result.add quote do: result.add quote do:
proc `getName`*(db: `dbType`, id: `idType`): `t` = proc `getName`*(db: `dbType`, id: `idType`): `t` =
db.withConn: result = getRecord(conn, `t`, id) db.withConnection conn: result = getRecord(conn, `t`, id)
proc `getIfExistsName`*(db: `dbType`, id: `idType`): Option[`t`] =
db.withConnection conn:
try: result = some(getRecord(conn, `t`, id))
except NotFoundError: result = none[`t`]()
proc `getAllName`*(db: `dbType`, pagination = none[PaginationParams]()): PagedRecords[`t`] = proc `getAllName`*(db: `dbType`, pagination = none[PaginationParams]()): PagedRecords[`t`] =
db.withConn: result = getAllRecords(conn, `t`, pagination) db.withConnection conn: result = getAllRecords(conn, `t`, pagination)
proc `findWhereName`*( proc `findWhereName`*(
db: `dbType`, db: `dbType`,
whereClause: string, whereClause: string,
values: varargs[string, dbFormat], values: varargs[string, dbFormat],
pagination = none[PaginationParams]()): PagedRecords[`t`] = pagination = none[PaginationParams]()): PagedRecords[`t`] =
db.withConn: db.withConnection conn:
result = findRecordsWhere(conn, `t`, whereClause, values, pagination) result = findRecordsWhere(conn, `t`, whereClause, values, pagination)
proc `createName`*(db: `dbType`, rec: `t`): `t` = proc `createName`*(db: `dbType`, rec: `t`): `t` =
db.withConn: result = createRecord(conn, rec) db.withConnection conn: result = createRecord(conn, rec)
proc `updateName`*(db: `dbType`, rec: `t`): bool = proc `updateName`*(db: `dbType`, rec: `t`): bool =
db.withConn: result = updateRecord(conn, rec) db.withConnection conn: result = updateRecord(conn, rec)
proc `createOrUpdateName`*(db: `dbType`, rec: `t`): `t` =
db.inTransaction: result = createOrUpdateRecord(conn, rec)
proc `deleteName`*(db: `dbType`, rec: `t`): bool = proc `deleteName`*(db: `dbType`, rec: `t`): bool =
db.withConn: result = deleteRecord(conn, rec) db.withConnection conn: result = deleteRecord(conn, rec)
proc `deleteName`*(db: `dbType`, id: `idType`): bool = proc `deleteName`*(db: `dbType`, id: `idType`): bool =
db.withConn: result = deleteRecord(conn, `t`, id) db.withConnection conn: result = deleteRecord(conn, `t`, id)
macro generateLookup*(dbType: type, modelType: type, fields: seq[string]): untyped = macro generateLookup*(dbType: type, modelType: type, fields: seq[string]): untyped =
## Create a lookup procedure for a given set of field names. For example, ## Create a lookup procedure for a given set of field names. For example,
@@ -579,7 +591,7 @@ macro generateLookup*(dbType: type, modelType: type, fields: seq[string]): untyp
# Create proc skeleton # Create proc skeleton
result = quote do: result = quote do:
proc `procName`*(db: `dbType`): PagedRecords[`modelType`] = proc `procName`*(db: `dbType`): PagedRecords[`modelType`] =
db.withConn: result = findRecordsBy(conn, `modelType`) db.withConnection conn: result = findRecordsBy(conn, `modelType`)
var callParams = quote do: @[] var callParams = quote do: @[]
@@ -603,11 +615,11 @@ macro generateLookup*(dbType: type, modelType: type, fields: seq[string]): untyp
# Add the call params to the inner procedure call # Add the call params to the inner procedure call
# result[6][0][1][0][1] is # result[6][0][1][0][1] is
# ProcDef -> [6]: StmtList (body) -> [0]: Call -> # ProcDef -> [6]: StmtList (body) -> [0]: Command ->
# [1]: StmtList (withConn body) -> [0]: Asgn (result =) -> # [2]: StmtList (withConnection body) -> [0]: Asgn (result =) ->
# [1]: Call (inner findRecords invocation) # [1]: Call (inner findRecords invocation)
result[6][0][1][0][1].add(callParams) result[6][0][2][0][1].add(callParams)
result[6][0][1][0][1].add(quote do: pagination) result[6][0][2][0][1].add(quote do: pagination)
macro generateProcsForFieldLookups*(dbType: type, modelsAndFields: openarray[tuple[t: type, fields: seq[string]]]): untyped = macro generateProcsForFieldLookups*(dbType: type, modelsAndFields: openarray[tuple[t: type, fields: seq[string]]]): untyped =
result = newStmtList() result = newStmtList()
@@ -621,7 +633,7 @@ macro generateProcsForFieldLookups*(dbType: type, modelsAndFields: openarray[tup
# Create proc skeleton # Create proc skeleton
let procDefAST = quote do: let procDefAST = quote do:
proc `procName`*(db: `dbType`): PagedRecords[`modelType`] = proc `procName`*(db: `dbType`): PagedRecords[`modelType`] =
db.withConn: result = findRecordsBy(conn, `modelType`) db.withConnection conn: result = findRecordsBy(conn, `modelType`)
var callParams = quote do: @[] var callParams = quote do: @[]
@@ -644,11 +656,12 @@ macro generateProcsForFieldLookups*(dbType: type, modelsAndFields: openarray[tup
result.add procDefAST result.add procDefAST
proc initPool*( proc initPool*[D: DbConnType](
connect: proc(): DbConn, connect: proc(): D,
poolSize = 10, poolSize = 10,
hardCap = false, hardCap = false,
healthCheckQuery = "SELECT 'true' AS alive"): DbConnPool = healthCheckQuery = "SELECT 'true' AS alive"): DbConnPool[D] =
## Initialize a new DbConnPool. See the `initDb` procedure in the `Example ## Initialize a new DbConnPool. See the `initDb` procedure in the `Example
## Fiber ORM Usage`_ for an example ## Fiber ORM Usage`_ for an example
## ##
@@ -666,14 +679,14 @@ proc initPool*(
## ##
## .. _Example Fiber ORM Usage: #basic-usage-example-fiber-orm-usage ## .. _Example Fiber ORM Usage: #basic-usage-example-fiber-orm-usage
initDbConnPool(DbConnPoolConfig( initDbConnPool(DbConnPoolConfig[D](
connect: connect, connect: connect,
poolSize: poolSize, poolSize: poolSize,
hardCap: hardCap, hardCap: hardCap,
healthCheckQuery: healthCheckQuery)) healthCheckQuery: healthCheckQuery))
template inTransaction*(db: DbConnPool, body: untyped) = template inTransaction*(db, body: untyped) =
pool.withConn(db): db.withConnection conn:
conn.exec(sql"BEGIN TRANSACTION") conn.exec(sql"BEGIN TRANSACTION")
try: try:
body body
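For reference, a minimal sketch of wiring the builtin pool through `initPool` and the renamed `withConnection` template; the connection parameters are placeholders.

.. code-block:: Nim

    import db_connector/db_postgres
    import fiber_orm

    let pool = initPool(
      connect = proc(): DbConn = open("localhost", "pguser", "pgpass", "todo"),
      poolSize = 5)

    pool.withConnection conn:
      echo conn.getValue(sql"SELECT 1")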

View File

@@ -0,0 +1,3 @@
import db_connector/[db_postgres, db_sqlite]
type DbConnType* = db_postgres.DbConn or db_sqlite.DbConn

View File

@@ -4,65 +4,62 @@
## Simple database connection pooling implementation compatible with Fiber ORM. ## Simple database connection pooling implementation compatible with Fiber ORM.
import std/db_postgres, std/sequtils, std/strutils, std/sugar import std/[sequtils, strutils, sugar]
import db_connector/db_common
import namespaced_logging from db_connector/db_sqlite import getRow, close
from db_connector/db_postgres import getRow, close
import ./db_common as fiber_db_common
type type
DbConnPoolConfig* = object DbConnPoolConfig*[D: DbConnType] = object
connect*: () -> DbConn ## Factory procedure to create a new DBConn connect*: () -> D ## Factory procedure to create a new DBConn
poolSize*: int ## The pool capacity. poolSize*: int ## The pool capacity.
hardCap*: bool ## Is the pool capacity a hard cap?
## hardCap*: bool ## Is the pool capacity a hard cap?
## When `false`, the pool can grow beyond the configured ##
## capacity, but will release connections down to its ## When `false`, the pool can grow beyond the
## capacity (no less than `poolSize`). ## configured capacity, but will release connections
## ## down to its capacity (no less than `poolSize`).
## When `true` the pool will not create more than its ##
## configured capacity. If a connection is requested, none ## When `true` the pool will not create more than its
## are free, and the pool is at capacity, this will result ## configured capacity. If a connection is requested,
## in an Error being raised. ## none are free, and the pool is at capacity, this
## will result in an Error being raised.
healthCheckQuery*: string ## Should be a simple and fast SQL query that the healthCheckQuery*: string ## Should be a simple and fast SQL query that the
## pool can use to test the liveliness of pooled ## pool can use to test the liveliness of pooled
## connections. ## connections.
PooledDbConn = ref object PooledDbConn[D: DbConnType] = ref object
conn: DbConn conn: D
id: int id: int
free: bool free: bool
DbConnPool* = ref object DbConnPool*[D: DbConnType] = ref object
## Database connection pool ## Database connection pool
conns: seq[PooledDbConn] conns: seq[PooledDbConn[D]]
cfg: DbConnPoolConfig cfg: DbConnPoolConfig[D]
lastId: int lastId: int
var logNs {.threadvar.}: LoggingNamespace proc initDbConnPool*[D: DbConnType](cfg: DbConnPoolConfig[D]): DbConnPool[D] =
result = DbConnPool[D](
template log(): untyped =
if logNs.isNil: logNs = initLoggingNamespace(name = "fiber_orm/pool", level = lvlNotice)
logNs
proc initDbConnPool*(cfg: DbConnPoolConfig): DbConnPool =
log().debug("Initializing new pool (size: " & $cfg.poolSize)
result = DbConnPool(
conns: @[], conns: @[],
cfg: cfg) cfg: cfg)
proc newConn(pool: DbConnPool): PooledDbConn = proc newConn[D: DbConnType](pool: DbConnPool[D]): PooledDbConn[D] =
log().debug("Creating a new connection to add to the pool.")
pool.lastId += 1 pool.lastId += 1
let conn = pool.cfg.connect() {.gcsafe.}:
result = PooledDbConn( let conn = pool.cfg.connect()
conn: conn, result = PooledDbConn[D](
id: pool.lastId, conn: conn,
free: true) id: pool.lastId,
pool.conns.add(result) free: true)
pool.conns.add(result)
proc maintain(pool: DbConnPool): void = proc maintain[D: DbConnType](pool: DbConnPool[D]): void =
log().debug("Maintaining pool. $# connections." % [$pool.conns.len]) pool.conns.keepIf(proc (pc: PooledDbConn[D]): bool =
pool.conns.keepIf(proc (pc: PooledDbConn): bool =
if not pc.free: return true if not pc.free: return true
try: try:
@@ -73,9 +70,6 @@ proc maintain(pool: DbConnPool): void =
except: discard "" except: discard ""
return false return false
) )
log().debug(
"Pruned dead connections. $# connections remaining." %
[$pool.conns.len])
let freeConns = pool.conns.filterIt(it.free) let freeConns = pool.conns.filterIt(it.free)
if pool.conns.len > pool.cfg.poolSize and freeConns.len > 0: if pool.conns.len > pool.cfg.poolSize and freeConns.len > 0:
@@ -87,11 +81,8 @@ proc maintain(pool: DbConnPool): void =
for culled in toCull: for culled in toCull:
try: culled.conn.close() try: culled.conn.close()
except: discard "" except: discard ""
log().debug(
"Trimming pool size. Culled $# free connections. $# connections remaining." %
[$toCull.len, $pool.conns.len])
proc take*(pool: DbConnPool): tuple[id: int, conn: DbConn] = proc take*[D: DbConnType](pool: DbConnPool[D]): tuple[id: int, conn: D] =
## Request a connection from the pool. Returns a DbConn if the pool has free ## Request a connection from the pool. Returns a DbConn if the pool has free
## connections, or if it has the capacity to create a new connection. If the ## connections, or if it has the capacity to create a new connection. If the
## pool is configured with a hard capacity limit and is out of free ## pool is configured with a hard capacity limit and is out of free
@@ -102,29 +93,22 @@ proc take*(pool: DbConnPool): tuple[id: int, conn: DbConn] =
pool.maintain pool.maintain
let freeConns = pool.conns.filterIt(it.free) let freeConns = pool.conns.filterIt(it.free)
log().debug(
"Providing a new connection ($# currently free)." % [$freeConns.len])
let reserved = let reserved =
if freeConns.len > 0: freeConns[0] if freeConns.len > 0: freeConns[0]
else: pool.newConn() else: pool.newConn()
reserved.free = false reserved.free = false
log().debug("Reserve connection $#" % [$reserved.id])
return (id: reserved.id, conn: reserved.conn) return (id: reserved.id, conn: reserved.conn)
proc release*(pool: DbConnPool, connId: int): void = proc release*[D: DbConnType](pool: DbConnPool[D], connId: int): void =
## Release a connection back to the pool. ## Release a connection back to the pool.
log().debug("Reclaiming released connaction $#" % [$connId])
let foundConn = pool.conns.filterIt(it.id == connId) let foundConn = pool.conns.filterIt(it.id == connId)
if foundConn.len > 0: foundConn[0].free = true if foundConn.len > 0: foundConn[0].free = true
template withConn*(pool: DbConnPool, stmt: untyped): untyped = template withConnection*[D: DbConnType](pool: DbConnPool[D], conn, stmt: untyped): untyped =
## Convenience template to provide a connection from the pool for use in a ## Convenience template to provide a connection from the pool for use in a
## statement block, automatically releasing that connection when done. ## statement block, automatically releasing that connection when done.
## block:
## The provided connection is injected as the variable `conn` in the let (connId, conn) = take(pool)
## statement body. try: stmt
let (connId, conn {.inject.}) = take(pool) finally: release(pool, connId)
try: stmt
finally: release(pool, connId)

View File

@@ -3,12 +3,17 @@
# Copyright 2019 Jonathan Bernard <jonathan@jdbernard.com> # Copyright 2019 Jonathan Bernard <jonathan@jdbernard.com>
## Utility methods used internally by Fiber ORM. ## Utility methods used internally by Fiber ORM.
import json, macros, options, sequtils, strutils, times, unicode, import std/[json, macros, options, sequtils, strutils, times, unicode]
uuids import uuids
import nre except toSeq import std/nre except toSeq
type type
PaginationParams* = object
pageSize*: int
offset*: int
orderBy*: Option[seq[string]]
MutateClauses* = object MutateClauses* = object
## Data structure to hold information about the clauses that should be ## Data structure to hold information about the clauses that should be
## added to a query. How these clauses are used will depend on the query. ## added to a query. How these clauses are used will depend on the query.
@@ -22,9 +27,11 @@ const ISO_8601_FORMATS = @[
"yyyy-MM-dd'T'HH:mm:ssz", "yyyy-MM-dd'T'HH:mm:ssz",
"yyyy-MM-dd'T'HH:mm:sszzz", "yyyy-MM-dd'T'HH:mm:sszzz",
"yyyy-MM-dd'T'HH:mm:ss'.'fffzzz", "yyyy-MM-dd'T'HH:mm:ss'.'fffzzz",
"yyyy-MM-dd'T'HH:mm:ss'.'ffffzzz",
"yyyy-MM-dd HH:mm:ssz", "yyyy-MM-dd HH:mm:ssz",
"yyyy-MM-dd HH:mm:sszzz", "yyyy-MM-dd HH:mm:sszzz",
"yyyy-MM-dd HH:mm:ss'.'fffzzz" "yyyy-MM-dd HH:mm:ss'.'fffzzz",
"yyyy-MM-dd HH:mm:ss'.'ffffzzz"
] ]
proc parseIso8601(val: string): DateTime = proc parseIso8601(val: string): DateTime =
@@ -102,7 +109,7 @@ proc dbFormat*[T](list: seq[T]): string =
proc dbFormat*[T](item: T): string = proc dbFormat*[T](item: T): string =
## For all other types, fall back on a defined `$` function to create a ## For all other types, fall back on a defined `$` function to create a
## string version of the value we can include in an SQL query> ## string version of the value we can include in an SQL query.
return $item return $item
type DbArrayParseState = enum type DbArrayParseState = enum
@@ -126,18 +133,20 @@ proc parsePGDatetime*(val: string): DateTime =
var correctedVal = val; var correctedVal = val;
# PostgreSQL will truncate any trailing 0's in the millisecond value leading # The Nim `times#format` function only recognizes 3-digit millisecond values
# to values like `2020-01-01 16:42.3+00`. This cannot currently be parsed by # but PostgreSQL will sometimes send 1-2 digits, truncating any trailing 0's,
# the standard times format as it expects exactly three digits for # or sometimes provide more than three digits of precision in the millisecond value leading
# millisecond values. So we have to detect this and pad out the millisecond # to values like `2020-01-01 16:42.3+00` or `2025-01-06 00:56:00.9007+00`.
# value to 3 digits. # This cannot currently be parsed by the standard times format as it expects
let PG_PARTIAL_FORMAT_REGEX = re"(\d{4}-\d{2}-\d{2}( |'T')\d{2}:\d{2}:\d{2}\.)(\d{1,2})(\S+)?" # exactly three digits for millisecond values. So we have to detect this and
# coerce the millisecond value to exactly 3 digits.
let PG_PARTIAL_FORMAT_REGEX = re"(\d{4}-\d{2}-\d{2}( |'T')\d{2}:\d{2}:\d{2}\.)(\d+)(\S+)?"
let match = val.match(PG_PARTIAL_FORMAT_REGEX) let match = val.match(PG_PARTIAL_FORMAT_REGEX)
if match.isSome: if match.isSome:
let c = match.get.captures let c = match.get.captures
if c.toSeq.len == 2: correctedVal = c[0] & alignLeft(c[2], 3, '0') if c.toSeq.len == 2: correctedVal = c[0] & alignLeft(c[2], 3, '0')[0..2]
else: correctedVal = c[0] & alignLeft(c[2], 3, '0') & c[3] else: correctedVal = c[0] & alignLeft(c[2], 3, '0')[0..2] & c[3]
var errStr = "" var errStr = ""
@@ -146,7 +155,7 @@ proc parsePGDatetime*(val: string): DateTime =
try: return correctedVal.parse(df) try: return correctedVal.parse(df)
except: errStr &= "\n\t" & getCurrentExceptionMsg() except: errStr &= "\n\t" & getCurrentExceptionMsg()
raise newException(ValueError, "Cannot parse PG date. Tried:" & errStr) raise newException(ValueError, "Cannot parse PG date '" & correctedVal & "'. Tried:" & errStr)
proc parseDbArray*(val: string): seq[string] = proc parseDbArray*(val: string): seq[string] =
## Parse a Postgres array column into a Nim seq[string] ## Parse a Postgres array column into a Nim seq[string]
@@ -207,21 +216,14 @@ proc parseDbArray*(val: string): seq[string] =
if not (parseState == inQuote) and curStr.len > 0: if not (parseState == inQuote) and curStr.len > 0:
result.add(curStr) result.add(curStr)
proc createParseStmt*(t, value: NimNode): NimNode = func createParseStmt*(t, value: NimNode): NimNode =
## Utility method to create the Nim code required to parse a value coming from ## Utility method to create the Nim code required to parse a value coming from
## a database query. This is used by functions like `rowToModel` to parse ## a database query. This is used by functions like `rowToModel` to parse
## the database columns into the Nim object fields. ## the database columns into the Nim object fields.
#echo "Creating parse statment for ", t.treeRepr
if t.typeKind == ntyObject: if t.typeKind == ntyObject:
if t.getType == UUID.getType: if t.getTypeInst == Option.getType:
result = quote do: parseUUID(`value`)
elif t.getType == DateTime.getType:
result = quote do: parsePGDatetime(`value`)
elif t.getTypeInst == Option.getType:
var innerType = t.getTypeImpl[2][0] # start at the first RecList var innerType = t.getTypeImpl[2][0] # start at the first RecList
# If the value is a non-pointer type, there is another inner RecList # If the value is a non-pointer type, there is another inner RecList
if innerType.kind == nnkRecList: innerType = innerType[0] if innerType.kind == nnkRecList: innerType = innerType[0]
@@ -232,8 +234,28 @@ proc createParseStmt*(t, value: NimNode): NimNode =
if `value`.len == 0: none[`innerType`]() if `value`.len == 0: none[`innerType`]()
else: some(`parseStmt`) else: some(`parseStmt`)
elif t.getType == UUID.getType:
result = quote do: parseUUID(`value`)
elif t.getType == DateTime.getType:
result = quote do: parsePGDatetime(`value`)
else: error "Unknown value object type: " & $t.getTypeInst else: error "Unknown value object type: " & $t.getTypeInst
elif t.typeKind == ntyGenericInst:
if t.kind == nnkBracketExpr and
t.len > 0 and
t[0] == Option.getType:
var innerType = t.getTypeInst[1]
let parseStmt = createParseStmt(innerType, value)
result = quote do:
if `value`.len == 0: none[`innerType`]()
else: some(`parseStmt`)
else: error "Unknown generic instance type: " & $t.getTypeInst
elif t.typeKind == ntyRef: elif t.typeKind == ntyRef:
if $t.getTypeInst == "JsonNode": if $t.getTypeInst == "JsonNode":
@@ -268,28 +290,72 @@ proc createParseStmt*(t, value: NimNode): NimNode =
else: else:
error "Unknown value type: " & $t.typeKind error "Unknown value type: " & $t.typeKind
func fields(t: NimNode): seq[tuple[fieldIdent: NimNode, fieldType: NimNode]] =
#[
debugEcho "T: " & t.treeRepr
debugEcho "T.kind: " & $t.kind
debugEcho "T.typeKind: " & $t.typeKind
debugEcho "T.GET_TYPE[1]: " & t.getType[1].treeRepr
debugEcho "T.GET_TYPE[1].kind: " & $t.getType[1].kind
debugEcho "T.GET_TYPE[1].typeKind: " & $t.getType[1].typeKind
debugEcho "T.GET_TYPE: " & t.getType.treeRepr
debugEcho "T.GET_TYPE[1].GET_TYPE: " & t.getType[1].getType.treeRepr
]#
# Get the object type AST, with base object (if present) and record list.
var objDefAst: NimNode
if t.typeKind == ntyObject: objDefAst = t.getType
elif t.typeKind == ntyTypeDesc:
# In this case we have a type AST that is like:
# BracketExpr
# Sym "typeDesc"
# Sym "ModelType"
objDefAst = t.
getType[1]. # get the Sym "ModelType"
getType # get the object definition type
if objDefAst.kind != nnkObjectTy:
error ("unable to enumerate the fields for model type '$#', " &
"tried to resolve the type of the provided symbol to an object " &
"definition (nnkObjectTy) but got a '$#'.\pAST:\p$#") % [
$t, $objDefAst.kind, objDefAst.treeRepr ]
else:
error ("unable to enumerate the fields for model type '$#', " &
"expected a symbol with type ntyTypeDesc but got a '$#'.\pAST:\p$#") % [
$t, $t.typeKind, t.treeRepr ]
# At this point objDefAst should look something like:
# ObjectTy
# Empty
# Sym "BaseObject"" | Empty
# RecList
# Sym "field1"
# Sym "field2"
# ...
if objDefAst[1].kind == nnkSym:
# We have a base class symbol, let's recurse and try and resolve the fields
# for the base class
for fieldDef in objDefAst[1].fields: result.add(fieldDef)
for fieldDef in objDefAst[2].children:
# objDefAst[2] is a RecList of
# ignore AST nodes that are not field definitions
if fieldDef.kind == nnkIdentDefs: result.add((fieldDef[0], fieldDef[1]))
elif fieldDef.kind == nnkSym: result.add((fieldDef, fieldDef.getTypeInst))
else: error "unknown object field definition AST: $#" % $fieldDef.kind
template walkFieldDefs*(t: NimNode, body: untyped) = template walkFieldDefs*(t: NimNode, body: untyped) =
## Iterate over every field of the given Nim object, yielding and defining ## Iterate over every field of the given Nim object, yielding and defining
## `fieldIdent` and `fieldType`, the name of the field as a Nim Ident node ## `fieldIdent` and `fieldType`, the name of the field as a Nim Ident node
## and the type of the field as a Nim Type node respectively. ## and the type of the field as a Nim Type node respectively.
let tTypeImpl = t.getTypeImpl for (fieldIdent {.inject.}, fieldType {.inject.}) in t.fields: body
var nodeToItr: NimNode #[ TODO: replace walkFieldDefs with things like this:
if tTypeImpl.typeKind == ntyObject: nodeToItr = tTypeImpl[2] func columnNamesForModel*(modelType: typedesc): seq[string] =
elif tTypeImpl.typeKind == ntyTypeDesc: nodeToItr = tTypeImpl.getType[1].getType[2] modelType.fields.mapIt(identNameToDb($it[0]))
else: error $t & " is not an object or type desc (it's a " & $tTypeImpl.typeKind & ")." ]#
for fieldDef {.inject.} in nodeToItr.children:
# ignore AST nodes that are not field definitions
if fieldDef.kind == nnkIdentDefs:
let fieldIdent {.inject.} = fieldDef[0]
let fieldType {.inject.} = fieldDef[1]
body
elif fieldDef.kind == nnkSym:
let fieldIdent {.inject.} = fieldDef
let fieldType {.inject.} = fieldDef.getType
body
macro columnNamesForModel*(modelType: typed): seq[string] = macro columnNamesForModel*(modelType: typed): seq[string] =
## Return the column names corresponding to the fields of the given ## Return the column names corresponding to the fields of the given
@@ -317,6 +383,7 @@ macro rowToModel*(modelType: typed, row: seq[string]): untyped =
createParseStmt(fieldType, itemLookup))) createParseStmt(fieldType, itemLookup)))
idx += 1 idx += 1
#[
macro listFields*(t: typed): untyped = macro listFields*(t: typed): untyped =
var fields: seq[tuple[n: string, t: string]] = @[] var fields: seq[tuple[n: string, t: string]] = @[]
t.walkFieldDefs: t.walkFieldDefs:
@@ -324,6 +391,7 @@ macro listFields*(t: typed): untyped =
else: fields.add((n: $fieldIdent, t: $fieldType)) else: fields.add((n: $fieldIdent, t: $fieldType))
result = newLit(fields) result = newLit(fields)
]#
proc typeOfColumn*(modelType: NimNode, colName: string): NimNode = proc typeOfColumn*(modelType: NimNode, colName: string): NimNode =
## Given a model type and a column name, return the Nim type for that column. ## Given a model type and a column name, return the Nim type for that column.
@@ -370,8 +438,8 @@ macro populateMutateClauses*(t: typed, newRecord: bool, mc: var MutateClauses):
# if we're looking at an optional field, add logic to check for presence # if we're looking at an optional field, add logic to check for presence
elif fieldType.kind == nnkBracketExpr and elif fieldType.kind == nnkBracketExpr and
fieldType.len > 0 and fieldType.len > 0 and
fieldType[0] == Option.getType: fieldType[0] == Option.getType:
result.add quote do: result.add quote do:
`mc`.columns.add(identNameToDb(`fieldName`)) `mc`.columns.add(identNameToDb(`fieldName`))
@@ -388,6 +456,19 @@ macro populateMutateClauses*(t: typed, newRecord: bool, mc: var MutateClauses):
`mc`.placeholders.add("?") `mc`.placeholders.add("?")
`mc`.values.add(dbFormat(`t`.`fieldIdent`)) `mc`.values.add(dbFormat(`t`.`fieldIdent`))
proc getPagingClause*(page: PaginationParams): string =
## Given a `PaginationParams` object, return the SQL clause necessary to
## limit the number of records returned by a query.
result = ""
if page.orderBy.isSome:
let orderByClause = page.orderBy.get.map(identNameToDb).join(",")
result &= " ORDER BY " & orderByClause
else:
result &= " ORDER BY id"
result &= " LIMIT " & $page.pageSize & " OFFSET " & $page.offset
## .. _model class: ../fiber_orm.html#objectminusrelational-modeling-model-class ## .. _model class: ../fiber_orm.html#objectminusrelational-modeling-model-class
## .. _rules for name mapping: ../fiber_orm.html ## .. _rules for name mapping: ../fiber_orm.html
## .. _table name: ../fiber_orm.html ## .. _table name: ../fiber_orm.html