10 Commits
2.1.0 ... 4.0.0

Author SHA1 Message Date
e1fa2480d0 Major update to provide thread-safe, robust connection pooling.
Taking inspiration from the waterpark library, the connection pooling
mechanism has been refactored to be thread-safe. Additionally, the
pooling logic detects and handles stale connections in the pool. When a
connection is requested from the pool, the pool first validates that it
is healthy and replaces it with a fresh connection if necessary. This is
transparent to the requester.

Additionally we refactored the internal logging implementation to make
it more convenient to access logging infrastructure and log from
various sub-scopes within fiber_orm (query, pool, etc.)
2025-07-27 17:47:07 -05:00
b8c64cc693 Migrate to namespaced_logging v2. 2025-07-12 07:54:13 -05:00
aa02f9f5b1 Add support for records associated via join tables. 2025-05-19 17:56:40 -05:00
9d1cc4bbec Cache logger instance. 2025-01-20 06:39:02 -06:00
af44d48df1 Extract pagination logic into a common, exported function. Fix PG date parsing (again). 2025-01-10 20:25:49 -06:00
2030fd4490 Use namespaced_logging 1.x for logging (optionally). 2025-01-05 02:06:57 -06:00
0599d41061 Support Nim 2.x, compatibility with waterpark.
- Nim 2.x has moved the DB connectors outside the standard library to
  the `db_connector` package.
- Refactor the pooling implementation and macro expectations to use the
  `withConnection` name instead of `withConn`. This change allows a
  caller to use a [waterpark](https://github.com/guzba/waterpark) pool
  instance instead of the builtin pool instance. Waterpark provides
  better support for multi-threaded environments. The builtin pooling
  mechanism may be deprecated in favor of waterpark in the future.
- Add the `getModelIfItExists` generated proc to the list of standard
  procs we generate. This is a flavour of `getModel` that returns an
  `Option` instead of raising an exception when there is no model for
  the given id.
- Change `PaginationParams#orderBy` to accept a `seq[string]` to allow
  for sorting on multiple fields.
2025-01-03 07:55:05 -06:00
fb74d84cb7 Map names to db ident names for columns passed for ordering in paginated queries. 2023-08-09 09:16:10 -05:00
fbd20de71f Add createOrUpdateRecord and record method generators.
`createOrUpdateRecord` implements upsert: update an existing record if
it exists or create a new record if not. A new error, `DbUpdateError`, was
added; it is raised when an existing record is found but cannot be
updated.
2023-08-09 09:13:12 -05:00
540d0d2f67 Fix missing import in pooling implementation. 2023-02-04 19:04:50 -06:00
8 changed files with 416 additions and 227 deletions

2
.gitignore vendored
View File

@@ -1,2 +1,4 @@
*.sw? *.sw?
nimcache/ nimcache/
nimble.develop
nimble.paths

View File

@@ -57,7 +57,7 @@ Models may be defined as:
.. code-block:: Nim .. code-block:: Nim
# models.nim # models.nim
import std/options, std/times import std/[options, times]
import uuids import uuids
type type
@@ -82,6 +82,8 @@ Using Fiber ORM we can generate a data access layer with:
.. code-block:: Nim .. code-block:: Nim
# db.nim # db.nim
import std/[options]
import db_connectors/db_postgres
import fiber_orm import fiber_orm
import ./models.nim import ./models.nim
@@ -102,6 +104,7 @@ This will generate the following procedures:
.. code-block:: Nim .. code-block:: Nim
proc getTodoItem*(db: TodoDB, id: UUID): TodoItem; proc getTodoItem*(db: TodoDB, id: UUID): TodoItem;
proc getTodoItemIfItExists*(db: TodoDB, id: UUID): Option[TodoItem];
proc getAllTodoItems*(db: TodoDB): seq[TodoItem]; proc getAllTodoItems*(db: TodoDB): seq[TodoItem];
proc createTodoItem*(db: TodoDB, rec: TodoItem): TodoItem; proc createTodoItem*(db: TodoDB, rec: TodoItem): TodoItem;
proc updateTodoItem*(db: TodoDB, rec: TodoItem): bool; proc updateTodoItem*(db: TodoDB, rec: TodoItem): bool;
@@ -112,6 +115,7 @@ This will generate the following procedures:
values: varargs[string, dbFormat]): seq[TodoItem]; values: varargs[string, dbFormat]): seq[TodoItem];
proc getTimeEntry*(db: TodoDB, id: UUID): TimeEntry; proc getTimeEntry*(db: TodoDB, id: UUID): TimeEntry;
proc getTimeEntryIfItExists*(db: TodoDB, id: UUID): Option[TimeEntry];
proc getAllTimeEntries*(db: TodoDB): seq[TimeEntry]; proc getAllTimeEntries*(db: TodoDB): seq[TimeEntry];
proc createTimeEntry*(db: TodoDB, rec: TimeEntry): TimeEntry; proc createTimeEntry*(db: TodoDB, rec: TimeEntry): TimeEntry;
proc updateTimeEntry*(db: TodoDB, rec: TimeEntry): bool; proc updateTimeEntry*(db: TodoDB, rec: TimeEntry): bool;

View File

@@ -1,6 +1,6 @@
# Package # Package
version = "2.1.0" version = "4.0.0"
author = "Jonathan Bernard" author = "Jonathan Bernard"
description = "Lightweight Postgres ORM for Nim." description = "Lightweight Postgres ORM for Nim."
license = "GPL-3.0" license = "GPL-3.0"
@@ -11,4 +11,4 @@ srcDir = "src"
# Dependencies # Dependencies
requires @["nim >= 1.4.0", "uuids"] requires @["nim >= 1.4.0", "uuids"]
requires "https://git.jdb-software.com/jdb/nim-namespaced-logging.git" requires "namespaced_logging >= 2.0.2"

View File

@@ -1,6 +1,6 @@
# Fiber ORM # Fiber ORM
# #
# Copyright 2019 Jonathan Bernard <jonathan@jdbernard.com> # Copyright 2019-2024 Jonathan Bernard <jonathan@jdbernard.com>
## Lightweight ORM supporting the `Postgres`_ and `SQLite`_ databases in Nim. ## Lightweight ORM supporting the `Postgres`_ and `SQLite`_ databases in Nim.
## It supports a simple, opinionated model mapper to generate SQL queries based ## It supports a simple, opinionated model mapper to generate SQL queries based
@@ -107,6 +107,7 @@
## ##
## proc createTodoItem*(db: TodoDB, rec: TodoItem): TodoItem; ## proc createTodoItem*(db: TodoDB, rec: TodoItem): TodoItem;
## proc updateTodoItem*(db: TodoDB, rec: TodoItem): bool; ## proc updateTodoItem*(db: TodoDB, rec: TodoItem): bool;
## proc createOrUpdateTodoItem*(db: TodoDB, rec: TodoItem): bool;
## proc deleteTodoItem*(db: TodoDB, rec: TodoItem): bool; ## proc deleteTodoItem*(db: TodoDB, rec: TodoItem): bool;
## proc deleteTodoItem*(db: TodoDB, id: UUID): bool; ## proc deleteTodoItem*(db: TodoDB, id: UUID): bool;
## ##
@@ -263,64 +264,51 @@
## In the example above the `pool.DbConnPool`_ object is used as database ## In the example above the `pool.DbConnPool`_ object is used as database
## object type (aliased as `TodoDB`). This is the intended usage pattern, but ## object type (aliased as `TodoDB`). This is the intended usage pattern, but
## anything can be passed as the database object type so long as there is a ## anything can be passed as the database object type so long as there is a
## defined `withConn` template that provides an injected `conn: DbConn` object ## defined `withConnection` template that provides a `conn: DbConn` object
## to the provided statement body. ## to the provided statement body.
## ##
## For example, a valid database object implementation that opens a new ## For example, a valid database object implementation that opens a new
## connection for every request might look like this: ## connection for every request might look like this:
## ##
## .. code-block:: Nim ## .. code-block:: Nim
## import std/db_postgres ## import db_connector/db_postgres
## ##
## type TodoDB* = object ## type TodoDB* = object
## connString: string ## connString: string
## ##
## template withConn*(db: TodoDB, stmt: untyped): untyped = ## template withConnection*(db: TodoDB, stmt: untyped): untyped =
## let conn {.inject.} = open("", "", "", db.connString) ## block:
## try: stmt ## let conn = open("", "", "", db.connString)
## finally: close(conn) ## try: stmt
## finally: close(conn)
## ##
## .. _pool.DbConnPool: fiber_orm/pool.html#DbConnPool ## .. _pool.DbConnPool: fiber_orm/pool.html#DbConnPool
## ##
import std/[db_common, logging, macros, options, sequtils, strutils] import std/[json, macros, options, sequtils, strutils]
import namespaced_logging, uuids import db_connector/db_common
import uuids
from std/unicode import capitalize from std/unicode import capitalize
import ./fiber_orm/db_common as fiber_db_common import ./fiber_orm/db_common as fiber_db_common
import ./fiber_orm/pool import ./fiber_orm/[pool, util]
import ./fiber_orm/util import ./fiber_orm/private/logging
export export pool, util
pool, export logging.enableDbLogging
util.columnNamesForModel,
util.dbFormat,
util.dbNameToIdent,
util.identNameToDb,
util.modelName,
util.rowToModel,
util.tableName
type type
PaginationParams* = object
pageSize*: int
offset*: int
orderBy*: Option[string]
PagedRecords*[T] = object PagedRecords*[T] = object
pagination*: Option[PaginationParams] pagination*: Option[PaginationParams]
records*: seq[T] records*: seq[T]
totalRecords*: int totalRecords*: int
NotFoundError* = object of CatchableError ##\ DbUpdateError* = object of CatchableError
## Error types raised when a DB modification fails.
NotFoundError* = object of CatchableError
## Error type raised when no record matches a given ID ## Error type raised when no record matches a given ID
var logNs {.threadvar.}: LoggingNamespace
template log(): untyped =
if logNs.isNil: logNs = getLoggerForNamespace(namespace = "fiber_orm", level = lvlNotice)
logNs
proc newMutateClauses(): MutateClauses = proc newMutateClauses(): MutateClauses =
return MutateClauses( return MutateClauses(
columns: @[], columns: @[],
@@ -345,7 +333,9 @@ proc createRecord*[D: DbConnType, T](db: D, rec: T): T =
" VALUES (" & mc.placeholders.join(",") & ") " & " VALUES (" & mc.placeholders.join(",") & ") " &
" RETURNING " & columnNamesForModel(rec).join(",") " RETURNING " & columnNamesForModel(rec).join(",")
log().debug "createRecord: [" & sqlStmt & "]" logQuery("createRecord", sqlStmt)
debug(getLogger("query"), %*{ "values": mc.values })
let newRow = db.getRow(sql(sqlStmt), mc.values) let newRow = db.getRow(sql(sqlStmt), mc.values)
result = rowToModel(T, newRow) result = rowToModel(T, newRow)
@@ -361,15 +351,33 @@ proc updateRecord*[D: DbConnType, T](db: D, rec: T): bool =
" SET " & setClause & " SET " & setClause &
" WHERE id = ? " " WHERE id = ? "
log().debug "updateRecord: [" & sqlStmt & "] id: " & $rec.id logQuery("updateRecord", sqlStmt, [("id", $rec.id)])
let numRowsUpdated = db.execAffectedRows(sql(sqlStmt), mc.values.concat(@[$rec.id])) let numRowsUpdated = db.execAffectedRows(sql(sqlStmt), mc.values.concat(@[$rec.id]))
return numRowsUpdated > 0; return numRowsUpdated > 0;
proc createOrUpdateRecord*[D: DbConnType, T](db: D, rec: T): T =
## Create or update a record. `rec` is expected to be a `model class`_. If
## the `id` field is unset, or if there is no existing record with the given
## id, a new record is inserted. Otherwise, the existing record is updated.
##
## Note that this does not perform partial updates, all fields are updated.
let findRecordStmt = "SELECT id FROM " & tableName(rec) & " WHERE id = ?"
logQuery("createOrUpdateRecord", findRecordStmt, [("id", $rec.id)])
let rows = db.getAllRows(sql(findRecordStmt), [$rec.id])
if rows.len == 0: result = createRecord(db, rec)
else:
result = rec
if not updateRecord(db, rec):
raise newException(DbUpdateError,
"unable to update " & modelName(rec) & " for id " & $rec.id)
template deleteRecord*[D: DbConnType](db: D, modelType: type, id: typed): untyped = template deleteRecord*[D: DbConnType](db: D, modelType: type, id: typed): untyped =
## Delete a record by id. ## Delete a record by id.
let sqlStmt = "DELETE FROM " & tableName(modelType) & " WHERE id = ?" let sqlStmt = "DELETE FROM " & tableName(modelType) & " WHERE id = ?"
log().debug "deleteRecord: [" & sqlStmt & "] id: " & $id logQuery("deleteRecord", sqlStmt, [("id", $id)])
db.tryExec(sql(sqlStmt), $id) db.tryExec(sql(sqlStmt), $id)
proc deleteRecord*[D: DbConnType, T](db: D, rec: T): bool = proc deleteRecord*[D: DbConnType, T](db: D, rec: T): bool =
@@ -377,7 +385,7 @@ proc deleteRecord*[D: DbConnType, T](db: D, rec: T): bool =
## ##
## .. _id: #model-class-id-field ## .. _id: #model-class-id-field
let sqlStmt = "DELETE FROM " & tableName(rec) & " WHERE id = ?" let sqlStmt = "DELETE FROM " & tableName(rec) & " WHERE id = ?"
log().debug "deleteRecord: [" & sqlStmt & "] id: " & $rec.id logQuery("deleteRecord", sqlStmt, [("id", $rec.id)])
return db.tryExec(sql(sqlStmt), $rec.id) return db.tryExec(sql(sqlStmt), $rec.id)
template getRecord*[D: DbConnType](db: D, modelType: type, id: typed): untyped = template getRecord*[D: DbConnType](db: D, modelType: type, id: typed): untyped =
@@ -387,7 +395,7 @@ template getRecord*[D: DbConnType](db: D, modelType: type, id: typed): untyped =
" FROM " & tableName(modelType) & " FROM " & tableName(modelType) &
" WHERE id = ?" " WHERE id = ?"
log().debug "getRecord: [" & sqlStmt & "] id: " & $id logQuery("getRecord", sqlStmt, [("id", $id)])
let row = db.getRow(sql(sqlStmt), @[$id]) let row = db.getRow(sql(sqlStmt), @[$id])
if allIt(row, it.len == 0): if allIt(row, it.len == 0):
@@ -413,17 +421,9 @@ template findRecordsWhere*[D: DbConnType](
"SELECT COUNT(*) FROM " & tableName(modelType) & "SELECT COUNT(*) FROM " & tableName(modelType) &
" WHERE " & whereClause " WHERE " & whereClause
if page.isSome: if page.isSome: fetchStmt &= getPagingClause(page.get)
let p = page.get
if p.orderBy.isSome:
fetchStmt &= " ORDER BY " & p.orderBy.get
else:
fetchStmt &= " ORDER BY id"
fetchStmt &= " LIMIT " & $p.pageSize & logQuery("findRecordsWhere", fetchStmt, [("values", values.join(", "))])
" OFFSET " & $p.offset
log().debug "findRecordsWhere: [" & fetchStmt & "] values: (" & values.join(", ") & ")"
let records = db.getAllRows(sql(fetchStmt), values).mapIt(rowToModel(modelType, it)) let records = db.getAllRows(sql(fetchStmt), values).mapIt(rowToModel(modelType, it))
PagedRecords[modelType]( PagedRecords[modelType](
@@ -444,17 +444,9 @@ template getAllRecords*[D: DbConnType](
var countStmt = "SELECT COUNT(*) FROM " & tableName(modelType) var countStmt = "SELECT COUNT(*) FROM " & tableName(modelType)
if page.isSome: if page.isSome: fetchStmt &= getPagingClause(page.get)
let p = page.get
if p.orderBy.isSome:
fetchStmt &= " ORDER BY " & p.orderBy.get
else:
fetchStmt &= " ORDER BY id"
fetchStmt &= " LIMIT " & $p.pageSize & logQuery("getAllRecords", fetchStmt)
" OFFSET " & $p.offset
log().debug "getAllRecords: [" & fetchStmt & "]"
let records = db.getAllRows(sql(fetchStmt)).mapIt(rowToModel(modelType, it)) let records = db.getAllRows(sql(fetchStmt)).mapIt(rowToModel(modelType, it))
PagedRecords[modelType]( PagedRecords[modelType](
@@ -483,17 +475,9 @@ template findRecordsBy*[D: DbConnType](
"SELECT COUNT(*) FROM " & tableName(modelType) & "SELECT COUNT(*) FROM " & tableName(modelType) &
" WHERE " & whereClause " WHERE " & whereClause
if page.isSome: if page.isSome: fetchStmt &= getPagingClause(page.get)
let p = page.get
if p.orderBy.isSome:
fetchStmt &= " ORDER BY " & p.orderBy.get
else:
fetchStmt &= " ORDER BY id"
fetchStmt &= " LIMIT " & $p.pageSize & logQuery("findRecordsBy", fetchStmt, [("values", values.join(", "))])
" OFFSET " & $p.offset
log().debug "findRecordsBy: [" & fetchStmt & "] values (" & values.join(", ") & ")"
let records = db.getAllRows(sql(fetchStmt), values).mapIt(rowToModel(modelType, it)) let records = db.getAllRows(sql(fetchStmt), values).mapIt(rowToModel(modelType, it))
PagedRecords[modelType]( PagedRecords[modelType](
@@ -504,6 +488,91 @@ template findRecordsBy*[D: DbConnType](
else: db.getRow(sql(countStmt), values)[0].parseInt) else: db.getRow(sql(countStmt), values)[0].parseInt)
template associate*[D: DbConnType, I, J](
db: D,
joinTableName: string,
rec1: I,
rec2: J): void =
## Associate two records via a join table.
let insertStmt =
"INSERT INTO " & joinTableName &
" (" & tableName(I) & "_id, " & tableName(J) & "_id) " &
" VALUES (?, ?)"
logQuery("associate", insertStmt, [("id1", $rec1.id), ("id2", $rec2.id)])
db.exec(sql(insertStmt), [$rec1.id, $rec2.id])
template findViaJoinTable*[D: DbConnType, L](
db: D,
joinTableName: string,
targetType: type,
rec: L,
page: Option[PaginationParams]): untyped =
## Find all records of `targetType` that are associated with `rec` via a
## join table.
let columns = columnNamesForModel(targetType).mapIt("t." & it).join(",")
var fetchStmt =
"SELECT " & columns &
" FROM " & tableName(targetType) & " AS t " &
" JOIN " & joinTableName & " AS j " &
" ON t.id = jt." & tableName(targetType) & "_id " &
" WHERE jt." & tableName(rec) & "_id = ?"
var countStmt =
"SELECT COUNT(*) FROM " & joinTableName &
" WHERE " & tableName(rec) & "_id = ?"
if page.isSome: fetchStmt &= getPagingClause(page.get)
logQuery("findViaJoinTable", fetchStmt, [("id", $rec.id)])
let records = db.getAllRows(sql(fetchStmt), $rec.id)
.mapIt(rowToModel(targetType, it))
PagedRecords[targetType](
pagination: page,
records: records,
totalRecords:
if page.isNone: records.len
else: db.getRow(sql(countStmt))[0].parseInt)
template findViaJoinTable*[D: DbConnType](
db: D,
joinTableName: string,
targetType: type,
lookupType: type,
id: typed,
page: Option[PaginationParams]): untyped =
## Find all records of `targetType` that are associated with a record of
## `lookupType` via a join table.
let columns = columnNamesForModel(targetType).mapIt("t." & it).join(",")
var fetchStmt =
"SELECT " & columns &
" FROM " & tableName(targetType) & " AS t " &
" JOIN " & joinTableName & " AS j " &
" ON t.id = jt." & tableName(targetType) & "_id " &
" WHERE jt." & tableName(lookupType) & "_id = ?"
var countStmt =
"SELECT COUNT(*) FROM " & joinTableName &
" WHERE " & tableName(lookupType) & "_id = ?"
if page.isSome: fetchStmt &= getPagingClause(page.get)
logQuery("findViaJoinTable", fetchStmt, [("id", $id)])
let records = db.getAllRows(sql(fetchStmt), $id)
.mapIt(rowToModel(targetType, it))
PagedRecords[targetType](
pagination: page,
records: records,
totalRecords:
if page.isNone: records.len
else: db.getRow(sql(countStmt))[0].parseInt)
macro generateProcsForModels*(dbType: type, modelTypes: openarray[type]): untyped = macro generateProcsForModels*(dbType: type, modelTypes: openarray[type]): untyped =
## Generate all standard access procedures for the given model types. For a ## Generate all standard access procedures for the given model types. For a
## `model class`_ named `TodoItem`, this will generate the following ## `model class`_ named `TodoItem`, this will generate the following
@@ -516,11 +585,12 @@ macro generateProcsForModels*(dbType: type, modelTypes: openarray[type]): untype
## proc deleteTodoItem*(db: TodoDB, rec: TodoItem): bool; ## proc deleteTodoItem*(db: TodoDB, rec: TodoItem): bool;
## proc deleteTodoItem*(db: TodoDB, id: idType): bool; ## proc deleteTodoItem*(db: TodoDB, id: idType): bool;
## proc updateTodoItem*(db: TodoDB, rec: TodoItem): bool; ## proc updateTodoItem*(db: TodoDB, rec: TodoItem): bool;
## proc createOrUpdateTodoItem*(db: TodoDB, rec: TodoItem): bool;
## ##
## proc findTodoItemsWhere*( ## proc findTodoItemsWhere*(
## db: TodoDB, whereClause: string, values: varargs[string]): TodoItem; ## db: TodoDB, whereClause: string, values: varargs[string]): TodoItem;
## ##
## `dbType` is expected to be some type that has a defined `withConn` ## `dbType` is expected to be some type that has a defined `withConnection`
## procedure (see `Database Object`_ for details). ## procedure (see `Database Object`_ for details).
## ##
## .. _Database Object: #database-object ## .. _Database Object: #database-object
@@ -533,38 +603,48 @@ macro generateProcsForModels*(dbType: type, modelTypes: openarray[type]): untype
let modelName = $(t.getType[1]) let modelName = $(t.getType[1])
let getName = ident("get" & modelName) let getName = ident("get" & modelName)
let getIfExistsName = ident("get" & modelName & "IfItExists")
let getAllName = ident("getAll" & pluralize(modelName)) let getAllName = ident("getAll" & pluralize(modelName))
let findWhereName = ident("find" & pluralize(modelName) & "Where") let findWhereName = ident("find" & pluralize(modelName) & "Where")
let createName = ident("create" & modelName) let createName = ident("create" & modelName)
let updateName = ident("update" & modelName) let updateName = ident("update" & modelName)
let createOrUpdateName = ident("createOrUpdate" & modelName)
let deleteName = ident("delete" & modelName) let deleteName = ident("delete" & modelName)
let idType = typeOfColumn(t, "id") let idType = typeOfColumn(t, "id")
result.add quote do: result.add quote do:
proc `getName`*(db: `dbType`, id: `idType`): `t` = proc `getName`*(db: `dbType`, id: `idType`): `t` =
db.withConn: result = getRecord(conn, `t`, id) db.withConnection conn: result = getRecord(conn, `t`, id)
proc `getIfExistsName`*(db: `dbType`, id: `idType`): Option[`t`] =
db.withConnection conn:
try: result = some(getRecord(conn, `t`, id))
except NotFoundError: result = none[`t`]()
proc `getAllName`*(db: `dbType`, pagination = none[PaginationParams]()): PagedRecords[`t`] = proc `getAllName`*(db: `dbType`, pagination = none[PaginationParams]()): PagedRecords[`t`] =
db.withConn: result = getAllRecords(conn, `t`, pagination) db.withConnection conn: result = getAllRecords(conn, `t`, pagination)
proc `findWhereName`*( proc `findWhereName`*(
db: `dbType`, db: `dbType`,
whereClause: string, whereClause: string,
values: varargs[string, dbFormat], values: varargs[string, dbFormat],
pagination = none[PaginationParams]()): PagedRecords[`t`] = pagination = none[PaginationParams]()): PagedRecords[`t`] =
db.withConn: db.withConnection conn:
result = findRecordsWhere(conn, `t`, whereClause, values, pagination) result = findRecordsWhere(conn, `t`, whereClause, values, pagination)
proc `createName`*(db: `dbType`, rec: `t`): `t` = proc `createName`*(db: `dbType`, rec: `t`): `t` =
db.withConn: result = createRecord(conn, rec) db.withConnection conn: result = createRecord(conn, rec)
proc `updateName`*(db: `dbType`, rec: `t`): bool = proc `updateName`*(db: `dbType`, rec: `t`): bool =
db.withConn: result = updateRecord(conn, rec) db.withConnection conn: result = updateRecord(conn, rec)
proc `createOrUpdateName`*(db: `dbType`, rec: `t`): `t` =
db.inTransaction: result = createOrUpdateRecord(conn, rec)
proc `deleteName`*(db: `dbType`, rec: `t`): bool = proc `deleteName`*(db: `dbType`, rec: `t`): bool =
db.withConn: result = deleteRecord(conn, rec) db.withConnection conn: result = deleteRecord(conn, rec)
proc `deleteName`*(db: `dbType`, id: `idType`): bool = proc `deleteName`*(db: `dbType`, id: `idType`): bool =
db.withConn: result = deleteRecord(conn, `t`, id) db.withConnection conn: result = deleteRecord(conn, `t`, id)
macro generateLookup*(dbType: type, modelType: type, fields: seq[string]): untyped = macro generateLookup*(dbType: type, modelType: type, fields: seq[string]): untyped =
## Create a lookup procedure for a given set of field names. For example, ## Create a lookup procedure for a given set of field names. For example,
@@ -584,7 +664,7 @@ macro generateLookup*(dbType: type, modelType: type, fields: seq[string]): untyp
# Create proc skeleton # Create proc skeleton
result = quote do: result = quote do:
proc `procName`*(db: `dbType`): PagedRecords[`modelType`] = proc `procName`*(db: `dbType`): PagedRecords[`modelType`] =
db.withConn: result = findRecordsBy(conn, `modelType`) db.withConnection conn: result = findRecordsBy(conn, `modelType`)
var callParams = quote do: @[] var callParams = quote do: @[]
@@ -608,11 +688,11 @@ macro generateLookup*(dbType: type, modelType: type, fields: seq[string]): untyp
# Add the call params to the inner procedure call # Add the call params to the inner procedure call
# result[6][0][1][0][1] is # result[6][0][1][0][1] is
# ProcDef -> [6]: StmtList (body) -> [0]: Call -> # ProcDef -> [6]: StmtList (body) -> [0]: Command ->
# [1]: StmtList (withConn body) -> [0]: Asgn (result =) -> # [2]: StmtList (withConnection body) -> [0]: Asgn (result =) ->
# [1]: Call (inner findRecords invocation) # [1]: Call (inner findRecords invocation)
result[6][0][1][0][1].add(callParams) result[6][0][2][0][1].add(callParams)
result[6][0][1][0][1].add(quote do: pagination) result[6][0][2][0][1].add(quote do: pagination)
macro generateProcsForFieldLookups*(dbType: type, modelsAndFields: openarray[tuple[t: type, fields: seq[string]]]): untyped = macro generateProcsForFieldLookups*(dbType: type, modelsAndFields: openarray[tuple[t: type, fields: seq[string]]]): untyped =
result = newStmtList() result = newStmtList()
@@ -626,7 +706,7 @@ macro generateProcsForFieldLookups*(dbType: type, modelsAndFields: openarray[tup
# Create proc skeleton # Create proc skeleton
let procDefAST = quote do: let procDefAST = quote do:
proc `procName`*(db: `dbType`): PagedRecords[`modelType`] = proc `procName`*(db: `dbType`): PagedRecords[`modelType`] =
db.withConn: result = findRecordsBy(conn, `modelType`) db.withConnection conn: result = findRecordsBy(conn, `modelType`)
var callParams = quote do: @[] var callParams = quote do: @[]
@@ -649,37 +729,105 @@ macro generateProcsForFieldLookups*(dbType: type, modelsAndFields: openarray[tup
result.add procDefAST result.add procDefAST
proc initPool*[D: DbConnType]( macro generateJoinTableProcs*(
connect: proc(): D, dbType, model1Type, model2Type: type,
poolSize = 10, joinTableName: string): untyped =
hardCap = false, ## Generate lookup procedures for a pair of models with a join table. For
healthCheckQuery = "SELECT 'true' AS alive"): DbConnPool[D] = ## example, given the TODO database demonstrated above, where `TodoItem` and
## `TimeEntry` have a many-to-many relationship, you might have a join table
## Initialize a new DbConnPool. See the `initDb` procedure in the `Example ## `todo_items_time_entries` with columns `todo_item_id` and `time_entry_id`.
## Fiber ORM Usage`_ for an example ## This macro will generate the following procedures:
## ##
## * `connect` must be a factory which creates a new `DbConn`. ## .. code-block:: Nim
## * `poolSize` sets the desired capacity of the connection pool. ## proc findTodoItemsByTimeEntry*(db: SampleDB, timeEntry: TimeEntry): seq[TodoItem]
## * `hardCap` defaults to `false`. ## proc findTimeEntriesByTodoItem*(db: SampleDB, todoItem: TodoItem): seq[TimeEntry]
## When `false`, the pool can grow beyond the configured capacity, but will
## release connections down to the its capacity (no less than `poolSize`).
## ##
## When `true` the pool will not create more than its configured capacity. ## `dbType` is expected to be some type that has a defined `withConnection`
## It a connection is requested, none are free, and the pool is at ## procedure (see `Database Object`_ for details).
## capacity, this will result in an Error being raised.
## * `healthCheckQuery` should be a simple and fast SQL query that the pool
## can use to test the liveliness of pooled connections.
## ##
## .. _Example Fiber ORM Usage: #basic-usage-example-fiber-orm-usage ## .. _Database Object: #database-object
result = newStmtList()
initDbConnPool(DbConnPoolConfig[D]( if model1Type.getType[1].typeKind == ntyRef or
connect: connect, model2Type.getType[1].typeKind == ntyRef:
poolSize: poolSize, raise newException(ValueError,
hardCap: hardCap, "fiber_orm model object must be objects, not refs")
healthCheckQuery: healthCheckQuery))
template inTransaction*[D: DbConnType](db: DbConnPool[D], body: untyped) = let model1Name = $(model1Type.getType[1])
pool.withConn(db): let model2Name = $(model2Type.getType[1])
let getModel1Name = ident("get" & pluralize(model1Name) & "By" & model2Name)
let getModel2Name = ident("get" & pluralize(model2Name) & "By" & model1Name)
let id1Type = typeOfColumn(model1Type, "id")
let id2Type = typeOfColumn(model2Type, "id")
let joinTableNameNode = newStrLitNode($joinTableName)
result.add quote do:
proc `getModel1Name`*(
db: `dbType`,
id: `id2Type`,
pagination = none[PaginationParams]()): PagedRecords[`model1Type`] =
db.withConnection conn:
result = findViaJoinTable(
conn,
`joinTableNameNode`,
`model1Type`,
`model2Type`,
id,
pagination)
proc `getModel1Name`*(
db: `dbType`,
rec: `model2Type`,
pagination = none[PaginationParams]()): PagedRecords[`model1Type`] =
db.withConnection conn:
result = findViaJoinTable(
conn,
`joinTableNameNode`,
`model1Type`,
rec,
pagination)
proc `getModel2Name`*(
db: `dbType`,
id: `id1Type`,
pagination = none[PaginationParams]()): Pagedrecords[`model2Type`] =
db.withConnection conn:
result = findViaJoinTable(
conn,
`joinTableNameNode`,
`model2Type`,
`model1Type`,
id,
pagination)
proc `getModel2Name`*(
db: `dbType`,
rec: `model1Type`,
pagination = none[PaginationParams]()): Pagedrecords[`model2Type`] =
db.withConnection conn:
result = findViaJoinTable(
conn,
`joinTableNameNode`,
`model2Type`,
rec,
pagination)
proc associate*(
db: `dbType`,
rec1: `model1Type`,
rec2: `model2Type`): void =
db.withConnection conn:
associate(conn, `joinTableNameNode`, rec1, rec2)
proc associate*(
db: `dbType`,
rec2: `model2Type`,
rec1: `model1Type`): void =
db.withConnection conn:
associate(conn, `joinTableNameNode`, rec1, rec2)
template inTransaction*(db, body: untyped) =
db.withConnection conn:
conn.exec(sql"BEGIN TRANSACTION") conn.exec(sql"BEGIN TRANSACTION")
try: try:
body body

View File

@@ -1,3 +1,3 @@
import std/[db_postgres, db_sqlite] import db_connector/[db_postgres, db_sqlite]
type DbConnType* = db_postgres.DbConn or db_sqlite.DbConn type DbConnType* = db_postgres.DbConn or db_sqlite.DbConn

View File

@@ -4,99 +4,77 @@
## Simple database connection pooling implementation compatible with Fiber ORM. ## Simple database connection pooling implementation compatible with Fiber ORM.
import std/[db_common, logging, sequtils, strutils, sugar] when (NimMajor, NimMinor, NimPatch) < (2, 0, 0):
when not defined(gcArc) and not defined (gcOrc):
{.error: "fiber_orm requires either --mm:arc or --mm:orc.".}
from std/db_sqlite import getRow import std/[deques, locks, sequtils, sugar]
from std/db_postgres import getRow import db_connector/db_common
from db_connector/db_sqlite import getRow, close
from db_connector/db_postgres import getRow, close
import namespaced_logging
import ./db_common as fiber_db_common import ./db_common as fiber_db_common
import ./private/logging
type type
DbConnPoolConfig*[D: DbConnType] = object DbConnPool*[D: DbConnType] = ptr DbConnPoolObj[D]
connect*: () -> D ## Factory procedure to create a new DBConn
poolSize*: int ## The pool capacity.
hardCap*: bool ## Is the pool capacity a hard cap? DbConnPoolObj[D: DbConnType] = object
##
## When `false`, the pool can grow beyond the
## configured capacity, but will release connections
## down to the its capacity (no less than `poolSize`).
##
## When `true` the pool will not create more than its
## configured capacity. It a connection is requested,
## none are free, and the pool is at capacity, this
## will result in an Error being raised.
healthCheckQuery*: string ## Should be a simple and fast SQL query that the
## pool can use to test the liveliness of pooled
## connections.
PooledDbConn[D: DbConnType] = ref object
conn: D
id: int
free: bool
DbConnPool*[D: DbConnType] = ref object
## Database connection pool ## Database connection pool
conns: seq[PooledDbConn[D]] connect: proc (): D {.raises: [DbError].}
cfg: DbConnPoolConfig[D] healthCheckQuery: SqlQuery
lastId: int entries: Deque[D]
cond: Cond
lock: Lock
var logNs {.threadvar.}: LoggingNamespace
template log(): untyped = proc close*[D: DbConnType](pool: DbConnPool[D]) =
if logNs.isNil: logNs = getLoggerForNamespace(namespace = "fiber_orm/pool", level = lvlNotice) ## Safely close all connections and release resources for the given pool.
logNs getLogger("pool").debug("closing connection pool")
withLock(pool.lock):
while pool.entries.len > 0: close(pool.entries.popFirst())
proc initDbConnPool*[D: DbConnType](cfg: DbConnPoolConfig[D]): DbConnPool[D] = deinitLock(pool.lock)
log().debug("Initializing new pool (size: " & $cfg.poolSize) deinitCond(pool.cond)
result = DbConnPool[D]( `=destroy`(pool[])
conns: @[], deallocShared(pool)
cfg: cfg)
proc newConn[D: DbConnType](pool: DbConnPool[D]): PooledDbConn[D] =
log().debug("Creating a new connection to add to the pool.")
pool.lastId += 1
let conn = pool.cfg.connect()
result = PooledDbConn[D](
conn: conn,
id: pool.lastId,
free: true)
pool.conns.add(result)
proc maintain[D: DbConnType](pool: DbConnPool[D]): void = proc newDbConnPool*[D: DbConnType](
log().debug("Maintaining pool. $# connections." % [$pool.conns.len]) poolSize: int,
pool.conns.keepIf(proc (pc: PooledDbConn[D]): bool = connectFunc: proc(): D {.raises: [DbError].},
if not pc.free: return true healthCheckQuery = "SELECT 1;"): DbConnPool[D] =
## Initialize a new DbConnPool. See the `initDb` procedure in the `Example
## Fiber ORM Usage`_ for an example
##
## * `connect` must be a factory which creates a new `DbConn`.
## * `poolSize` sets the desired capacity of the connection pool.
## * `healthCheckQuery` should be a simple and fast SQL query that the pool
## can use to test the liveliness of pooled connections. By default it uses
## `SELECT 1;`
##
## .. _Example Fiber ORM Usage: ../fiber_orm.html#basic-usage-example-fiber-orm-usage
try: result = cast[DbConnPool[D]](allocShared0(sizeof(DbConnPoolObj[D])))
discard getRow(pc.conn, sql(pool.cfg.healthCheckQuery), []) initCond(result.cond)
return true initLock(result.lock)
except: result.entries = initDeque[D](poolSize)
try: pc.conn.close() # try to close the connection result.connect = connectFunc
except: discard "" result.healthCheckQuery = sql(healthCheckQuery)
return false
)
log().debug(
"Pruned dead connections. $# connections remaining." %
[$pool.conns.len])
let freeConns = pool.conns.filterIt(it.free) try:
if pool.conns.len > pool.cfg.poolSize and freeConns.len > 0: for _ in 0 ..< poolSize: result.entries.addLast(connectFunc())
let numToCull = min(freeConns.len, pool.conns.len - pool.cfg.poolSize) except DbError as ex:
try: result.close()
except: discard
getLogger("pool").error(
msg = "unable to initialize connection pool",
err = ex)
raise ex
if numToCull > 0:
let toCull = freeConns[0..numToCull]
pool.conns.keepIf((pc) => toCull.allIt(it.id != pc.id))
for culled in toCull:
try: culled.conn.close()
except: discard ""
log().debug(
"Trimming pool size. Culled $# free connections. $# connections remaining." %
[$toCull.len, $pool.conns.len])
proc take*[D: DbConnType](pool: DbConnPool[D]): tuple[id: int, conn: D] = proc take*[D: DbConnType](pool: DbConnPool[D]): D {.raises: [DbError], gcsafe.} =
## Request a connection from the pool. Returns a DbConn if the pool has free ## Request a connection from the pool. Returns a DbConn if the pool has free
## connections, or if it has the capacity to create a new connection. If the ## connections, or if it has the capacity to create a new connection. If the
## pool is configured with a hard capacity limit and is out of free ## pool is configured with a hard capacity limit and is out of free
@@ -104,32 +82,33 @@ proc take*[D: DbConnType](pool: DbConnPool[D]): tuple[id: int, conn: D] =
## ##
## Connections taken must be returned via `release` when the caller is ## Connections taken must be returned via `release` when the caller is
## finished using them in order for them to be released back to the pool. ## finished using them in order for them to be released back to the pool.
pool.maintain withLock(pool.lock):
let freeConns = pool.conns.filterIt(it.free) while pool.entries.len == 0: wait(pool.cond, pool.lock)
result = pool.entries.popFirst()
log().debug( # check that the connection is healthy
"Providing a new connection ($# currently free)." % [$freeConns.len]) try: discard getRow(result, pool.healthCheckQuery, [])
except DbError:
{.gcsafe.}:
# if it's not, let's try to close it and create a new connection
try:
getLogger("pool").info(
"pooled connection failed health check, opening a new connection")
close(result)
except: discard
result = pool.connect()
let reserved =
if freeConns.len > 0: freeConns[0]
else: pool.newConn()
reserved.free = false proc release*[D: DbConnType](pool: DbConnPool[D], conn: D) {.raises: [], gcsafe.} =
log().debug("Reserve connection $#" % [$reserved.id])
return (id: reserved.id, conn: reserved.conn)
proc release*[D: DbConnType](pool: DbConnPool[D], connId: int): void =
## Release a connection back to the pool. ## Release a connection back to the pool.
log().debug("Reclaiming released connaction $#" % [$connId]) withLock(pool.lock):
let foundConn = pool.conns.filterIt(it.id == connId) pool.entries.addLast(conn)
if foundConn.len > 0: foundConn[0].free = true signal(pool.cond)
template withConn*[D: DbConnType](pool: DbConnPool[D], stmt: untyped): untyped = template withConnection*[D: DbConnType](pool: DbConnPool[D], conn, stmt: untyped): untyped =
## Convenience template to provide a connection from the pool for use in a ## Convenience template to provide a connection from the pool for use in a
## statement block, automatically releasing that connnection when done. ## statement block, automatically releasing that connnection when done.
## block:
## The provided connection is injected as the variable `conn` in the let conn = take(pool)
## statement body. try: stmt
let (connId, conn {.inject.}) = take(pool) finally: release(pool, conn)
try: stmt
finally: release(pool, connId)

View File

@@ -0,0 +1,34 @@
import std/[json, options]
import namespaced_logging
export namespaced_logging.log
export namespaced_logging.debug
export namespaced_logging.info
export namespaced_logging.notice
export namespaced_logging.warn
export namespaced_logging.error
export namespaced_logging.fatal
var logService {.threadvar.}: Option[ThreadLocalLogService]
var logger {.threadvar.}: Option[Logger]
proc makeQueryLogEntry(
m: string,
sql: string,
args: openArray[(string, string)] = []): JsonNode =
result = %*{ "method": m, "sql": sql }
for (k, v) in args: result[k] = %v
proc logQuery*(methodName: string, sqlStmt: string, args: openArray[(string, string)] = []) =
# namespaced_logging would do this check for us, but we don't want to even
# build the log object if we're not actually logging
if logService.isNone: return
if logger.isNone: logger = logService.getLogger("fiber_orm/query")
logger.debug(makeQueryLogEntry(methodName, sqlStmt, args))
proc enableDbLogging*(svc: ThreadLocalLogService) =
logService = some(svc)
proc getLogger*(scope: string): Option[Logger] =
logService.getLogger("fiber_orm/" & scope)

View File

@@ -9,6 +9,11 @@ import uuids
import std/nre except toSeq import std/nre except toSeq
type type
PaginationParams* = object
pageSize*: int
offset*: int
orderBy*: Option[seq[string]]
MutateClauses* = object MutateClauses* = object
## Data structure to hold information about the clauses that should be ## Data structure to hold information about the clauses that should be
## added to a query. How these clauses are used will depend on the query. ## added to a query. How these clauses are used will depend on the query.
@@ -22,9 +27,11 @@ const ISO_8601_FORMATS = @[
"yyyy-MM-dd'T'HH:mm:ssz", "yyyy-MM-dd'T'HH:mm:ssz",
"yyyy-MM-dd'T'HH:mm:sszzz", "yyyy-MM-dd'T'HH:mm:sszzz",
"yyyy-MM-dd'T'HH:mm:ss'.'fffzzz", "yyyy-MM-dd'T'HH:mm:ss'.'fffzzz",
"yyyy-MM-dd'T'HH:mm:ss'.'ffffzzz",
"yyyy-MM-dd HH:mm:ssz", "yyyy-MM-dd HH:mm:ssz",
"yyyy-MM-dd HH:mm:sszzz", "yyyy-MM-dd HH:mm:sszzz",
"yyyy-MM-dd HH:mm:ss'.'fffzzz" "yyyy-MM-dd HH:mm:ss'.'fffzzz",
"yyyy-MM-dd HH:mm:ss'.'ffffzzz"
] ]
proc parseIso8601(val: string): DateTime = proc parseIso8601(val: string): DateTime =
@@ -102,7 +109,7 @@ proc dbFormat*[T](list: seq[T]): string =
proc dbFormat*[T](item: T): string = proc dbFormat*[T](item: T): string =
## For all other types, fall back on a defined `$` function to create a ## For all other types, fall back on a defined `$` function to create a
## string version of the value we can include in an SQL query> ## string version of the value we can include in an SQL query.
return $item return $item
type DbArrayParseState = enum type DbArrayParseState = enum
@@ -126,18 +133,20 @@ proc parsePGDatetime*(val: string): DateTime =
var correctedVal = val; var correctedVal = val;
# PostgreSQL will truncate any trailing 0's in the millisecond value leading # The Nim `times#format` function only recognizes 3-digit millisecond values
# to values like `2020-01-01 16:42.3+00`. This cannot currently be parsed by # but PostgreSQL will sometimes send 1-2 digits, truncating any trailing 0's,
# the standard times format as it expects exactly three digits for # or sometimes provide more than three digits of preceision in the millisecond value leading
# millisecond values. So we have to detect this and pad out the millisecond # to values like `2020-01-01 16:42.3+00` or `2025-01-06 00:56:00.9007+00`.
# value to 3 digits. # This cannot currently be parsed by the standard times format as it expects
let PG_PARTIAL_FORMAT_REGEX = re"(\d{4}-\d{2}-\d{2}( |'T')\d{2}:\d{2}:\d{2}\.)(\d{1,2})(\S+)?" # exactly three digits for millisecond values. So we have to detect this and
# coerce the millisecond value to exactly 3 digits.
let PG_PARTIAL_FORMAT_REGEX = re"(\d{4}-\d{2}-\d{2}( |'T')\d{2}:\d{2}:\d{2}\.)(\d+)(\S+)?"
let match = val.match(PG_PARTIAL_FORMAT_REGEX) let match = val.match(PG_PARTIAL_FORMAT_REGEX)
if match.isSome: if match.isSome:
let c = match.get.captures let c = match.get.captures
if c.toSeq.len == 2: correctedVal = c[0] & alignLeft(c[2], 3, '0') if c.toSeq.len == 2: correctedVal = c[0] & alignLeft(c[2], 3, '0')[0..2]
else: correctedVal = c[0] & alignLeft(c[2], 3, '0') & c[3] else: correctedVal = c[0] & alignLeft(c[2], 3, '0')[0..2] & c[3]
var errStr = "" var errStr = ""
@@ -146,7 +155,7 @@ proc parsePGDatetime*(val: string): DateTime =
try: return correctedVal.parse(df) try: return correctedVal.parse(df)
except: errStr &= "\n\t" & getCurrentExceptionMsg() except: errStr &= "\n\t" & getCurrentExceptionMsg()
raise newException(ValueError, "Cannot parse PG date. Tried:" & errStr) raise newException(ValueError, "Cannot parse PG date '" & correctedVal & "'. Tried:" & errStr)
proc parseDbArray*(val: string): seq[string] = proc parseDbArray*(val: string): seq[string] =
## Parse a Postgres array column into a Nim seq[string] ## Parse a Postgres array column into a Nim seq[string]
@@ -208,7 +217,7 @@ proc parseDbArray*(val: string): seq[string] =
result.add(curStr) result.add(curStr)
func createParseStmt*(t, value: NimNode): NimNode = func createParseStmt*(t, value: NimNode): NimNode =
## Utility method to create the Nim cod required to parse a value coming from ## Utility method to create the Nim code required to parse a value coming from
## the a database query. This is used by functions like `rowToModel` to parse ## the a database query. This is used by functions like `rowToModel` to parse
## the dataabase columns into the Nim object fields. ## the dataabase columns into the Nim object fields.
@@ -231,7 +240,7 @@ func createParseStmt*(t, value: NimNode): NimNode =
elif t.getType == DateTime.getType: elif t.getType == DateTime.getType:
result = quote do: parsePGDatetime(`value`) result = quote do: parsePGDatetime(`value`)
else: error "Unknown value object type: " & $t.getTypeInst else: error "Cannot parse column with unknown object type: " & $t.getTypeInst
elif t.typeKind == ntyGenericInst: elif t.typeKind == ntyGenericInst:
@@ -245,7 +254,7 @@ func createParseStmt*(t, value: NimNode): NimNode =
if `value`.len == 0: none[`innerType`]() if `value`.len == 0: none[`innerType`]()
else: some(`parseStmt`) else: some(`parseStmt`)
else: error "Unknown generic instance type: " & $t.getTypeInst else: error "Cannot parse column with unknown generic instance type: " & $t.getTypeInst
elif t.typeKind == ntyRef: elif t.typeKind == ntyRef:
@@ -253,7 +262,7 @@ func createParseStmt*(t, value: NimNode): NimNode =
result = quote do: parseJson(`value`) result = quote do: parseJson(`value`)
else: else:
error "Unknown ref type: " & $t.getTypeInst error "Cannot parse column with unknown ref type: " & $t.getTypeInst
elif t.typeKind == ntySequence: elif t.typeKind == ntySequence:
let innerType = t[1] let innerType = t[1]
@@ -272,14 +281,14 @@ func createParseStmt*(t, value: NimNode): NimNode =
result = quote do: parseFloat(`value`) result = quote do: parseFloat(`value`)
elif t.typeKind == ntyBool: elif t.typeKind == ntyBool:
result = quote do: "true".startsWith(`value`.toLower) result = quote do: "true".startsWith(`value`.toLower) or `value` == "1"
elif t.typeKind == ntyEnum: elif t.typeKind == ntyEnum:
let innerType = t.getTypeInst let innerType = t.getTypeInst
result = quote do: parseEnum[`innerType`](`value`) result = quote do: parseEnum[`innerType`](`value`)
else: else:
error "Unknown value type: " & $t.typeKind error "Cannot parse column with unknown value type: " & $t.typeKind
func fields(t: NimNode): seq[tuple[fieldIdent: NimNode, fieldType: NimNode]] = func fields(t: NimNode): seq[tuple[fieldIdent: NimNode, fieldType: NimNode]] =
#[ #[
@@ -447,6 +456,19 @@ macro populateMutateClauses*(t: typed, newRecord: bool, mc: var MutateClauses):
`mc`.placeholders.add("?") `mc`.placeholders.add("?")
`mc`.values.add(dbFormat(`t`.`fieldIdent`)) `mc`.values.add(dbFormat(`t`.`fieldIdent`))
proc getPagingClause*(page: PaginationParams): string =
## Given a `PaginationParams` object, return the SQL clause necessary to
## limit the number of records returned by a query.
result = ""
if page.orderBy.isSome:
let orderByClause = page.orderBy.get.map(identNameToDb).join(",")
result &= " ORDER BY " & orderByClause
else:
result &= " ORDER BY id"
result &= " LIMIT " & $page.pageSize & " OFFSET " & $page.offset
## .. _model class: ../fiber_orm.html#objectminusrelational-modeling-model-class ## .. _model class: ../fiber_orm.html#objectminusrelational-modeling-model-class
## .. _rules for name mapping: ../fiber_orm.html ## .. _rules for name mapping: ../fiber_orm.html
## .. _table name: ../fiber_orm.html ## .. _table name: ../fiber_orm.html