Compare commits

..

No commits in common. "main" and "0.2.1" have entirely different histories.
main ... 0.2.1

9 changed files with 274 additions and 678 deletions

1
.gitignore vendored
View File

@ -2,4 +2,3 @@ nimcache/
*.sw?
build/
db_migrate
.gradle/

View File

@ -1,36 +1,4 @@
# DB Migrate
Nim DB Migrate
==============
Small tool(s) to manage database migrations in various languages.
## Usage
```
Usage:
db_migrate [options] create <migration-name>
db_migrate [options] up [<count>]
db_migrate [options] down [<count>]
db_migrate [options] init <schema-name>
db_migrate (-V | --version)
db_migrate (-h | --help)
Options:
-c --config <config-file> Use the given configuration file (defaults to
"database.properties").
-q --quiet Suppress log information.
-v --verbose Print detailed log information.
--very-verbose Print very detailed log information.
-V --version Print the tools version information.
-h --help Print this usage information.
```
## Database Config Format
The database config is formatted as JSON. The following keys are supported by
all of the implementations:
* `sqlDir` -- Directory to store SQL files.
The following keys are supported by the Nim implementation:
* `connectionString` -- The connection string used to connect to the database.
Small tool to manage database migrations in Nim.

View File

@ -1,28 +0,0 @@
// Gradle build for the Groovy implementation of db-migrate.
apply plugin: 'groovy'
apply plugin: 'application'
apply plugin: 'maven'

group = 'com.jdblabs'
version = '0.2.5'
mainClassName = 'com.jdblabs.dbmigrate.DbMigrate'

repositories {
    mavenLocal()
    mavenCentral()
    // NOTE(review): plain-HTTP repository — artifacts could be tampered
    // with in transit; confirm whether the host supports HTTPS.
    maven { url "http://mvn.jdb-labs.com/repo" }
}

dependencies {
    compile localGroovy()
    compile 'ch.qos.logback:logback-classic:1.1.3'   // logging backend
    compile 'ch.qos.logback:logback-core:1.1.3'
    compile 'com.jdbernard:jdb-util:4.4'
    compile 'com.offbytwo:docopt:0.6.0.20150202'     // CLI argument parsing
    compile 'com.zaxxer:HikariCP:2.4.3'              // JDBC connection pool
    testCompile 'junit:junit:4.12'
    runtime 'com.h2database:h2:1.4.185'              // JDBC drivers
    runtime 'org.postgresql:postgresql:9.4.1207.jre7'
}

268
db_migrate.nim Normal file
View File

@ -0,0 +1,268 @@
## DB Migrate
## ==========
##
## Simple tool to manage database migrations.
import algorithm, json, times, os, strutils, docopt, db_postgres, sets,
sequtils, logging
type
  DbMigrateConfig* = tuple[ driver, sqlDir, connectionString: string, logLevel: Level ]
    ## Tool configuration parsed from the JSON config file (see `loadConfig`):
    ## the database driver name, the directory holding the SQL migration
    ## scripts, the database connection string, and the console log threshold.
proc createMigrationsTable(conn: DbConn): void =
  ## Ensure the `migrations` bookkeeping table exists (idempotent thanks to
  ## IF NOT EXISTS). Each applied migration is recorded here by name.
  const createTableSql = """
    CREATE TABLE IF NOT EXISTS migrations (
      id SERIAL PRIMARY KEY,
      name VARCHAR NOT NULL,
      run_at TIMESTAMP NOT NULL DEFAULT NOW())"""
  exec(conn, sql(createTableSql))
proc rollbackWithErr (pgConn: DbConn, errMsg: string): void =
  ## Abort the current transaction, then raise a `DbError` carrying `errMsg`.
  exec(pgConn, sql"ROLLBACK")
  dbError(errMsg)
proc loadConfig*(filename: string): DbMigrateConfig =
  ## Load a DbMigrateConfig from the JSON file at `filename`.
  ##
  ## Recognized keys: "driver" (default "postgres"), "sqlDir" (default
  ## "migrations"), "connectionString" (required), and "logLevel" (one of
  ## the std logging LevelNames; defaults to lvlInfo).
  ##
  ## Raises IOError if the file cannot be opened, a JSON parsing error on
  ## malformed input, and KeyError when "connectionString" is absent.
  let cfg = json.parseFile(filename)

  # Default to lvlInfo. Previously the level was left uninitialized when
  # the "logLevel" key was absent, which zero-initializes to lvlAll.
  var logLevel: Level = lvlInfo
  if cfg.hasKey("logLevel"):
    let idx = find(LevelNames, cfg["logLevel"].getStr.toUpper)
    # Unrecognized level names fall back to lvlInfo.
    logLevel = if idx == -1: lvlInfo else: (Level)(idx)

  return (
    # Default driver was previously misspelled "postres".
    driver: if cfg.hasKey("driver"): cfg["driver"].getStr else: "postgres",
    sqlDir: if cfg.hasKey("sqlDir"): cfg["sqlDir"].getStr else: "migrations",
    connectionString: cfg["connectionString"].getStr,
    logLevel: logLevel)
proc createMigration*(config: DbMigrateConfig, migrationName: string): seq[string] =
  ## Create a timestamped pair of UP/DOWN migration script stubs in
  ## `config.sqlDir` and return the two file paths (UP first).
  let timestamp = getTime().getLocalTime().format("yyyyMMddHHmmss")
  let prefix = timestamp & "-" & migrationName
  let scriptDesc = migrationName & " (" & timestamp & ")"

  result = @[]
  # Write one stub per direction; each gets a single header comment line.
  for direction in ["up", "down"]:
    let path = joinPath(config.sqlDir, prefix & "-" & direction & ".sql")
    let scriptFile = open(path, fmWrite)
    scriptFile.writeLine "-- " & direction.toUpper & " script for " & scriptDesc
    scriptFile.close()
    result.add(path)
proc diffMigrations*(pgConn: DbConn, config: DbMigrateConfig):
    tuple[ run, notRun, missing: seq[string] ] =
  ## Compare the migrations recorded in the database's `migrations` table
  ## against the migration files present in `config.sqlDir`.
  ## Returns sorted migration-name sequences:
  ##   - run:     recorded as already applied
  ##   - notRun:  available on disk but not yet applied
  ##   - missing: applied out of sequence (a gap), signalling an
  ##              inconsistent database state

  # Query the database to find out what migrations have been run.
  var migrationsRun = initSet[string]()
  for row in pgConn.fastRows(sql"SELECT * FROM migrations ORDER BY name", @[]):
    migrationsRun.incl(row[1])

  # Inspect the filesystem to see what migrations are available.
  var migrationsAvailable = initSet[string]()
  for filePath in walkFiles(joinPath(config.sqlDir, "*.sql")):
    var migrationName = filePath.extractFilename
    # Both the -up and -down files reduce to the same migration name.
    migrationName.removeSuffix("-up.sql")
    migrationName.removeSuffix("-down.sql")
    migrationsAvailable.incl(migrationName)

  # Diff with the list of migrations that we have in our migrations
  # directory.
  let migrationsInOrder =
    toSeq(migrationsAvailable.items).sorted(system.cmp)

  var migrationsNotRun = newSeq[string]()
  var missingMigrations = newSeq[string]()

  for migration in migrationsInOrder:
    if not migrationsRun.contains(migration):
      migrationsNotRun.add(migration)

    # if we've already seen some migrations that have not been run, but this
    # one has been, that means we have a gap and are missing migrations
    elif migrationsNotRun.len > 0:
      missingMigrations.add(migrationsNotRun)
      migrationsNotRun = newSeq[string]()

  return (run: toSeq(migrationsRun.items).sorted(system.cmp),
          notRun: migrationsNotRun,
          missing: missingMigrations)
proc readStatements*(filename: string): seq[SqlQuery] =
  ## Read the SQL file at `filename` and split it into individual
  ## `;`-terminated statements, skipping `--` comment lines.
  ##
  ## The previous implementation split first and then discarded any chunk
  ## whose stripped text began with "--", which silently dropped real SQL
  ## that followed a comment line inside the same chunk. Strip comment
  ## lines first, then split.
  let sqlLines = filename.readFile.splitLines.
    filter(proc(ln: string): bool = not ln.strip.startsWith("--"))
  result = sqlLines.join("\n").split(';').
    filter(proc(st: string): bool = st.strip.len > 0).
    map(proc(st: string): SqlQuery = sql(st & ";"))
proc up*(pgConn: DbConn, config: DbMigrateConfig, toRun: seq[string]): seq[string] =
  ## Apply the named migrations' UP scripts, in order, inside a single
  ## transaction, recording each one in the `migrations` table. On any
  ## failure the transaction is rolled back and a DbError is raised.
  ## Returns the migrations that were applied.
  var migrationsRun = newSeq[string]()

  # Begin a transaction.
  pgConn.exec(sql"BEGIN")

  # Apply each of the migrations.
  for migration in toRun:
    info migration
    let filename = joinPath(config.sqlDir, migration & "-up.sql")

    if not filename.fileExists:
      pgConn.rollbackWithErr "Can not find UP file for " & migration &
        ". Expected '" & filename & "'."

    let statements = filename.readStatements

    try:
      for statement in statements: pgConn.exec(statement)
      # Record the migration so diffMigrations sees it as applied.
      pgConn.exec(sql"INSERT INTO migrations (name) VALUES (?);", migration)
    except DbError:
      pgConn.rollbackWithErr "Migration '" & migration & "' failed:\n\t" &
        getCurrentExceptionMsg()

    migrationsRun.add(migration)

  pgConn.exec(sql"COMMIT")
  return migrationsRun
proc down*(pgConn: DbConn, config: DbMigrateConfig, migrationsToDown: seq[string]): seq[string] =
  ## Revert the named migrations by running their DOWN scripts, in the
  ## given order, inside a single transaction, removing each from the
  ## `migrations` table. On failure the transaction is rolled back and a
  ## DbError is raised. Returns the migrations that were reverted.
  var migrationsDowned = newSeq[string]()

  pgConn.exec(sql"BEGIN")

  for migration in migrationsToDown:
    info migration
    let filename = joinPath(config.sqlDir, migration & "-down.sql")

    if not filename.fileExists:
      pgConn.rollbackWithErr "Can not find DOWN file for " & migration &
        ". Expected '" & filename & "'."

    let statements = filename.readStatements

    try:
      for statement in statements: pgConn.exec(statement)
      # Un-record the migration so it shows as not-run again.
      pgConn.exec(sql"DELETE FROM migrations WHERE name = ?;", migration)
    except DbError:
      pgConn.rollbackWithErr "Migration '" & migration & "' failed:\n\t" &
        getCurrentExceptionMsg()

    migrationsDowned.add(migration)

  pgConn.exec(sql"COMMIT")
  return migrationsDowned
when isMainModule:
  ## CLI entry point: parse arguments, load config, set up logging, and
  ## dispatch to create/up/down.
  let doc = """
Usage:
  db_migrate [options] create <migration-name>
  db_migrate [options] up [<count>]
  db_migrate [options] down [<count>]
  db_migrate [options] init <schema-name>
  db_migrate (-V | --version)

Options:
  -c --config <config-file>   Use the given configuration file (defaults to
                              "database.json").
  -q --quiet                  Suppress log information.
  -v --verbose                Print detailed log information.
  --very-verbose              Print very detailed log information.
  -V --version                Print the tools version information.
"""

  # Parse arguments
  let args = docopt(doc, version = "db-migrate 0.2.1")

  # Log a fatal message and terminate with a failure exit code.
  let exitErr = proc(msg: string): void =
    fatal("db_migrate: " & msg)
    quit(QuitFailure)

  # Load configuration file.
  # BUGFIX: docopt stores an option's value under the option's own name
  # ("--config"), not under its placeholder ("<config-file>"); the old
  # lookup meant -c/--config was silently ignored.
  let configFilename =
    if args["--config"]: $args["--config"]
    else: "database.json"

  var config: DbMigrateConfig
  try:
    config = loadConfig(configFilename)
  except IOError:
    exitErr "Cannot open config file: " & configFilename
  except:
    exitErr "Error parsing config file: " & configFilename & "\L\t" & getCurrentExceptionMsg()

  # Command-line verbosity flags override the configured log level.
  logging.addHandler(newConsoleLogger())
  if args["--quiet"]: logging.setLogFilter(lvlError)
  elif args["--very-verbose"]: logging.setLogFilter(lvlAll)
  elif args["--verbose"]: logging.setlogFilter(lvlDebug)
  else: logging.setLogFilter(config.logLevel)

  # Check for migrations directory
  if not existsDir config.sqlDir:
    try:
      warn "SQL directory '" & config.sqlDir &
        "' does not exist and will be created."
      createDir config.sqlDir
    except IOError:
      exitErr "Unable to create directory: " & config.sqlDir & ":\L\T" & getCurrentExceptionMsg()

  # Execute commands
  if args["create"]:
    # `create` needs no database connection.
    try:
      let filesCreated = createMigration(config, $args["<migration-name>"])
      info "Created new migration files:"
      for filename in filesCreated: info "\t" & filename
    except IOError:
      exitErr "Unable to create migration scripts: " & getCurrentExceptionMsg()

  else:
    let pgConn: DbConn =
      try: open("", "", "", config.connectionString)
      except DbError:
        exitErr "Unable to connect to the database: " & getCurrentExceptionMsg()
        (DbConn)(nil)  # unreachable (exitErr quits); satisfies the expression type

    pgConn.createMigrationsTable

    let (run, notRun, missing) = diffMigrations(pgConn, config)

    # Make sure we have no gaps (database is in an unknown state)
    if missing.len > 0:
      exitErr "Database is in an inconsistent state. Migrations have been " &
        "run that are not sequential."

    if args["up"]:
      try:
        # Without a count, run everything that is pending.
        let count = if args["<count>"]: parseInt($args["<count>"]) else: high(int)
        let toRun = if count < notRun.len: notRun[0..<count] else: notRun
        let migrationsRun = pgConn.up(config, toRun)
        if count < high(int): info "Went up " & $(migrationsRun.len) & "."
      except DbError:
        exitErr "Unable to migrate database: " & getCurrentExceptionMsg()

    elif args["down"]:
      try:
        # Without a count, go down exactly one migration.
        let count = if args["<count>"]: parseInt($args["<count>"]) else: 1
        let toRun = if count < run.len: run.reversed[0..<count] else: run.reversed
        let migrationsRun = pgConn.down(config, toRun)
        info "Went down " & $(migrationsRun.len) & "."
      except DbError:
        exitErr "Unable to migrate database: " & getCurrentExceptionMsg()

    elif args["init"]: discard  # accepted but not yet implemented

    # Report the final state after whichever command ran.
    let newResults = diffMigrations(pgConn, config)
    if newResults.notRun.len > 0:
      info "Database is behind by " & $(newResults.notRun.len) & " migrations."
    else: info "Database is up to date."

View File

@ -1,12 +1,12 @@
# Package
bin = @["db_migrate"]
version = "0.3.2"
version = "0.2.1"
author = "Jonathan Bernard"
description = "Simple tool to handle database migrations."
license = "BSD"
srcDir = "src/main/nim"
# Dependencies
requires: @["nim >= 2.0.0", "docopt >= 0.1.0", "db_connector"]
requires: @["nim >= 0.13.0", "docopt >= 0.1.0"]

View File

@ -1 +0,0 @@
// Gradle project name for the Groovy implementation of db-migrate.
rootProject.name = "db-migrate.groovy"

View File

@ -1,254 +0,0 @@
package com.jdblabs.dbmigrate
import groovy.sql.Sql
import ch.qos.logback.classic.Level
import ch.qos.logback.classic.Logger as LBLogger
import com.jdbernard.util.AnsiEscapeCodeSequence as ANSI
import com.zaxxer.hikari.HikariConfig
import com.zaxxer.hikari.HikariDataSource
import java.io.FilenameFilter
import java.text.SimpleDateFormat
import org.docopt.Docopt
import org.slf4j.Logger
import org.slf4j.LoggerFactory
/**
 * db-migrate.groovy: command-line tool to manage SQL database migrations.
 * Migration scripts live in a configurable directory as timestamped
 * "&lt;name&gt;-up.sql"/"&lt;name&gt;-down.sql" pairs; applied migrations are
 * recorded by name in a "migrations" table.
 */
public class DbMigrate {

    public static final VERSION = "0.2.5"

    /** Docopt usage and options text. */
    public static final def DOC = """\
db-migrate.groovy v${VERSION}

Usage:
  db_migrate [options] create <migration-name>
  db_migrate [options] up [<count>]
  db_migrate [options] down [<count>]
  db_migrate [options] init <schema-name>
  db_migrate (-V | --version)
  db_migrate (-h | --help)

Options:
  -c --config <config-file>   Use the given configuration file (defaults to
                              "database.properties").
  -q --quiet                  Suppress log information.
  -v --verbose                Print detailed log information.
  --very-verbose              Print very detailed log information.
  -V --version                Print the tools version information.
  -h --help                   Print this usage information.
"""

    /** Timestamp prefix format for new migration file names. */
    private static sdf = new SimpleDateFormat('yyyyMMddHHmmss')
    private static Logger LOGGER = LoggerFactory.getLogger(DbMigrate)

    Sql sql        // database handle (assigned after config is loaded)
    File sqlDir    // directory containing the migration scripts

    public static void main(String[] args) {
        // Parse arguments
        def opts = new Docopt(DOC).withVersion("db-migrate.groovy v$VERSION").parse(args)

        if (opts['--version']) {
            println "db-migrate.groovy v$VERSION"
            System.exit(0) }

        if (opts['--help']) { println DOC; System.exit(0) }

        // TODO: Setup logging & output levels
        Logger clilog = LoggerFactory.getLogger("db-migrate.cli")

        if (opts['--quiet']) {
            ((LBLogger) LOGGER).level = Level.ERROR
            ((LBLogger) LoggerFactory.getLogger(LBLogger.ROOT_LOGGER_NAME))
                .level = Level.ERROR }

        if (opts['--verbose']) {
            ((LBLogger) LOGGER).level = Level.DEBUG
            ((LBLogger) LoggerFactory.getLogger(LBLogger.ROOT_LOGGER_NAME))
                .level = Level.INFO }

        if (opts['--very-verbose']) {
            ((LBLogger) LOGGER).level = Level.TRACE
            ((LBLogger) LoggerFactory.getLogger(LBLogger.ROOT_LOGGER_NAME))
                .level = Level.DEBUG }

        // Load the configuration file (explicit -c/--config first, then
        // the default database.properties).
        def givenCfg = new Properties()

        File cfgFile
        if (opts["--config"]) cfgFile = new File(opts["--config"])
        if (!cfgFile || !cfgFile.exists() || !cfgFile.isFile())
            cfgFile = new File("database.properties")

        if (!cfgFile.exists() || !cfgFile.isFile()) {
            clilog.warn("Config file '{}' does not exist or is not a regular file.",
                cfgFile.canonicalPath) }

        if (cfgFile.exists() && cfgFile.isFile()) {
            try { cfgFile.withInputStream { givenCfg.load(it) } }
            catch (Exception e) {
                clilog.error("Could not read configuration file.", e)
                givenCfg.clear() } }

        // Check for migrations directory
        File sqlDir = new File(givenCfg["sqlDir"] ?: 'migrations')
        if (!sqlDir.exists() || !sqlDir.isDirectory()) {
            clilog.error("'{}' does not exist or is not a directory.",
                sqlDir.canonicalPath)
            System.exit(1) }

        // Instantiate the DbMigrate instance
        DbMigrate dbmigrate = new DbMigrate(sqlDir: sqlDir)

        // If we've only been asked to create a new migration, we don't need to
        // setup the DB connection.
        if (opts['create']) {
            try {
                List<File> files = dbmigrate.createMigration(opts['<migration-name>'])
                clilog.info("Created new migration files:\n\t${files.name.join('\n\t')}")
                return }
            catch (Exception e) {
                clilog.error('Unable to create migration scripts.', e)
                System.exit(1) } }

        // Create the datasource. All config entries except sqlDir are
        // treated as HikariCP properties.
        Properties dsProps = new Properties()
        dsProps.putAll(givenCfg.findAll { it.key != 'sqlDir' })
        HikariDataSource hds = new HikariDataSource(new HikariConfig(dsProps))
        dbmigrate.sql = new Sql(hds)

        // Execute the appropriate command.
        if (opts['up']) dbmigrate.up(opts['<count>'])
        else if (opts['down']) dbmigrate.down(opts['<count>'] ?: 1) }

    /**
     * Create a timestamped pair of UP/DOWN migration script stubs in the
     * migrations directory and return both files (UP first).
     */
    public List<File> createMigration(String migrationName) {
        String timestamp = sdf.format(new Date())
        File upFile = new File(sqlDir, "$timestamp-$migrationName-up.sql")
        File downFile = new File(sqlDir, "$timestamp-$migrationName-down.sql")

        upFile.text = "-- UP script for $migrationName ($timestamp)"
        downFile.text = "-- DOWN script for $migrationName ($timestamp)"

        return [upFile, downFile] }

    /** Create the migrations bookkeeping table if it does not exist. */
    public def createMigrationsTable() {
        LOGGER.trace('Checking for the existence of the migrations table and ' +
            'creating it if it does not exist.')
        sql.execute('''
            CREATE TABLE IF NOT EXISTS migrations (
                id SERIAL PRIMARY KEY,
                name VARCHAR NOT NULL,
                run_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW())''') }

    /**
     * Compare migrations recorded in the database against the scripts on
     * disk. Returns a map with keys: run (applied, sorted), notRun
     * (available but unapplied) and missing (applied out of sequence — a
     * gap, indicating an inconsistent database).
     */
    public def diffMigrations() {
        def results = [notRun: [], missing: []]

        LOGGER.trace('Diffing migrations...')

        results.run = sql.rows('SELECT name FROM migrations ORDER BY name')
            .collect { it.name }.sort()

        SortedSet<String> available = new TreeSet<>()
        available.addAll(sqlDir
            .listFiles({ d, n -> n ==~ /.+-(up|down).sql$/ } as FilenameFilter)
            .collect { f -> f.name.replaceAll(/-(up|down).sql$/, '') })

        available.each { migrationName ->
            if (!results.run.contains(migrationName))
                results.notRun << migrationName

            // If we've already seen some migrations that have not been run but this
            // one has been run, that means we have a gap and are missing migrations.
            else if (results.notRun.size() > 0) {
                // BUGFIX: was `reults.notRun`, a typo that threw
                // MissingPropertyException whenever a gap was detected.
                results.missing += results.notRun
                results.notRun = [] } }

        LOGGER.trace('Migrations diff:\n\trun: {}\n\tnot run: {}\n\tmissing: {}',
            results.run, results.notRun, results.missing)

        return results }

    /**
     * Apply up to `count` pending migrations (all of them when count is
     * null). Throws when the database has migration gaps.
     */
    public List<String> up(Integer count = null) {
        createMigrationsTable()
        def diff = diffMigrations()

        if (diff.missing) {
            LOGGER.error('Missing migrations:\n\t{}', diff.missing)
            throw new Exception('Database is in an inconsistent state.') }

        LOGGER.debug('Migrating up.')

        List<String> toRun
        if (!count || count >= diff.notRun.size()) toRun = diff.notRun
        else toRun = diff.notRun[0..<count]

        LOGGER.debug('{} migrations to run.', toRun.size())
        LOGGER.trace('Migrations: {}.', toRun)

        return runMigrations(toRun, true) }

    /**
     * Revert the `count` most recently applied migrations (default 1).
     * Throws when the database has migration gaps.
     */
    public List<String> down(Integer count = 1) {
        createMigrationsTable()
        def diff = diffMigrations()

        if (diff.missing) {
            LOGGER.error('Missing migrations:\n\t{}', diff.missing)
            throw new Exception('Database is in an inconsistent state.') }

        LOGGER.debug('Migrating down.')

        List<String> toRun = count < diff.run.size() ?
            diff.run.reverse()[0..<count] : diff.run.reverse()

        LOGGER.debug('{} migrations to run.', toRun.size())
        LOGGER.trace('Migrations: {}.', toRun)

        return runMigrations(toRun, false) }

    /**
     * Run the given migrations' UP or DOWN scripts inside one
     * transaction, keeping the migrations table in sync. Returns the
     * migrations that completed before any failure.
     */
    private List<String> runMigrations(List<String> toRun, boolean up = true) {
        List<String> migrationsRun = []
        try {
            LOGGER.trace("Beginning transaction.")
            sql.execute('BEGIN')

            toRun.each { migrationName ->
                LOGGER.info(migrationName)
                File migrationFile = new File(sqlDir,
                    "$migrationName-${up ? 'up' : 'down'}.sql")

                // BUGFIX: message previously lacked the space after the path.
                if (!migrationFile.exists() || !migrationFile.isFile())
                    throw new FileNotFoundException(migrationFile.canonicalPath +
                        " does not exist or is not a regular file.")

                runFile(migrationFile)

                if (up) sql.execute(
                    'INSERT INTO migrations (name) VALUES (?)', migrationName)
                else sql.execute(
                    'DELETE FROM migrations WHERE name = ?', migrationName)

                migrationsRun << migrationName }

            sql.execute('COMMIT')

            LOGGER.info('Went {} {} migrations.',
                up ? 'up' : 'down', migrationsRun.size()) }

        catch (Exception e) {
            // BUGFIX: the exception was previously swallowed silently,
            // making failed migrations look like partial successes.
            LOGGER.error('Migration failed; rolling back.', e)
            sql.execute('ROLLBACK') }

        return migrationsRun }

    /** Execute each ';'-separated statement in the given SQL file. */
    public void runFile(File file) {
        LOGGER.trace('Raw statements:\n\n{}\n', file.text.split(/;/).join('\n'))

        // Strip trailing line comments and drop empty chunks.
        List<String> statements = file.text.split(/;/)
            .collect { it.replaceAll(/--.*$/, '').trim() }
            .findAll { it.length() > 0 }

        LOGGER.trace('Statements:\n\n{}\n', statements.join('\n'))

        statements.each {
            LOGGER.trace('Executing SQL: {}', it)
            sql.execute(it) } }
}

View File

@ -1,339 +0,0 @@
## DB Migrate
## ==========
##
## Simple tool to manage database migrations.
import std/[algorithm, json, logging, os, sequtils, sets, strutils, tables,
times]
import db_connector/db_postgres
import docopt
type
  DbMigrateConfig* = object
    ## Tool configuration, assembled from environment variables and the
    ## JSON config file (environment takes precedence; see `loadConfig`).
    driver, connectionString: string  # DB driver name and connection string
    sqlDirs: seq[string]              # directories scanned for *.sql scripts
    logLevel: Level                   # console logging threshold

  MigrationEntry* = tuple[name, upPath, downPath: string]
    ## A single migration: its name plus the paths to its UP and DOWN scripts.
proc ensureMigrationsTableExists(conn: DbConn): void =
  ## Create the `migrations` bookkeeping table if it is not already present
  ## (existence checked via information_schema to avoid noisy DDL).
  let tableCount = conn.getValue(sql"""
    SELECT COUNT(*) FROM information_schema.tables
    WHERE table_name = 'migrations';""")

  if tableCount.strip == "0":
    info "Creating the migrations table as it does not already exist."
    conn.exec(sql("""
      CREATE TABLE IF NOT EXISTS migrations (
        id SERIAL PRIMARY KEY,
        name VARCHAR NOT NULL,
        run_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW())"""))
proc rollbackWithErr (pgConn: DbConn, errMsg: string): void =
  ## Abort the current transaction, then raise a `DbError` carrying `errMsg`.
  exec(pgConn, sql"ROLLBACK")
  dbError(errMsg)
proc loadConfig*(filename: string): DbMigrateConfig =
  ## Load DbMigrateConfig from a file.
  ##
  ## Environment variables override the JSON config:
  ##   DBM_LOG_LEVEL, DATABASE_DRIVER, DATABASE_URL, MIGRATIONS_DIRS
  ## (MIGRATIONS_DIRS is a ';'-separated list).
  ## JSON keys: "logLevel", "driver" (default "postgres"),
  ## "connectionString" (default ""), "sqlDirs" (default ["migrations"]).
  let cfg = json.parseFile(filename)

  var logLevel: Level = lvlInfo
  if existsEnv("DBM_LOG_LEVEL"):
    let idx = find(LevelNames, $getEnv("DBM_LOG_LEVEL").toUpper)
    logLevel = if idx == -1: lvlInfo else: (Level)(idx)
  elif cfg.hasKey("logLevel"):
    let idx = find(LevelNames, cfg["logLevel"].getStr.toUpper)
    logLevel = if idx == -1: lvlInfo else: (Level)(idx)

  return DbMigrateConfig(
    driver:
      if existsEnv("DATABASE_DRIVER"): $getEnv("DATABASE_DRIVER")
      elif cfg.hasKey("driver"): cfg["driver"].getStr
      else: "postgres",  # default was previously misspelled "postres"
    connectionString:
      if existsEnv("DATABASE_URL"): $getEnv("DATABASE_URL")
      elif cfg.hasKey("connectionString"): cfg["connectionString"].getStr
      else: "",
    sqlDirs:
      if existsEnv("MIGRATIONS_DIRS"): getEnv("MIGRATIONS_DIRS").split(';')
      elif cfg.hasKey("sqlDirs"): cfg["sqlDirs"].getElems.mapIt(it.getStr)
      else: @["migrations"],
    logLevel: logLevel)
proc createMigration*(config: DbMigrateConfig, migrationName: string): MigrationEntry =
  ## Create a timestamped pair of UP/DOWN migration script stubs in the
  ## first configured SQL directory and return the new MigrationEntry.
  let timestamp = now().format("yyyyMMddHHmmss")
  let filenamePrefix = timestamp & "-" & migrationName

  # BUGFIX: `name` is the bare migration name, matching the entries built
  # by `diffMigrations` and the values stored in the migrations table.
  # It previously carried a stray "-up.sql" suffix.
  let migration = (
    name: filenamePrefix,
    upPath: joinPath(config.sqlDirs[0], filenamePrefix & "-up.sql"),
    downPath: joinPath(config.sqlDirs[0], filenamePrefix & "-down.sql"))

  let scriptDesc = migrationName & " (" & timestamp & ")"
  let upFile = open(migration.upPath, fmWrite)
  let downFile = open(migration.downPath, fmWrite)

  upFile.writeLine "-- UP script for " & scriptDesc
  downFile.writeLine "-- DOWN script for " & scriptDesc

  upFile.close()
  downFile.close()

  return migration
proc diffMigrations*(
    pgConn: DbConn,
    config: DbMigrateConfig
  ): tuple[
    available: TableRef[string, MigrationEntry],
    run: seq[string],
    notRun, missing: seq[MigrationEntry] ] =
  ## Compare the migrations recorded in the database against the migration
  ## files found in the configured SQL directories.
  ##   - available: every migration found on disk, keyed by name
  ##   - run:       names recorded as already applied (sorted)
  ##   - notRun:    available on disk but not yet applied
  ##   - missing:   applied out of sequence (a gap) — the database is in
  ##                an inconsistent state when this is non-empty
  debug "diffMigrations: inspecting database and configured directories " &
    "for migrations"

  # Query the database to find out what migrations have been run.
  var migrationsRun = initHashSet[string]()
  for row in pgConn.fastRows(sql"SELECT * FROM migrations ORDER BY name", @[]):
    migrationsRun.incl(row[1])

  # Inspect the filesystem to see what migrations are available.
  var migrationsAvailable = newTable[string, MigrationEntry]()
  for sqlDir in config.sqlDirs:
    debug "Looking in " & sqlDir
    for filePath in walkFiles(joinPath(sqlDir, "*.sql")):
      debug "Saw migration file: " & filePath
      # Both the -up and -down files reduce to the same migration name.
      var migrationName = filePath.extractFilename
      migrationName.removeSuffix("-up.sql")
      migrationName.removeSuffix("-down.sql")
      migrationsAvailable[migrationName] = (
        name: migrationName,
        upPath: joinPath(sqlDir, migrationName) & "-up.sql",
        downPath: joinPath(sqlDir, migrationName) & "-down.sql")

  # Diff with the list of migrations that we have in our migrations
  # directory.
  let migrationsInOrder =
    toSeq(migrationsAvailable.keys).sorted(system.cmp)

  var migrationsNotRun = newSeq[MigrationEntry]()
  var missingMigrations = newSeq[MigrationEntry]()

  for migName in migrationsInOrder:
    if not migrationsRun.contains(migName):
      migrationsNotRun.add(migrationsAvailable[migName])

    # if we've already seen some migrations that have not been run, but this
    # one has been, that means we have a gap and are missing migrations
    elif migrationsNotRun.len > 0:
      missingMigrations.add(migrationsNotRun)
      migrationsNotRun = newSeq[MigrationEntry]()

  result = (available: migrationsAvailable,
            run: toSeq(migrationsRun.items).sorted(system.cmp),
            notRun: migrationsNotRun,
            missing: missingMigrations)

  debug "diffMigration: Results" &
    "\n\tavailable: " & $toSeq(result[0].keys) &
    "\n\trun: " & $result[1] &
    "\n\tnotRun: " & $(result[2].mapIt(it.name)) &
    "\n\tmissing: " & $(result[3].mapIt(it.name))
proc readStatements*(filename: string): seq[SqlQuery] =
  ## Read the SQL file at `filename` and split its contents into
  ## individual `;`-terminated statements. Blank lines and lines starting
  ## with a "--" comment are skipped; the final statement need not be
  ## terminated by a ';'.
  result = @[]
  var stmt: string = ""

  for line in filename.lines:
    let l = line.strip
    if l.len == 0 or l.startsWith("--"): continue

    # A single line may terminate several statements: every segment
    # before a ';' completes one; the last segment carries over.
    # (The previous implementation kept only the first segment after a
    # ';', silently dropping any further statements on the same line.)
    let parts = line.split(';')
    for i in 0 ..< parts.len - 1:
      stmt &= "\n" & parts[i]
      if stmt.strip.len > 0:
        result.add(sql(stmt & ";"))
      stmt = ""
    stmt &= "\n" & parts[^1]

  if stmt.strip.len > 0: result.add(sql(stmt))
proc up*(
    pgConn: DbConn,
    config: DbMigrateConfig,
    toRun: seq[MigrationEntry]): seq[MigrationEntry] =
  ## Apply the given migrations' UP scripts, in order, inside a single
  ## transaction, recording each one in the `migrations` table. On any
  ## failure the transaction is rolled back and a DbError is raised.
  ## Returns the migrations that were applied.
  var migrationsRun = newSeq[MigrationEntry]()

  # Begin a transaction.
  pgConn.exec(sql"BEGIN")

  # Apply each of the migrations.
  for migration in toRun:
    info migration.name

    if not migration.upPath.fileExists:
      pgConn.rollbackWithErr "Can not find UP file for " & migration.name &
        ". Expected '" & migration.upPath & "'."

    let statements = migration.upPath.readStatements

    try:
      for statement in statements:
        pgConn.exec(statement)
      # Record the migration so diffMigrations sees it as applied.
      pgConn.exec(sql"INSERT INTO migrations (name) VALUES (?);", migration.name)
    except DbError:
      pgConn.rollbackWithErr "Migration '" & migration.name & "' failed:\n\t" &
        getCurrentExceptionMsg()

    migrationsRun.add(migration)

  pgConn.exec(sql"COMMIT")
  return migrationsRun
proc down*(
    pgConn: DbConn,
    config: DbMigrateConfig,
    migrationsToDown: seq[MigrationEntry]): seq[MigrationEntry] =
  ## Revert the given migrations by running their DOWN scripts, in the
  ## given order, inside a single transaction, removing each from the
  ## `migrations` table. On failure the transaction is rolled back and a
  ## DbError is raised. Returns the migrations that were reverted.
  var migrationsDowned = newSeq[MigrationEntry]()

  pgConn.exec(sql"BEGIN")

  for migration in migrationsToDown:
    info migration.name

    if not migration.downPath.fileExists:
      pgConn.rollbackWithErr "Can not find DOWN file for " & migration.name &
        ". Expected '" & migration.downPath & "'."

    let statements = migration.downPath.readStatements

    try:
      for statement in statements: pgConn.exec(statement)
      # Un-record the migration so it shows as not-run again.
      pgConn.exec(sql"DELETE FROM migrations WHERE name = ?;", migration.name)
    except DbError:
      pgConn.rollbackWithErr "Migration '" & migration.name & "' failed:\n\t" &
        getCurrentExceptionMsg()

    migrationsDowned.add(migration)

  pgConn.exec(sql"COMMIT")
  return migrationsDowned
when isMainModule:
  ## CLI entry point: parse arguments, load config, set up logging, and
  ## dispatch to create/up/down.
  let doc = """
Usage:
  db_migrate [options] create <migration-name>
  db_migrate [options] up [<count>]
  db_migrate [options] down [<count>]
  db_migrate [options] init <schema-name>
  db_migrate (-V | --version)
  db_migrate (-h | --help)

Options:
  -c --config <config-file>   Use the given configuration file (defaults to
                              "database.json").
  -h --help                   Show this usage information.
  -q --quiet                  Suppress log information.
  -v --verbose                Print detailed log information.
  --very-verbose              Print very detailed log information.
  -V --version                Print the tools version information.
"""

  # Parse arguments
  let args = docopt(doc, version = "db-migrate (Nim) 0.3.2\nhttps://git.jdb-software.com/jdb/db-migrate")

  # Log a fatal message and terminate with a failure exit code.
  let exitErr = proc(msg: string): void =
    fatal("db_migrate: " & msg)
    quit(QuitFailure)

  # Load configuration file
  let configFilename =
    if args["--config"]: $args["--config"]
    else: "database.json"

  var config: DbMigrateConfig
  try:
    config = loadConfig(configFilename)
  except IOError:
    # BUGFIX: previously this only wrote to stderr and fell through,
    # continuing with an uninitialized config. Fail fast instead.
    exitErr "Cannot open config file: " & configFilename
  except:
    exitErr "Error parsing config file: " & configFilename & "\L\t" &
      getCurrentExceptionMsg()

  # Command-line verbosity flags override the configured log level.
  logging.addHandler(newConsoleLogger())
  if args["--quiet"]: logging.setLogFilter(lvlError)
  elif args["--very-verbose"]: logging.setLogFilter(lvlAll)
  elif args["--verbose"]: logging.setlogFilter(lvlDebug)
  else: logging.setLogFilter(config.logLevel)

  # Check for migrations directory
  for sqlDir in config.sqlDirs:
    if not dirExists sqlDir:
      try:
        warn "SQL directory '" & sqlDir &
          "' does not exist and will be created."
        createDir sqlDir
      except IOError:
        exitErr "Unable to create directory: " & sqlDir & ":\L\T" & getCurrentExceptionMsg()

  # Execute commands
  if args["create"]:
    # `create` needs no database connection.
    try:
      let newMigration = createMigration(config, $args["<migration-name>"])
      info "Created new migration files:"
      info "\t" & newMigration.upPath
      info "\t" & newMigration.downPath
    except IOError:
      exitErr "Unable to create migration scripts: " & getCurrentExceptionMsg()

  else:
    let pgConn: DbConn =
      try: open("", "", "", config.connectionString)
      except DbError:
        exitErr "Unable to connect to the database: " & getCurrentExceptionMsg()
        (DbConn)(nil)  # unreachable (exitErr quits); satisfies the expression type

    pgConn.ensureMigrationsTableExists

    let (available, run, notRun, missing) = diffMigrations(pgConn, config)

    # Make sure we have no gaps (database is in an unknown state)
    if missing.len > 0:
      exitErr "Database is in an inconsistent state. Migrations have been " &
        "run that are not sequential."

    if args["up"]:
      try:
        # Without a count, run everything that is pending.
        let count = if args["<count>"]: parseInt($args["<count>"]) else: high(int)
        let toRun = if count < notRun.len: notRun[0..<count] else: notRun
        let migrationsRun = pgConn.up(config, toRun)
        if count < high(int): info "Went up " & $(migrationsRun.len) & "."
      except DbError:
        exitErr "Unable to migrate database: " & getCurrentExceptionMsg()

    elif args["down"]:
      try:
        # Without a count, go down exactly one migration. The DB stores
        # names only, so map them back to entries via `available`.
        let count = if args["<count>"]: parseInt($args["<count>"]) else: 1
        let toRunNames = if count < run.len: run.reversed[0..<count] else: run.reversed
        let toRun = toRunNames.mapIt(available[it])
        let migrationsRun = pgConn.down(config, toRun)
        info "Went down " & $(migrationsRun.len) & "."
      except DbError:
        exitErr "Unable to migrate database: " & getCurrentExceptionMsg()

    elif args["init"]: discard  # accepted but not yet implemented

    # Report the final state after whichever command ran.
    let newResults = diffMigrations(pgConn, config)
    if newResults.notRun.len > 0:
      info "Database is behind by " & $(newResults.notRun.len) & " migrations."
    else: info "Database is up to date."

View File

@ -1,17 +0,0 @@
// Logback configuration (Groovy DSL) for db-migrate.groovy.
import ch.qos.logback.core.*;
import ch.qos.logback.core.encoder.*;
import ch.qos.logback.core.read.*;
import ch.qos.logback.core.rolling.*;
import ch.qos.logback.core.status.*;
import ch.qos.logback.classic.net.*;
import ch.qos.logback.classic.encoder.PatternLayoutEncoder;

// Console appender with a minimal, tool-prefixed message format.
appender("STDOUT", ConsoleAppender) {
    encoder(PatternLayoutEncoder) {
        pattern = "db-migrate.groovy: %level - %msg%n"
    }
}

// Default everything to WARN, but show INFO from the tool's own package.
root(WARN, ["STDOUT"])
logger('com.jdblabs.dbmigrate', INFO)