* The Nim binary now recognizes the following environment variables and allows them to override configured values: - `DATABASE_DRIVER`: overrides the `driver` config value, selects which kind of database we expect to connect to. - `MIGRATIONS_DIR`: overrides the `sqlDir` config value, sets the path to the directory containing the migration SQL files. - `DATABASE_URL`: overrides the `connectionString` config value, supplies connection parameters to the database. - `DBM_LOG_LEVEL`: overrides the `logLevel` config value, sets the logging level of db_migrate. * Fixes a bug in the parsing of migration files. Previously the file was split on `;` characters and chunks that started with `--` were ignored as comments. This was wrong in the common case where a block starts with a comment but then contains further SQL; such a block was being ignored. The new behavior is to consider each line and build up queries that way. * Fixes a logging bug when parsing the configuration. If there was an error that prevented the configuration from loading, this in turn prevented logging from being set up correctly, and so the error was not logged. Now errors that may occur before logging is set up are hard-coded to be logged to STDERR. * Changes the logic for creating the `migrations` table to check before performing the query that creates the table. This is to avoid continually printing messages about skipping this creation when the table already exists (which is the normal case). This change is PostgreSQL-specific, but of course the entire tool is currently PostgreSQL-specific.
255 lines
8.4 KiB
Groovy
255 lines
8.4 KiB
Groovy
package com.jdblabs.dbmigrate
|
|
|
|
import groovy.sql.Sql
|
|
|
|
import ch.qos.logback.classic.Level
|
|
import ch.qos.logback.classic.Logger as LBLogger
|
|
import com.jdbernard.util.AnsiEscapeCodeSequence as ANSI
|
|
import com.zaxxer.hikari.HikariConfig
|
|
import com.zaxxer.hikari.HikariDataSource
|
|
import java.io.FilenameFilter
|
|
import java.text.SimpleDateFormat
|
|
import org.docopt.Docopt
|
|
import org.slf4j.Logger
|
|
import org.slf4j.LoggerFactory
|
|
|
|
/**
 * Simple SQL migration runner. Migrations live in a directory as paired
 * "&lt;timestamp&gt;-&lt;name&gt;-up.sql" / "&lt;timestamp&gt;-&lt;name&gt;-down.sql" files; applied
 * migrations are recorded in a "migrations" table in the target database.
 */
public class DbMigrate {

    /** Tool version, reported by --version and embedded in the usage text. */
    public static final VERSION = "0.2.5"

    /** docopt usage specification; doubles as the --help text. */
    public static final def DOC = """\
db-migrate.groovy v${VERSION}

Usage:
  db_migrate [options] create <migration-name>
  db_migrate [options] up [<count>]
  db_migrate [options] down [<count>]
  db_migrate [options] init <schema-name>
  db_migrate (-V | --version)
  db_migrate (-h | --help)

Options:

  -c --config <config-file>    Use the given configuration file (defaults to
                               "database.properties").

  -q --quiet                   Suppress log information.

  -v --verbose                 Print detailed log information.

  --very-verbose               Print very detailed log information.

  -V --version                 Print the tools version information.

  -h --help                    Print this usage information.
"""

    // Timestamp prefix for migration filenames; lexical order == creation
    // order. NOTE(review): SimpleDateFormat is not thread-safe, but this
    // tool is single-threaded so sharing a static instance is safe here.
    private static sdf = new SimpleDateFormat('yyyyMMddHHmmss')
    private static Logger LOGGER = LoggerFactory.getLogger(DbMigrate)

    // Live database handle; assigned by main() before up()/down() are called.
    Sql sql
    // Directory containing the *-up.sql / *-down.sql migration scripts.
    File migrationsDir
|
|
|
|
public static void main(String[] args) {
|
|
|
|
// Parse arguments
|
|
def opts = new Docopt(DOC).withVersion("db-migrate.groovy v$VERSION").parse(args)
|
|
|
|
if (opts['--version']) {
|
|
println "db-migrate.groovy v$VERSION"
|
|
System.exit(0) }
|
|
|
|
if (opts['--help']) { println DOC; System.exit(0) }
|
|
|
|
// TODO: Setup logging & output levels
|
|
Logger clilog = LoggerFactory.getLogger("db-migrate.cli")
|
|
|
|
if (opts['--quiet']) {
|
|
((LBLogger) LOGGER).level = Level.ERROR
|
|
((LBLogger) LoggerFactory.getLogger(LBLogger.ROOT_LOGGER_NAME))
|
|
.level = Level.ERROR }
|
|
if (opts['--verbose']) {
|
|
((LBLogger) LOGGER).level = Level.DEBUG
|
|
((LBLogger) LoggerFactory.getLogger(LBLogger.ROOT_LOGGER_NAME))
|
|
.level = Level.INFO }
|
|
if (opts['--very-verbose']) {
|
|
((LBLogger) LOGGER).level = Level.TRACE
|
|
((LBLogger) LoggerFactory.getLogger(LBLogger.ROOT_LOGGER_NAME))
|
|
.level = Level.DEBUG }
|
|
|
|
// Load the configuration file
|
|
def givenCfg = new Properties()
|
|
File cfgFile
|
|
if (opts["--config"]) cfgFile = new File(opts["--config"])
|
|
|
|
if (!cfgFile || !cfgFile.exists() || !cfgFile.isFile())
|
|
cfgFile = new File("database.properties")
|
|
|
|
if (!cfgFile.exists() || !cfgFile.isFile()) {
|
|
clilog.warn("Config file '{}' does not exist or is not a regular file.",
|
|
cfgFile.canonicalPath) }
|
|
|
|
if (cfgFile.exists() && cfgFile.isFile()) {
|
|
try { cfgFile.withInputStream { givenCfg.load(it) } }
|
|
catch (Exception e) {
|
|
clilog.error("Could not read configuration file.", e)
|
|
givenCfg.clear() } }
|
|
|
|
// Check for migrations directory
|
|
File migrationsDir = new File(givenCfg["migrations.dir"] ?: 'migrations')
|
|
if (!migrationsDir.exists() || !migrationsDir.isDirectory()) {
|
|
clilog.error("'{}' does not exist or is not a directory.",
|
|
migrationsDir.canonicalPath)
|
|
System.exit(1) }
|
|
|
|
// Instantiate the DbMigrate instance
|
|
DbMigrate dbmigrate = new DbMigrate(migrationsDir: migrationsDir)
|
|
|
|
// If we've only been asked to create a new migration, we don't need to
|
|
// setup the DB connection.
|
|
if (opts['create']) {
|
|
try {
|
|
List<File> files = dbmigrate.createMigration(opts['<migration-name>'])
|
|
clilog.info("Created new migration files:\n\t${files.name.join('\n\t')}")
|
|
return }
|
|
catch (Exception e) {
|
|
clilog.error('Unable to create migration scripts.', e)
|
|
System.exit(1) } }
|
|
|
|
// Create the datasource.
|
|
Properties dsProps = new Properties()
|
|
dsProps.putAll(givenCfg.findAll { it.key != 'migrations.dir' })
|
|
|
|
HikariDataSource hds = new HikariDataSource(new HikariConfig(dsProps))
|
|
|
|
dbmigrate.sql = new Sql(hds)
|
|
|
|
// Execute the appropriate command.
|
|
if (opts['up']) dbmigrate.up(opts['<count>'])
|
|
else if (opts['down']) dbmigrate.down(opts['<count>'] ?: 1) }
|
|
|
|
public List<File> createMigration(String migrationName) {
|
|
String timestamp = sdf.format(new Date())
|
|
|
|
File upFile = new File(migrationsDir, "$timestamp-$migrationName-up.sql")
|
|
File downFile = new File(migrationsDir, "$timestamp-$migrationName-down.sql")
|
|
|
|
upFile.text = "-- UP script for $migrationName ($timestamp)"
|
|
downFile.text = "-- DOWN script for $migrationName ($timestamp)"
|
|
|
|
return [upFile, downFile] }
|
|
|
|
    /**
     * Creates the bookkeeping table that records which migrations have run.
     * Idempotent via IF NOT EXISTS. PostgreSQL-specific (SERIAL, NOW()).
     */
    public def createMigrationsTable() {
        LOGGER.trace('Checking for the existence of the migrations table and ' +
            'creating it if it does not exist.')
        sql.execute('''
            CREATE TABLE IF NOT EXISTS migrations (
                id SERIAL PRIMARY KEY,
                name VARCHAR NOT NULL,
                run_at TIMESTAMP NOT NULL DEFAULT NOW())''') }
|
|
|
|
public def diffMigrations() {
|
|
def results = [notRun: [], missing: []]
|
|
|
|
LOGGER.trace('Diffing migrations...')
|
|
results.run = sql.rows('SELECT name FROM migrations ORDER BY name')
|
|
.collect { it.name }.sort()
|
|
|
|
SortedSet<String> available = new TreeSet<>()
|
|
available.addAll(migrationsDir
|
|
.listFiles({ d, n -> n ==~ /.+-(up|down).sql$/ } as FilenameFilter)
|
|
.collect { f -> f.name.replaceAll(/-(up|down).sql$/, '') })
|
|
|
|
available.each { migrationName ->
|
|
if (!results.run.contains(migrationName))
|
|
results.notRun << migrationName
|
|
|
|
// If we've already seen some migrations that have not been run but this
|
|
// one has been run, that means we have a gap and are missing migrations.
|
|
else if (results.notRun.size() > 0) {
|
|
results.missing += reults.notRun
|
|
results.notRun = [] } }
|
|
|
|
LOGGER.trace('Migrations diff:\n\trun: {}\n\tnot run: {}\n\tmissing: {}',
|
|
results.run, results.notRun, results.missing)
|
|
|
|
return results }
|
|
|
|
public List<String> up(Integer count = null) {
|
|
createMigrationsTable()
|
|
def diff = diffMigrations()
|
|
|
|
if (diff.missing) {
|
|
LOGGER.error('Missing migrations:\n\t{}', diff.missing)
|
|
throw new Exception('Database is in an inconsistent state.') }
|
|
|
|
LOGGER.debug('Migrating up.')
|
|
List<String> toRun
|
|
|
|
if (!count || count >= diff.notRun.size()) toRun = diff.notRun
|
|
else toRun = diff.notRun[0..<count]
|
|
|
|
LOGGER.debug('{} migrations to run.', toRun.size())
|
|
LOGGER.trace('Migrations: {}.', toRun)
|
|
|
|
return runMigrations(toRun, true) }
|
|
|
|
public List<String> down(Integer count = 1) {
|
|
createMigrationsTable()
|
|
def diff = diffMigrations()
|
|
|
|
if (diff.missing) {
|
|
LOGGER.error('Missing migrations:\n\t{}', diff.missing)
|
|
throw new Exception('Database is in an inconsistent state.') }
|
|
|
|
LOGGER.debug('Migrating down.')
|
|
|
|
List<String> toRun = count < diff.run.size() ?
|
|
diff.run.reverse()[0..<count] : diff.run.reverse()
|
|
|
|
LOGGER.debug('{} migrations to run.', toRun.size())
|
|
LOGGER.trace('Migrations: {}.', toRun)
|
|
|
|
return runMigrations(toRun, false) }
|
|
|
|
private List<String> runMigrations(List<String> toRun, boolean up = true) {
|
|
List<String> migrationsRun = []
|
|
|
|
try {
|
|
LOGGER.trace("Beginning transaction.")
|
|
sql.execute('BEGIN')
|
|
|
|
toRun.each { migrationName ->
|
|
LOGGER.info(migrationName)
|
|
File migrationFile = new File(migrationsDir,
|
|
"$migrationName-${up ? 'up' : 'down'}.sql")
|
|
|
|
if (!migrationFile.exists() || !migrationFile.isFile())
|
|
throw new FileNotFoundException(migrationFile.canonicalPath +
|
|
"does not exist or is not a regular file.")
|
|
|
|
runFile(migrationFile)
|
|
|
|
if (up) sql.execute(
|
|
'INSERT INTO migrations (name) VALUES (?)', migrationName)
|
|
else sql.execute(
|
|
'DELETE FROM migrations WHERE name = ?', migrationName)
|
|
|
|
migrationsRun << migrationName }
|
|
|
|
sql.execute('COMMIT')
|
|
LOGGER.info('Went {} {} migrations.',
|
|
up ? 'up' : 'down', migrationsRun.size()) }
|
|
|
|
catch (Exception e) { sql.execute('ROLLBACK'); }
|
|
|
|
return migrationsRun }
|
|
|
|
public void runFile(File file) {
|
|
LOGGER.trace('Raw statements:\n\n{}\n', file.text.split(/;/).join('\n'))
|
|
|
|
List<String> statements = file.text.split(/;/)
|
|
.collect { it.replaceAll(/--.*$/, '').trim() }
|
|
.findAll { it.length() > 0 }
|
|
|
|
LOGGER.trace('Statements:\n\n{}\n', statements.join('\n'))
|
|
|
|
statements.each {
|
|
LOGGER.trace('Executing SQL: {}', it)
|
|
sql.execute(it) } }
|
|
}
|