phase 2: mattes/migrate -> migratex (#848)

* move mattes migrations to migratex

* changes format of migrations to migratex format
* updates test runner to use new interface (double checked this with printlns,
the tests go fully down and then up, and work on pg/mysql)

* remove mattes/migrate

* update tests from deps

* update readme

* fix other file extensions
This commit is contained in:
Reed Allman
2018-03-13 14:12:34 -07:00
committed by GitHub
parent 1f43545b63
commit 4084b727c0
697 changed files with 16924 additions and 35406 deletions

View File

@@ -20,9 +20,8 @@ var (
// TODO doesn't have to be a glob
MigrationsTable = "schema_migrations"
ErrLocked = errors.New("database is locked")
ErrDirty = errors.New("database is dirty")
ErrOutOfOrder = errors.New("non-contiguous migration attempted")
ErrLocked = errors.New("database is locked")
ErrDirty = errors.New("database is dirty")
)
const (
@@ -30,8 +29,8 @@ const (
)
type Migration interface {
Up(*sqlx.Tx) error
Down(*sqlx.Tx) error
Up(context.Context, *sqlx.Tx) error
Down(context.Context, *sqlx.Tx) error
Version() int64
}
@@ -45,14 +44,14 @@ var _ Migration = new(MigFields)
// MigFields implements Migration and can be used for convenience.
type MigFields struct {
UpFunc func(*sqlx.Tx) error
DownFunc func(*sqlx.Tx) error
UpFunc func(context.Context, *sqlx.Tx) error
DownFunc func(context.Context, *sqlx.Tx) error
VersionFunc func() int64
}
func (m MigFields) Up(tx *sqlx.Tx) error { return m.UpFunc(tx) }
func (m MigFields) Down(tx *sqlx.Tx) error { return m.DownFunc(tx) }
func (m MigFields) Version() int64 { return m.VersionFunc() }
func (m MigFields) Up(ctx context.Context, tx *sqlx.Tx) error { return m.UpFunc(ctx, tx) }
func (m MigFields) Down(ctx context.Context, tx *sqlx.Tx) error { return m.DownFunc(ctx, tx) }
func (m MigFields) Version() int64 { return m.VersionFunc() }
// TODO instance must have `multiStatements` set to true ?
@@ -65,13 +64,10 @@ func Down(ctx context.Context, db *sqlx.DB, migs []Migration) error {
}
func migrate(ctx context.Context, db *sqlx.DB, migs []Migration, up bool) error {
var curVersion int64
var curVersion int64 // could be NilVersion, is ok
err := tx(ctx, db, func(tx *sqlx.Tx) error {
err := ensureVersionTable(ctx, tx)
if err != nil {
return err
}
var dirty bool
var err error
curVersion, dirty, err = Version(ctx, tx)
if dirty {
return ErrDirty
@@ -90,11 +86,11 @@ func migrate(ctx context.Context, db *sqlx.DB, migs []Migration, up bool) error
if up {
sort.Sort(sorted(migs))
} else {
migs = []Migration(sort.Reverse(sorted(migs)).(sorted))
sort.Sort(sort.Reverse(sorted(migs)))
}
for _, m := range migs {
// skip over migrations we have run
if (up && curVersion < m.Version()) || (!up && curVersion > m.Version()) {
if (up && curVersion < m.Version()) || (!up && curVersion >= m.Version()) {
// do each individually, for large migrations it's better to checkpoint
// than to try to do them all in one big go.
@@ -182,9 +178,9 @@ func run(ctx context.Context, db *sqlx.DB, m Migration, up bool) error {
// enforce monotonicity
if up && curVersion != NilVersion && m.Version() != curVersion+1 {
return ErrOutOfOrder
return fmt.Errorf("non-contiguous migration attempted up: %v != %v", m.Version(), curVersion+1)
} else if !up && m.Version() != curVersion { // down is always unraveling
return ErrOutOfOrder
return fmt.Errorf("non-contiguous migration attempted down: %v != %v", m.Version(), curVersion)
}
// TODO is this robust enough? we could check
@@ -194,12 +190,12 @@ func run(ctx context.Context, db *sqlx.DB, m Migration, up bool) error {
}
// TODO we don't need the dirty bit anymore since we're using transactions?
err = SetVersion(ctx, tx, m.Version(), true)
err = SetVersion(ctx, tx, version, true)
if up {
err = m.Up(tx)
err = m.Up(ctx, tx)
} else {
err = m.Down(tx)
err = m.Down(ctx, tx)
}
if err != nil {
@@ -230,7 +226,7 @@ func lock(ctx context.Context, tx *sqlx.Tx) error {
var query string
switch tx.DriverName() {
case "postgres", "pgx", "pq-timeouts", "cloudsqlpostgres":
query = `SELECT pg_try_advisory_lock($1)`
query = `SELECT pg_try_advisory_lock(?)`
case "mysql", "oci8", "ora", "goracle":
query = "SELECT GET_LOCK(?, -1)"
case "sqlite3":
@@ -260,7 +256,7 @@ func unlock(ctx context.Context, tx *sqlx.Tx) error {
var query string
switch tx.DriverName() {
case "postgres", "pgx", "pq-timeouts", "cloudsqlpostgres":
query = `SELECT pg_advisory_unlock($1)`
query = `SELECT pg_advisory_unlock(?)`
case "mysql", "oci8", "ora", "goracle":
query = `SELECT RELEASE_LOCK(?)`
case "sqlite3":
@@ -277,6 +273,11 @@ func unlock(ctx context.Context, tx *sqlx.Tx) error {
}
func SetVersion(ctx context.Context, tx *sqlx.Tx, version int64, dirty bool) error {
err := ensureVersionTable(ctx, tx)
if err != nil {
return nil
}
// TODO need to handle down migration better
// ideally, we have a record of each up/down migration with a timestamp for auditing,
// this just nukes the whole table which is kinda lame.
@@ -286,7 +287,7 @@ func SetVersion(ctx context.Context, tx *sqlx.Tx, version int64, dirty bool) err
}
if version >= 0 {
query = tx.Rebind("INSERT INTO `" + MigrationsTable + "` (version, dirty) VALUES (?, ?)")
query = tx.Rebind(`INSERT INTO ` + MigrationsTable + ` (version, dirty) VALUES (?, ?)`)
if _, err := tx.ExecContext(ctx, query, version, dirty); err != nil {
return err
}
@@ -296,7 +297,7 @@ func SetVersion(ctx context.Context, tx *sqlx.Tx, version int64, dirty bool) err
}
func Version(ctx context.Context, tx *sqlx.Tx) (version int64, dirty bool, err error) {
query := tx.Rebind("SELECT version, dirty FROM `" + MigrationsTable + "` LIMIT 1")
query := tx.Rebind(`SELECT version, dirty FROM ` + MigrationsTable + ` LIMIT 1`)
err = tx.QueryRowContext(ctx, query).Scan(&version, &dirty)
switch {
case err == sql.ErrNoRows:

View File

@@ -14,15 +14,15 @@ const testsqlite3 = "file::memory:?mode=memory&cache=shared"
type tm struct{}
func (t *tm) Up(tx *sqlx.Tx) error {
_, err := tx.Exec(`CREATE TABLE IF NOT EXISTS foo (
func (t *tm) Up(ctx context.Context, tx *sqlx.Tx) error {
_, err := tx.ExecContext(ctx, `CREATE TABLE IF NOT EXISTS foo (
bar bigint NOT NULL PRIMARY KEY
)`)
return err
}
func (t *tm) Down(tx *sqlx.Tx) error {
_, err := tx.Exec("DROP TABLE foo")
func (t *tm) Down(ctx context.Context, tx *sqlx.Tx) error {
_, err := tx.ExecContext(ctx, "DROP TABLE foo")
return err
}

View File

@@ -1 +0,0 @@
ALTER TABLE routes DROP COLUMN created_at;

View File

@@ -0,0 +1,26 @@
package migrations
import (
"context"
"github.com/fnproject/fn/api/datastore/sql/migratex"
"github.com/jmoiron/sqlx"
)
// up1 adds the created_at column to the routes table.
func up1(ctx context.Context, tx *sqlx.Tx) error {
	if _, err := tx.ExecContext(ctx, "ALTER TABLE routes ADD created_at text;"); err != nil {
		return err
	}
	return nil
}

// down1 reverts up1 by dropping the created_at column from routes.
func down1(ctx context.Context, tx *sqlx.Tx) error {
	if _, err := tx.ExecContext(ctx, "ALTER TABLE routes DROP COLUMN created_at;"); err != nil {
		return err
	}
	return nil
}

// init registers migration version 1 in the package-wide Migrations list.
func init() {
	mig := &migratex.MigFields{
		VersionFunc: vfunc(1),
		UpFunc:      up1,
		DownFunc:    down1,
	}
	Migrations = append(Migrations, mig)
}

View File

@@ -1 +0,0 @@
ALTER TABLE routes ADD created_at text;

View File

@@ -1 +0,0 @@
ALTER TABLE calls DROP COLUMN stats;

View File

@@ -0,0 +1,26 @@
package migrations
import (
"context"
"github.com/fnproject/fn/api/datastore/sql/migratex"
"github.com/jmoiron/sqlx"
)
// up2 adds the stats column to the calls table.
func up2(ctx context.Context, tx *sqlx.Tx) error {
	if _, err := tx.ExecContext(ctx, "ALTER TABLE calls ADD stats text;"); err != nil {
		return err
	}
	return nil
}

// down2 reverts up2 by dropping the stats column from calls.
func down2(ctx context.Context, tx *sqlx.Tx) error {
	if _, err := tx.ExecContext(ctx, "ALTER TABLE calls DROP COLUMN stats;"); err != nil {
		return err
	}
	return nil
}

// init registers migration version 2 in the package-wide Migrations list.
func init() {
	mig := &migratex.MigFields{
		VersionFunc: vfunc(2),
		UpFunc:      up2,
		DownFunc:    down2,
	}
	Migrations = append(Migrations, mig)
}

View File

@@ -1 +0,0 @@
ALTER TABLE calls ADD stats text;

View File

@@ -1 +0,0 @@
ALTER TABLE calls DROP COLUMN error;

View File

@@ -0,0 +1,26 @@
package migrations
import (
"context"
"github.com/fnproject/fn/api/datastore/sql/migratex"
"github.com/jmoiron/sqlx"
)
// up3 adds the error column to the calls table.
func up3(ctx context.Context, tx *sqlx.Tx) error {
	if _, err := tx.ExecContext(ctx, "ALTER TABLE calls ADD error text;"); err != nil {
		return err
	}
	return nil
}

// down3 reverts up3 by dropping the error column from calls.
func down3(ctx context.Context, tx *sqlx.Tx) error {
	if _, err := tx.ExecContext(ctx, "ALTER TABLE calls DROP COLUMN error;"); err != nil {
		return err
	}
	return nil
}

// init registers migration version 3 in the package-wide Migrations list.
func init() {
	mig := &migratex.MigFields{
		VersionFunc: vfunc(3),
		UpFunc:      up3,
		DownFunc:    down3,
	}
	Migrations = append(Migrations, mig)
}

View File

@@ -1 +0,0 @@
ALTER TABLE calls ADD error text;

View File

@@ -1 +0,0 @@
ALTER TABLE routes DROP COLUMN updated_at;

View File

@@ -0,0 +1,26 @@
package migrations
import (
"context"
"github.com/fnproject/fn/api/datastore/sql/migratex"
"github.com/jmoiron/sqlx"
)
// up4 adds the updated_at column (VARCHAR(256)) to the routes table.
func up4(ctx context.Context, tx *sqlx.Tx) error {
	if _, err := tx.ExecContext(ctx, "ALTER TABLE routes ADD updated_at VARCHAR(256);"); err != nil {
		return err
	}
	return nil
}

// down4 reverts up4 by dropping the updated_at column from routes.
func down4(ctx context.Context, tx *sqlx.Tx) error {
	if _, err := tx.ExecContext(ctx, "ALTER TABLE routes DROP COLUMN updated_at;"); err != nil {
		return err
	}
	return nil
}

// init registers migration version 4 in the package-wide Migrations list.
func init() {
	mig := &migratex.MigFields{
		VersionFunc: vfunc(4),
		UpFunc:      up4,
		DownFunc:    down4,
	}
	Migrations = append(Migrations, mig)
}

View File

@@ -1 +0,0 @@
ALTER TABLE routes ADD updated_at varchar(256);

View File

@@ -1 +0,0 @@
ALTER TABLE apps DROP COLUMN created_at;

View File

@@ -0,0 +1,26 @@
package migrations
import (
"context"
"github.com/fnproject/fn/api/datastore/sql/migratex"
"github.com/jmoiron/sqlx"
)
// up5 adds the created_at column (VARCHAR(256)) to the apps table.
func up5(ctx context.Context, tx *sqlx.Tx) error {
	if _, err := tx.ExecContext(ctx, "ALTER TABLE apps ADD created_at VARCHAR(256);"); err != nil {
		return err
	}
	return nil
}

// down5 reverts up5 by dropping the created_at column from apps.
func down5(ctx context.Context, tx *sqlx.Tx) error {
	if _, err := tx.ExecContext(ctx, "ALTER TABLE apps DROP COLUMN created_at;"); err != nil {
		return err
	}
	return nil
}

// init registers migration version 5 in the package-wide Migrations list.
func init() {
	mig := &migratex.MigFields{
		VersionFunc: vfunc(5),
		UpFunc:      up5,
		DownFunc:    down5,
	}
	Migrations = append(Migrations, mig)
}

View File

@@ -1 +0,0 @@
ALTER TABLE apps ADD created_at varchar(256);

View File

@@ -1 +0,0 @@
ALTER TABLE apps DROP COLUMN updated_at;

View File

@@ -0,0 +1,26 @@
package migrations
import (
"context"
"github.com/fnproject/fn/api/datastore/sql/migratex"
"github.com/jmoiron/sqlx"
)
// up6 adds the updated_at column (VARCHAR(256)) to the apps table.
func up6(ctx context.Context, tx *sqlx.Tx) error {
	if _, err := tx.ExecContext(ctx, "ALTER TABLE apps ADD updated_at VARCHAR(256);"); err != nil {
		return err
	}
	return nil
}

// down6 reverts up6 by dropping the updated_at column from apps.
func down6(ctx context.Context, tx *sqlx.Tx) error {
	if _, err := tx.ExecContext(ctx, "ALTER TABLE apps DROP COLUMN updated_at;"); err != nil {
		return err
	}
	return nil
}

// init registers migration version 6 in the package-wide Migrations list.
func init() {
	mig := &migratex.MigFields{
		VersionFunc: vfunc(6),
		UpFunc:      up6,
		DownFunc:    down6,
	}
	Migrations = append(Migrations, mig)
}

View File

@@ -1 +0,0 @@
ALTER TABLE apps ADD updated_at varchar(256);

View File

@@ -1 +0,0 @@
ALTER TABLE routes DROP COLUMN cpus;

View File

@@ -0,0 +1,26 @@
package migrations
import (
"context"
"github.com/fnproject/fn/api/datastore/sql/migratex"
"github.com/jmoiron/sqlx"
)
// up7 adds the cpus column (int) to the routes table.
func up7(ctx context.Context, tx *sqlx.Tx) error {
	if _, err := tx.ExecContext(ctx, "ALTER TABLE routes ADD cpus int;"); err != nil {
		return err
	}
	return nil
}

// down7 reverts up7 by dropping the cpus column from routes.
func down7(ctx context.Context, tx *sqlx.Tx) error {
	if _, err := tx.ExecContext(ctx, "ALTER TABLE routes DROP COLUMN cpus;"); err != nil {
		return err
	}
	return nil
}

// init registers migration version 7 in the package-wide Migrations list.
func init() {
	mig := &migratex.MigFields{
		VersionFunc: vfunc(7),
		UpFunc:      up7,
		DownFunc:    down7,
	}
	Migrations = append(Migrations, mig)
}

View File

@@ -1 +0,0 @@
ALTER TABLE routes ADD cpus int;

View File

@@ -2,39 +2,49 @@
All migration files should be of the format:
`[0-9]+_[add|remove]_model[_field]*.[up|down].sql`
`[0-9]+_[add|remove]_model[_field]*.go`
The number at the beginning of the file name should be monotonically
increasing, from the last highest file number in this directory. E.g. if there
is `11_add_foo_bar.up.sql`, your new file should be `12_add_bar_baz.up.sql`.
is `11_add_foo_bar.go`, your new file should be `12_add_bar_baz.go`.
All `*.up.sql` files must have an accompanying `*.down.sql` file in order to
pass review.
Each migration file has to contain both an up and a down function:
The contents of each file should contain only 1 ANSI sql query. For help, you
may refer to https://github.com/mattes/migrate/blob/master/MIGRATIONS.md which
illustrates some of the finer points.
```go
package migrations
After creating the file you will need to run, in the same directory as this
README:
import (
"context"
```sh
$ go generate
"github.com/fnproject/fn/api/datastore/sql/migratex"
"github.com/jmoiron/sqlx"
)
func up1(ctx context.Context, tx *sqlx.Tx) error {
_, err := tx.ExecContext(ctx, "ALTER TABLE routes ADD created_at text;")
return err
}
func down1(ctx context.Context, tx *sqlx.Tx) error {
_, err := tx.ExecContext(ctx, "ALTER TABLE routes DROP COLUMN created_at;")
return err
}
func init() {
Migrations = append(Migrations, &migratex.MigFields{
VersionFunc: vfunc(1),
UpFunc: up1,
DownFunc: down1,
})
}
```
NOTE: You may need to `go get -u github.com/jteeuwen/go-bindata/...` before running `go
generate` in order for it to work.
Each migration must initialize a `migratex.Migration` with a corresponding
version and up/down functions.
After running `go generate`, the `migrations.go` file should be updated. Check
the updated version of this as well as the new `.sql` file into git.
We have elected to expose fn's specific sql migrations as an exported global
list `migrations.Migrations` from this package. To add a migration, simply
create it and append it to this list.
After adding the migration, be sure to update the fields in the sql tables in
`sql.go` up one package. For example, if you added a column `foo` to `routes`,
add this field to the routes `CREATE TABLE` query, as well as any queries
where it should be returned.
After doing this, run the test suite to make sure the sql queries work as
intended and voila. The test suite will ensure that the up and down migrations
work as well as a fresh db. The down migrations will not be tested against
SQLite3 as it does not support `ALTER TABLE DROP COLUMN`, but will still be
tested against postgres and MySQL.
Please note that every database change should be treated as one individual
migration (a new table, a new column, a column type change, etc.).

View File

@@ -1,12 +0,0 @@
package migrations
//go:generate go-bindata -ignore README.md -ignore migrations.go -ignore index.go -o migrations.go -pkg migrations .
// migrations are generated from this cwd with go generate.
// install https://github.com/jteeuwen/go-bindata for go generate
// command to work properly.
// this will generate a go file with go-bindata of all the migration
// files in 1 go file, so that migrations can be run remotely without
// having to carry the migration files around (i.e. since they are
// compiled into the go binary)

View File

@@ -1,534 +0,0 @@
// Code generated by go-bindata.
// sources:
// 1_add_route_created_at.down.sql
// 1_add_route_created_at.up.sql
// 2_add_call_stats.down.sql
// 2_add_call_stats.up.sql
// 3_add_call_error.down.sql
// 3_add_call_error.up.sql
// 4_add_route_updated_at.down.sql
// 4_add_route_updated_at.up.sql
// 5_add_app_created_at.down.sql
// 5_add_app_created_at.up.sql
// 6_add_app_updated_at.down.sql
// 6_add_app_updated_at.up.sql
// 7_add_route_cpus.down.sql
// 7_add_route_cpus.up.sql
// DO NOT EDIT!
package migrations
import (
"bytes"
"compress/gzip"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
)
// bindataRead gunzips the embedded asset bytes and returns the raw
// contents. name is used only to contextualize error messages.
func bindataRead(data []byte, name string) ([]byte, error) {
	gz, err := gzip.NewReader(bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("Read %q: %v", name, err)
	}
	var buf bytes.Buffer
	_, err = io.Copy(&buf, gz)
	clErr := gz.Close()
	if err != nil {
		return nil, fmt.Errorf("Read %q: %v", name, err)
	}
	if clErr != nil {
		// BUG FIX: previously this branch returned `err`, which is nil
		// here, silently swallowing the close error (and the gzip
		// checksum validation it performs). Report the close error.
		return nil, fmt.Errorf("Read %q: %v", name, clErr)
	}
	return buf.Bytes(), nil
}
// asset pairs an embedded file's decompressed contents with the file
// metadata recorded at generation time.
type asset struct {
	bytes []byte
	info  os.FileInfo
}
// bindataFileInfo implements os.FileInfo for embedded assets, carrying
// the name, size, mode, and mtime captured when the asset was generated.
type bindataFileInfo struct {
	name    string
	size    int64
	mode    os.FileMode
	modTime time.Time
}

// Name returns the recorded file name of the embedded asset.
func (fi bindataFileInfo) Name() string {
	return fi.name
}

// Size returns the recorded size of the asset in bytes.
func (fi bindataFileInfo) Size() int64 {
	return fi.size
}

// Mode returns the recorded file mode bits.
func (fi bindataFileInfo) Mode() os.FileMode {
	return fi.mode
}

// ModTime returns the recorded modification time.
func (fi bindataFileInfo) ModTime() time.Time {
	return fi.modTime
}

// IsDir always reports false: embedded assets are regular files.
func (fi bindataFileInfo) IsDir() bool {
	return false
}

// Sys returns nil; there is no underlying OS file behind the asset.
func (fi bindataFileInfo) Sys() interface{} {
	return nil
}
var __1_add_route_created_atDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\xca\x2f\x2d\x49\x2d\x56\x70\x09\xf2\x0f\x50\x70\xf6\xf7\x09\xf5\xf5\x53\x48\x2e\x4a\x4d\x2c\x49\x4d\x89\x4f\x2c\xb1\xe6\x02\x04\x00\x00\xff\xff\x47\xfd\x3b\xbe\x2b\x00\x00\x00")
func _1_add_route_created_atDownSqlBytes() ([]byte, error) {
return bindataRead(
__1_add_route_created_atDownSql,
"1_add_route_created_at.down.sql",
)
}
func _1_add_route_created_atDownSql() (*asset, error) {
bytes, err := _1_add_route_created_atDownSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "1_add_route_created_at.down.sql", size: 43, mode: os.FileMode(420), modTime: time.Unix(1510963763, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var __1_add_route_created_atUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\xca\x2f\x2d\x49\x2d\x56\x70\x74\x71\x51\x48\x2e\x4a\x4d\x2c\x49\x4d\x89\x4f\x2c\x51\x28\x49\xad\x28\xb1\xe6\x02\x04\x00\x00\xff\xff\x3b\x59\x9c\x54\x28\x00\x00\x00")
func _1_add_route_created_atUpSqlBytes() ([]byte, error) {
return bindataRead(
__1_add_route_created_atUpSql,
"1_add_route_created_at.up.sql",
)
}
func _1_add_route_created_atUpSql() (*asset, error) {
bytes, err := _1_add_route_created_atUpSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "1_add_route_created_at.up.sql", size: 40, mode: os.FileMode(420), modTime: time.Unix(1510963763, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var __2_add_call_statsDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x48\x4e\xcc\xc9\x29\x56\x70\x09\xf2\x0f\x50\x70\xf6\xf7\x09\xf5\xf5\x53\x28\x2e\x49\x2c\x29\xb6\xe6\x02\x04\x00\x00\xff\xff\xd3\x09\xeb\x22\x25\x00\x00\x00")
func _2_add_call_statsDownSqlBytes() ([]byte, error) {
return bindataRead(
__2_add_call_statsDownSql,
"2_add_call_stats.down.sql",
)
}
func _2_add_call_statsDownSql() (*asset, error) {
bytes, err := _2_add_call_statsDownSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "2_add_call_stats.down.sql", size: 37, mode: os.FileMode(420), modTime: time.Unix(1511917353, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var __2_add_call_statsUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x48\x4e\xcc\xc9\x29\x56\x70\x74\x71\x51\x28\x2e\x49\x2c\x29\x56\x28\x49\xad\x28\xb1\xe6\x02\x04\x00\x00\xff\xff\x29\xde\x11\xe8\x22\x00\x00\x00")
func _2_add_call_statsUpSqlBytes() ([]byte, error) {
return bindataRead(
__2_add_call_statsUpSql,
"2_add_call_stats.up.sql",
)
}
func _2_add_call_statsUpSql() (*asset, error) {
bytes, err := _2_add_call_statsUpSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "2_add_call_stats.up.sql", size: 34, mode: os.FileMode(420), modTime: time.Unix(1511917353, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var __3_add_call_errorDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x48\x4e\xcc\xc9\x29\x56\x70\x09\xf2\x0f\x50\x70\xf6\xf7\x09\xf5\xf5\x53\x48\x2d\x2a\xca\x2f\xb2\xe6\x02\x04\x00\x00\xff\xff\xc1\x14\x26\x51\x25\x00\x00\x00")
func _3_add_call_errorDownSqlBytes() ([]byte, error) {
return bindataRead(
__3_add_call_errorDownSql,
"3_add_call_error.down.sql",
)
}
func _3_add_call_errorDownSql() (*asset, error) {
bytes, err := _3_add_call_errorDownSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "3_add_call_error.down.sql", size: 37, mode: os.FileMode(420), modTime: time.Unix(1511989827, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var __3_add_call_errorUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x48\x4e\xcc\xc9\x29\x56\x70\x74\x71\x51\x48\x2d\x2a\xca\x2f\x52\x28\x49\xad\x28\xb1\xe6\x02\x04\x00\x00\xff\xff\xaf\xba\x27\xcd\x22\x00\x00\x00")
func _3_add_call_errorUpSqlBytes() ([]byte, error) {
return bindataRead(
__3_add_call_errorUpSql,
"3_add_call_error.up.sql",
)
}
func _3_add_call_errorUpSql() (*asset, error) {
bytes, err := _3_add_call_errorUpSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "3_add_call_error.up.sql", size: 34, mode: os.FileMode(420), modTime: time.Unix(1511989827, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var __4_add_route_updated_atDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\xca\x2f\x2d\x49\x2d\x56\x70\x09\xf2\x0f\x50\x70\xf6\xf7\x09\xf5\xf5\x53\x28\x2d\x48\x49\x2c\x49\x4d\x89\x4f\x2c\xb1\xe6\x02\x04\x00\x00\xff\xff\xa4\x67\xb0\xea\x2b\x00\x00\x00")
func _4_add_route_updated_atDownSqlBytes() ([]byte, error) {
return bindataRead(
__4_add_route_updated_atDownSql,
"4_add_route_updated_at.down.sql",
)
}
func _4_add_route_updated_atDownSql() (*asset, error) {
bytes, err := _4_add_route_updated_atDownSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "4_add_route_updated_at.down.sql", size: 43, mode: os.FileMode(420), modTime: time.Unix(1514060619, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var __4_add_route_updated_atUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\xca\x2f\x2d\x49\x2d\x56\x70\x74\x71\x51\x28\x2d\x48\x49\x2c\x49\x4d\x89\x4f\x2c\x51\x28\x4b\x2c\x4a\xce\x48\x2c\xd2\x30\x32\x35\xd3\xb4\xe6\x02\x04\x00\x00\xff\xff\x54\xf7\xac\x11\x30\x00\x00\x00")
func _4_add_route_updated_atUpSqlBytes() ([]byte, error) {
return bindataRead(
__4_add_route_updated_atUpSql,
"4_add_route_updated_at.up.sql",
)
}
func _4_add_route_updated_atUpSql() (*asset, error) {
bytes, err := _4_add_route_updated_atUpSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "4_add_route_updated_at.up.sql", size: 48, mode: os.FileMode(420), modTime: time.Unix(1514060619, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var __5_add_app_created_atDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x48\x2c\x28\x28\x56\x70\x09\xf2\x0f\x50\x70\xf6\xf7\x09\xf5\xf5\x53\x48\x2e\x4a\x4d\x2c\x49\x4d\x89\x4f\x2c\xb1\xe6\x02\x04\x00\x00\xff\xff\xd2\xde\x5c\x98\x29\x00\x00\x00")
func _5_add_app_created_atDownSqlBytes() ([]byte, error) {
return bindataRead(
__5_add_app_created_atDownSql,
"5_add_app_created_at.down.sql",
)
}
func _5_add_app_created_atDownSql() (*asset, error) {
bytes, err := _5_add_app_created_atDownSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "5_add_app_created_at.down.sql", size: 41, mode: os.FileMode(420), modTime: time.Unix(1514060619, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var __5_add_app_created_atUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x48\x2c\x28\x28\x56\x70\x74\x71\x51\x48\x2e\x4a\x4d\x2c\x49\x4d\x89\x4f\x2c\x51\x28\x4b\x2c\x4a\xce\x48\x2c\xd2\x30\x32\x35\xd3\xb4\xe6\x02\x04\x00\x00\xff\xff\x76\x6c\x0f\x45\x2e\x00\x00\x00")
func _5_add_app_created_atUpSqlBytes() ([]byte, error) {
return bindataRead(
__5_add_app_created_atUpSql,
"5_add_app_created_at.up.sql",
)
}
func _5_add_app_created_atUpSql() (*asset, error) {
bytes, err := _5_add_app_created_atUpSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "5_add_app_created_at.up.sql", size: 46, mode: os.FileMode(420), modTime: time.Unix(1514060619, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var __6_add_app_updated_atDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x48\x2c\x28\x28\x56\x70\x09\xf2\x0f\x50\x70\xf6\xf7\x09\xf5\xf5\x53\x28\x2d\x48\x49\x2c\x49\x4d\x89\x4f\x2c\xb1\xe6\x02\x04\x00\x00\xff\xff\x31\x44\xd7\xcc\x29\x00\x00\x00")
func _6_add_app_updated_atDownSqlBytes() ([]byte, error) {
return bindataRead(
__6_add_app_updated_atDownSql,
"6_add_app_updated_at.down.sql",
)
}
func _6_add_app_updated_atDownSql() (*asset, error) {
bytes, err := _6_add_app_updated_atDownSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "6_add_app_updated_at.down.sql", size: 41, mode: os.FileMode(420), modTime: time.Unix(1514060619, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var __6_add_app_updated_atUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x48\x2c\x28\x28\x56\x70\x74\x71\x51\x28\x2d\x48\x49\x2c\x49\x4d\x89\x4f\x2c\x51\x28\x4b\x2c\x4a\xce\x48\x2c\xd2\x30\x32\x35\xd3\xb4\xe6\x02\x04\x00\x00\xff\xff\x65\x01\x8b\x34\x2e\x00\x00\x00")
func _6_add_app_updated_atUpSqlBytes() ([]byte, error) {
return bindataRead(
__6_add_app_updated_atUpSql,
"6_add_app_updated_at.up.sql",
)
}
func _6_add_app_updated_atUpSql() (*asset, error) {
bytes, err := _6_add_app_updated_atUpSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "6_add_app_updated_at.up.sql", size: 46, mode: os.FileMode(420), modTime: time.Unix(1514060619, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var __7_add_route_cpusDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\xca\x2f\x2d\x49\x2d\x56\x70\x09\xf2\x0f\x50\x70\xf6\xf7\x09\xf5\xf5\x53\x48\x2e\x28\x2d\xb6\xe6\x02\x04\x00\x00\xff\xff\xec\x60\x24\xd0\x25\x00\x00\x00")
func _7_add_route_cpusDownSqlBytes() ([]byte, error) {
return bindataRead(
__7_add_route_cpusDownSql,
"7_add_route_cpus.down.sql",
)
}
func _7_add_route_cpusDownSql() (*asset, error) {
bytes, err := _7_add_route_cpusDownSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "7_add_route_cpus.down.sql", size: 37, mode: os.FileMode(420), modTime: time.Unix(1515624756, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var __7_add_route_cpusUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\xca\x2f\x2d\x49\x2d\x56\x70\x74\x71\x51\x48\x2e\x28\x2d\x56\xc8\xcc\x2b\xb1\xe6\x02\x04\x00\x00\xff\xff\xf1\x18\xf8\xa9\x21\x00\x00\x00")
func _7_add_route_cpusUpSqlBytes() ([]byte, error) {
return bindataRead(
__7_add_route_cpusUpSql,
"7_add_route_cpus.up.sql",
)
}
func _7_add_route_cpusUpSql() (*asset, error) {
bytes, err := _7_add_route_cpusUpSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "7_add_route_cpus.up.sql", size: 33, mode: os.FileMode(420), modTime: time.Unix(1515628068, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
	canonical := strings.Replace(name, "\\", "/", -1)
	loader, ok := _bindata[canonical]
	if !ok {
		return nil, fmt.Errorf("Asset %s not found", name)
	}
	a, err := loader()
	if err != nil {
		return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
	}
	return a.bytes, nil
}
// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
	data, err := Asset(name)
	if err == nil {
		return data
	}
	panic("asset: Asset(" + name + "): " + err.Error())
}
// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
	canonical := strings.Replace(name, "\\", "/", -1)
	loader, ok := _bindata[canonical]
	if !ok {
		return nil, fmt.Errorf("AssetInfo %s not found", name)
	}
	a, err := loader()
	if err != nil {
		return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
	}
	return a.info, nil
}
// AssetNames returns the names of the assets.
// Note: map iteration order is random, so the result is unsorted.
func AssetNames() []string {
	out := make([]string, 0, len(_bindata))
	for key := range _bindata {
		out = append(out, key)
	}
	return out
}
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
"1_add_route_created_at.down.sql": _1_add_route_created_atDownSql,
"1_add_route_created_at.up.sql": _1_add_route_created_atUpSql,
"2_add_call_stats.down.sql": _2_add_call_statsDownSql,
"2_add_call_stats.up.sql": _2_add_call_statsUpSql,
"3_add_call_error.down.sql": _3_add_call_errorDownSql,
"3_add_call_error.up.sql": _3_add_call_errorUpSql,
"4_add_route_updated_at.down.sql": _4_add_route_updated_atDownSql,
"4_add_route_updated_at.up.sql": _4_add_route_updated_atUpSql,
"5_add_app_created_at.down.sql": _5_add_app_created_atDownSql,
"5_add_app_created_at.up.sql": _5_add_app_created_atUpSql,
"6_add_app_updated_at.down.sql": _6_add_app_updated_atDownSql,
"6_add_app_updated_at.up.sql": _6_add_app_updated_atUpSql,
"7_add_route_cpus.down.sql": _7_add_route_cpusDownSql,
"7_add_route_cpus.up.sql": _7_add_route_cpusUpSql,
}
// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
//     data/
//       foo.txt
//       img/
//         a.png
//         b.png
// then AssetDir("data") would return []string{"foo.txt", "img"}
// AssetDir("data/img") would return []string{"a.png", "b.png"}
// AssetDir("foo.txt") and AssetDir("notexist") would return an error
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
	node := _bintree
	if len(name) != 0 {
		canonical := strings.Replace(name, "\\", "/", -1)
		// Walk the tree one path segment at a time.
		for _, segment := range strings.Split(canonical, "/") {
			child := node.Children[segment]
			if child == nil {
				return nil, fmt.Errorf("Asset %s not found", name)
			}
			node = child
		}
	}
	// A node with a load function is a file, not a directory.
	if node.Func != nil {
		return nil, fmt.Errorf("Asset %s not found", name)
	}
	names := make([]string, 0, len(node.Children))
	for child := range node.Children {
		names = append(names, child)
	}
	return names, nil
}
// bintree is a node in the embedded-asset directory tree. File nodes
// carry a non-nil Func that loads the asset; directory nodes carry only
// Children keyed by path segment.
type bintree struct {
	Func     func() (*asset, error)
	Children map[string]*bintree
}
var _bintree = &bintree{nil, map[string]*bintree{
"1_add_route_created_at.down.sql": &bintree{_1_add_route_created_atDownSql, map[string]*bintree{}},
"1_add_route_created_at.up.sql": &bintree{_1_add_route_created_atUpSql, map[string]*bintree{}},
"2_add_call_stats.down.sql": &bintree{_2_add_call_statsDownSql, map[string]*bintree{}},
"2_add_call_stats.up.sql": &bintree{_2_add_call_statsUpSql, map[string]*bintree{}},
"3_add_call_error.down.sql": &bintree{_3_add_call_errorDownSql, map[string]*bintree{}},
"3_add_call_error.up.sql": &bintree{_3_add_call_errorUpSql, map[string]*bintree{}},
"4_add_route_updated_at.down.sql": &bintree{_4_add_route_updated_atDownSql, map[string]*bintree{}},
"4_add_route_updated_at.up.sql": &bintree{_4_add_route_updated_atUpSql, map[string]*bintree{}},
"5_add_app_created_at.down.sql": &bintree{_5_add_app_created_atDownSql, map[string]*bintree{}},
"5_add_app_created_at.up.sql": &bintree{_5_add_app_created_atUpSql, map[string]*bintree{}},
"6_add_app_updated_at.down.sql": &bintree{_6_add_app_updated_atDownSql, map[string]*bintree{}},
"6_add_app_updated_at.up.sql": &bintree{_6_add_app_updated_atUpSql, map[string]*bintree{}},
"7_add_route_cpus.down.sql": &bintree{_7_add_route_cpusDownSql, map[string]*bintree{}},
"7_add_route_cpus.up.sql": &bintree{_7_add_route_cpusUpSql, map[string]*bintree{}},
}}
// RestoreAsset restores an asset under the given directory, recreating
// the parent directories and the recorded mode and modification time.
func RestoreAsset(dir, name string) error {
	data, err := Asset(name)
	if err != nil {
		return err
	}
	info, err := AssetInfo(name)
	if err != nil {
		return err
	}
	if err := os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)); err != nil {
		return err
	}
	if err := ioutil.WriteFile(_filePath(dir, name), data, info.Mode()); err != nil {
		return err
	}
	return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
}
// RestoreAssets restores an asset under the given directory recursively
func RestoreAssets(dir, name string) error {
	children, err := AssetDir(name)
	if err != nil {
		// Not a directory: restore it as a single file.
		return RestoreAsset(dir, name)
	}
	// Directory: recurse into each child.
	for _, child := range children {
		if err := RestoreAssets(dir, filepath.Join(name, child)); err != nil {
			return err
		}
	}
	return nil
}
func _filePath(dir, name string) string {
cannonicalName := strings.Replace(name, "\\", "/", -1)
return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
}

View File

@@ -0,0 +1,10 @@
package migrations
import (
"github.com/fnproject/fn/api/datastore/sql/migratex"
)
// Migrations is the list of fn specific sql migrations to run
var Migrations []migratex.Migration
func vfunc(v int64) func() int64 { return func() int64 { return v } }

View File

@@ -14,6 +14,7 @@ import (
"time"
"github.com/fnproject/fn/api/common"
"github.com/fnproject/fn/api/datastore/sql/migratex"
"github.com/fnproject/fn/api/datastore/sql/migrations"
"github.com/fnproject/fn/api/models"
"github.com/go-sql-driver/mysql"
@@ -23,12 +24,6 @@ import (
_ "github.com/lib/pq"
"github.com/mattn/go-sqlite3"
_ "github.com/mattn/go-sqlite3"
"github.com/rdallman/migrate"
_ "github.com/rdallman/migrate/database/mysql"
_ "github.com/rdallman/migrate/database/postgres"
_ "github.com/rdallman/migrate/database/sqlite3"
"github.com/rdallman/migrate/source"
"github.com/rdallman/migrate/source/go-bindata"
"github.com/sirupsen/logrus"
)
@@ -147,7 +142,9 @@ func newDS(ctx context.Context, url *url.URL) (*sqlStore, error) {
db.SetMaxIdleConns(maxIdleConns)
log.WithFields(logrus.Fields{"max_idle_connections": maxIdleConns, "datastore": driver}).Info("datastore dialed")
err = runMigrations(url.String(), checkExistence(db)) // original url string
sdb := &sqlStore{db: db}
err = sdb.runMigrations(ctx, checkExistence(db), migrations.Migrations)
if err != nil {
log.WithError(err).Error("error running migrations")
return nil, err
@@ -164,7 +161,7 @@ func newDS(ctx context.Context, url *url.URL) (*sqlStore, error) {
}
}
return &sqlStore{db: db}, nil
return sdb, nil
}
func pingWithRetry(ctx context.Context, attempts int, sleep time.Duration, db *sqlx.DB) (err error) {
@@ -201,52 +198,29 @@ func checkExistence(db *sqlx.DB) bool {
// check if the db already existed, if the db is brand new then we can skip
// over all the migrations BUT we must be sure to set the right migration
// number so that only current migrations are skipped, not any future ones.
func runMigrations(url string, exists bool) error {
m, err := migrator(url)
if err != nil {
return err
}
defer m.Close()
if !exists {
func (ds *sqlStore) runMigrations(ctx context.Context, dbExists bool, migrations []migratex.Migration) error {
if !dbExists {
// set to highest and bail
return m.Force(latestVersion(migrations.AssetNames()))
return ds.Tx(func(tx *sqlx.Tx) error {
return migratex.SetVersion(ctx, tx, latestVersion(migrations), false)
})
}
// run any migrations needed to get to latest, if any
err = m.Up()
if err == migrate.ErrNoChange { // we don't care, but want other errors
err = nil
}
return err
}
func migrator(url string) (*migrate.Migrate, error) {
s := bindata.Resource(migrations.AssetNames(),
func(name string) ([]byte, error) {
return migrations.Asset(name)
})
d, err := bindata.WithInstance(s)
if err != nil {
return nil, err
}
return migrate.NewWithSourceInstance("go-bindata", d, url)
return migratex.Up(ctx, ds.db, migrations)
}
// latest version will find the latest version from a list of migration
// names (not from the db)
func latestVersion(migs []string) int {
var highest uint
for _, m := range migs {
mig, _ := source.Parse(m)
if mig.Version > highest {
highest = mig.Version
func latestVersion(migs []migratex.Migration) int64 {
var highest int64
for _, mig := range migs {
if mig.Version() > highest {
highest = mig.Version()
}
}
return int(highest)
return highest
}
// clear is for tests only, be careful, it deletes all records.

View File

@@ -1,14 +1,15 @@
package sql
import (
"context"
"net/url"
"os"
"testing"
"context"
"github.com/fnproject/fn/api/datastore/internal/datastoretest"
"github.com/fnproject/fn/api/datastore/internal/datastoreutil"
"github.com/fnproject/fn/api/datastore/sql/migratex"
"github.com/fnproject/fn/api/datastore/sql/migrations"
"github.com/fnproject/fn/api/models"
)
@@ -23,12 +24,7 @@ func newWithMigrations(ctx context.Context, url *url.URL) (*sqlStore, error) {
return nil, err
}
m, err := migrator(url.String())
if err != nil {
return nil, err
}
err = m.Down()
err = migratex.Down(ctx, ds.db, migrations.Migrations)
if err != nil {
return nil, err
}