* adds migrations, closes #57

migrations only run if the database is not brand new. brand new databases will contain all the right fields when CREATE TABLE is called; this is mostly for readability rather than efficiency (we don't want to have to walk all of the migrations to figure out what columns a table has). on startup of a brand new database, the migrations are analyzed and the highest version is recorded, so that only future migrations will be run. this also avoids running every migration against a fresh db, which could bork it easily enough (if the user exits out of impatience, say). otherwise, any migrations a db has not yet seen are run against it on startup. this should be seamless whether a db had 0 migrations run on it before or N, so users will not have to explicitly run migrations themselves nor see any errors when we upgrade the db (so long as things go well). if a migration does not go well, users will have to manually repair their db (this is the intention of the `migrate` library, and it seems sane). that should be rare, and not having been through it myself I'm unsure how best to resolve it; I would assume it requires running the down migrations and then manually updating the migration version field. in any case, docs once one of us has to go through this.

migrations are written to files and checked into version control, and go-bindata generates those files into go code that gets compiled in and consumed by the migrate library (so we don't have to put migration files on any servers); the generated code is also in vcs. this seems to work ok. I don't like having to use the separate go-bindata tool, but it wasn't hard to install, and go generate takes care of the args. adding migrations should be relatively rare anyway, but I tried to make it pretty painless. a rough sketch of the generate step and the wiring into `migrate` is below.
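to make that concrete, here is a minimal sketch (not the code in this patch) of a go generate directive for go-bindata and of how the generated assets might be handed to the migrate library. the directive's flags and paths are guesses, `ApplyMigrations`, `latestVersion`, and the `freshDB` flag are made-up names, and the import paths show the upstream `mattes/migrate` layout even though this patch points deps at the `rdallman/migrate` fork:

```go
package migrations

// illustrative only: the actual go:generate flags/paths for this repo are not
// shown in this change.
//go:generate go-bindata -pkg migrations -o migrations.go .

import (
	"github.com/mattes/migrate"
	_ "github.com/mattes/migrate/database/postgres" // or mysql / sqlite3
	bindata "github.com/mattes/migrate/source/go_bindata"
)

// latestVersion stands in for "the highest migration version compiled in";
// with the single up/down pair in this patch it would be 1.
const latestVersion = 1

// ApplyMigrations sketches the startup policy described above: a brand-new
// database already has the final schema from CREATE TABLE, so only its
// version is recorded; an existing database runs every migration it has not
// yet seen.
func ApplyMigrations(dbURL string, freshDB bool) error {
	src, err := bindata.WithInstance(bindata.Resource(AssetNames(), Asset))
	if err != nil {
		return err
	}
	m, err := migrate.NewWithSourceInstance("go-bindata", src, dbURL)
	if err != nil {
		return err
	}
	if freshDB {
		// fresh db: record the highest known version without running anything
		return m.Force(latestVersion)
	}
	// existing db: run all outstanding up migrations
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		return err
	}
	return nil
}
```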
1 migration that adds created_at to routes is included here as an example of how to do migrations, as well as to test these things ;) -- `created_at` will be `0001-01-01T00:00:00.000Z` for any existing routes after a user runs this version. we could spend the extra time stamping today's date onto any outstanding records, but that's not really accurate; the main thing is that with migrations in place nobody will have to nuke their db, and we don't really have any prod clusters to worry about. all future routes will correctly have `created_at` set. I plan to add other timestamps too, but wanted to keep this patch as small as possible, so only routes.created_at is done here.

there are tests that a spankin' new db works as expected, and that a db still works after running all the down and then up migrations. the latter tests only run on mysql and postgres, since sqlite3 does not like ALTER TABLE DROP COLUMN; up migrations will need to be tested manually for sqlite3 only, but in theory, if they are simple and work on postgres and mysql, there is a good likelihood of success. the new migration in this patch works fine on sqlite3. a sketch of such a down-and-up test follows the list below.

for now we need to use `github.com/rdallman/migrate` to move forward, as getting integrated into upstream is proving difficult because `github.com/go-sql-driver/mysql` is broken on master (yay dependencies). fortunately, we vendor a version of the `mysql` bindings that actually works, so we are able to use the `mattes/migrate` library successfully. this also requires go1.9 for the new `database/sql.Conn` type; CI has been updated accordingly. some doc fixes from testing too, and of course all deps updated. anyway, whew.

this should let us add fields to the db without busting everybody's dbs. open to feedback on better ways, but this was overall pretty simple despite futzing with mysql.

* add migrate pkg to deps, update deps (use rdallman/migrate until we resolve things in mattes land)
* add README in migrations package
* add ref to mattes lib
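a minimal sketch of the down-and-back-up round trip described above; this is illustrative rather than the test added in this patch, `TEST_DB_URL` is a hypothetical way of wiring in a mysql/postgres url, and the same upstream `mattes/migrate` import paths are assumed as in the earlier sketch. the generated go-bindata output that both sketches lean on follows.

```go
package migrations

import (
	"os"
	"testing"

	"github.com/mattes/migrate"
	_ "github.com/mattes/migrate/database/postgres" // the real tests cover mysql and postgres
	bindata "github.com/mattes/migrate/source/go_bindata"
)

// TestMigrateDownUp brings the schema fully up, tears it all the way down,
// then brings it back up, expecting every pass to succeed. sqlite3 is left
// out in spirit here because it cannot ALTER TABLE ... DROP COLUMN.
func TestMigrateDownUp(t *testing.T) {
	dbURL := os.Getenv("TEST_DB_URL") // hypothetical env var for the test database
	if dbURL == "" {
		t.Skip("no test database configured")
	}

	src, err := bindata.WithInstance(bindata.Resource(AssetNames(), Asset))
	if err != nil {
		t.Fatal(err)
	}
	m, err := migrate.NewWithSourceInstance("go-bindata", src, dbURL)
	if err != nil {
		t.Fatal(err)
	}

	for _, step := range []func() error{m.Up, m.Down, m.Up} {
		if err := step(); err != nil && err != migrate.ErrNoChange {
			t.Fatal(err)
		}
	}
}
```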
// Code generated by go-bindata.
// sources:
// 1_add_route_created_at.down.sql
// 1_add_route_created_at.up.sql
// DO NOT EDIT!

package migrations

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"time"
)

func bindataRead(data []byte, name string) ([]byte, error) {
	gz, err := gzip.NewReader(bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("Read %q: %v", name, err)
	}

	var buf bytes.Buffer
	_, err = io.Copy(&buf, gz)
	clErr := gz.Close()

	if err != nil {
		return nil, fmt.Errorf("Read %q: %v", name, err)
	}
	if clErr != nil {
		// return the close error itself rather than the (nil) copy error
		return nil, clErr
	}

	return buf.Bytes(), nil
}

type asset struct {
	bytes []byte
	info  os.FileInfo
}

type bindataFileInfo struct {
	name    string
	size    int64
	mode    os.FileMode
	modTime time.Time
}

func (fi bindataFileInfo) Name() string {
	return fi.name
}
func (fi bindataFileInfo) Size() int64 {
	return fi.size
}
func (fi bindataFileInfo) Mode() os.FileMode {
	return fi.mode
}
func (fi bindataFileInfo) ModTime() time.Time {
	return fi.modTime
}
func (fi bindataFileInfo) IsDir() bool {
	return false
}
func (fi bindataFileInfo) Sys() interface{} {
	return nil
}

var __1_add_route_created_atDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\xca\x2f\x2d\x49\x2d\x56\x70\x09\xf2\x0f\x50\x70\xf6\xf7\x09\xf5\xf5\x53\x48\x2e\x4a\x4d\x2c\x49\x4d\x89\x4f\x2c\xb1\xe6\x02\x04\x00\x00\xff\xff\x47\xfd\x3b\xbe\x2b\x00\x00\x00")

func _1_add_route_created_atDownSqlBytes() ([]byte, error) {
	return bindataRead(
		__1_add_route_created_atDownSql,
		"1_add_route_created_at.down.sql",
	)
}

func _1_add_route_created_atDownSql() (*asset, error) {
	bytes, err := _1_add_route_created_atDownSqlBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "1_add_route_created_at.down.sql", size: 43, mode: os.FileMode(420), modTime: time.Unix(1508386173, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}

var __1_add_route_created_atUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\xca\x2f\x2d\x49\x2d\x56\x70\x74\x71\x51\x48\x2e\x4a\x4d\x2c\x49\x4d\x89\x4f\x2c\x51\x28\x49\xad\x28\xb1\xe6\x02\x04\x00\x00\xff\xff\x3b\x59\x9c\x54\x28\x00\x00\x00")

func _1_add_route_created_atUpSqlBytes() ([]byte, error) {
	return bindataRead(
		__1_add_route_created_atUpSql,
		"1_add_route_created_at.up.sql",
	)
}

func _1_add_route_created_atUpSql() (*asset, error) {
	bytes, err := _1_add_route_created_atUpSqlBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "1_add_route_created_at.up.sql", size: 40, mode: os.FileMode(420), modTime: time.Unix(1508360377, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}

// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
	cannonicalName := strings.Replace(name, "\\", "/", -1)
	if f, ok := _bindata[cannonicalName]; ok {
		a, err := f()
		if err != nil {
			return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
		}
		return a.bytes, nil
	}
	return nil, fmt.Errorf("Asset %s not found", name)
}

// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
	a, err := Asset(name)
	if err != nil {
		panic("asset: Asset(" + name + "): " + err.Error())
	}

	return a
}

// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
	cannonicalName := strings.Replace(name, "\\", "/", -1)
	if f, ok := _bindata[cannonicalName]; ok {
		a, err := f()
		if err != nil {
			return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
		}
		return a.info, nil
	}
	return nil, fmt.Errorf("AssetInfo %s not found", name)
}

// AssetNames returns the names of the assets.
func AssetNames() []string {
	names := make([]string, 0, len(_bindata))
	for name := range _bindata {
		names = append(names, name)
	}
	return names
}

// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
	"1_add_route_created_at.down.sql": _1_add_route_created_atDownSql,
	"1_add_route_created_at.up.sql":   _1_add_route_created_atUpSql,
}

// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
//     data/
//       foo.txt
//       img/
//         a.png
//         b.png
// then AssetDir("data") would return []string{"foo.txt", "img"}
// AssetDir("data/img") would return []string{"a.png", "b.png"}
// AssetDir("foo.txt") and AssetDir("notexist") would return an error
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
	node := _bintree
	if len(name) != 0 {
		cannonicalName := strings.Replace(name, "\\", "/", -1)
		pathList := strings.Split(cannonicalName, "/")
		for _, p := range pathList {
			node = node.Children[p]
			if node == nil {
				return nil, fmt.Errorf("Asset %s not found", name)
			}
		}
	}
	if node.Func != nil {
		return nil, fmt.Errorf("Asset %s not found", name)
	}
	rv := make([]string, 0, len(node.Children))
	for childName := range node.Children {
		rv = append(rv, childName)
	}
	return rv, nil
}

type bintree struct {
	Func     func() (*asset, error)
	Children map[string]*bintree
}

var _bintree = &bintree{nil, map[string]*bintree{
	"1_add_route_created_at.down.sql": &bintree{_1_add_route_created_atDownSql, map[string]*bintree{}},
	"1_add_route_created_at.up.sql":   &bintree{_1_add_route_created_atUpSql, map[string]*bintree{}},
}}

// RestoreAsset restores an asset under the given directory
func RestoreAsset(dir, name string) error {
	data, err := Asset(name)
	if err != nil {
		return err
	}
	info, err := AssetInfo(name)
	if err != nil {
		return err
	}
	err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
	if err != nil {
		return err
	}
	err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
	if err != nil {
		return err
	}
	err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
	if err != nil {
		return err
	}
	return nil
}

// RestoreAssets restores an asset under the given directory recursively
func RestoreAssets(dir, name string) error {
	children, err := AssetDir(name)
	// File
	if err != nil {
		return RestoreAsset(dir, name)
	}
	// Dir
	for _, child := range children {
		err = RestoreAssets(dir, filepath.Join(name, child))
		if err != nil {
			return err
		}
	}
	return nil
}

func _filePath(dir, name string) string {
	cannonicalName := strings.Replace(name, "\\", "/", -1)
	return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
}