diff --git a/.circleci/config.yml b/.circleci/config.yml index fb673c5eb..cf106f2c2 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -6,7 +6,7 @@ jobs: working_directory: ~/go/src/github.com/fnproject/fn environment: # apparently expansion doesn't work here yet: https://discuss.circleci.com/t/environment-variable-expansion-in-working-directory/11322 - GOPATH=/home/circleci/go - - GOVERSION=1.8.3 + - GOVERSION=1.9.1 - OS=linux - ARCH=amd64 steps: diff --git a/api/datastore/internal/datastoretest/test.go b/api/datastore/internal/datastoretest/test.go index f49e1af02..07093f887 100644 --- a/api/datastore/internal/datastoretest/test.go +++ b/api/datastore/internal/datastoretest/test.go @@ -25,8 +25,13 @@ func setLogBuffer() *bytes.Buffer { return &buf } -func Test(t *testing.T, dsf func() models.Datastore) { +func Test(t *testing.T, dsf func(t *testing.T) models.Datastore) { buf := setLogBuffer() + defer func() { + if t.Failed() { + t.Log(buf.String()) + } + }() ctx := context.Background() @@ -39,17 +44,16 @@ func Test(t *testing.T, dsf func() models.Datastore) { call.Path = testRoute.Path t.Run("call-insert", func(t *testing.T) { - ds := dsf() + ds := dsf(t) call.ID = id.New().String() err := ds.InsertCall(ctx, call) if err != nil { - t.Log(buf.String()) t.Fatalf("Test InsertCall(ctx, &call): unexpected error `%v`", err) } }) t.Run("call-get", func(t *testing.T) { - ds := dsf() + ds := dsf(t) call.ID = id.New().String() ds.InsertCall(ctx, call) newCall, err := ds.GetCall(ctx, call.AppName, call.ID) @@ -57,13 +61,12 @@ func Test(t *testing.T, dsf func() models.Datastore) { t.Fatalf("Test GetCall(ctx, call.ID): unexpected error `%v`", err) } if call.ID != newCall.ID { - t.Log(buf.String()) t.Fatalf("Test GetCall(ctx, call.ID): unexpected error `%v`", err) } }) t.Run("calls-get", func(t *testing.T) { - ds := dsf() + ds := dsf(t) filter := &models.CallFilter{AppName: call.AppName, Path: call.Path, PerPage: 100} call.ID = id.New().String() call.CreatedAt = strfmt.DateTime(time.Now()) @@ -76,7 +79,6 @@ func Test(t *testing.T, dsf func() models.Datastore) { t.Fatalf("Test GetCalls(ctx, filter): unexpected error `%v`", err) } if len(calls) != 1 { - t.Log(buf.String()) t.Fatalf("Test GetCalls(ctx, filter): unexpected length `%v`", len(calls)) } @@ -102,7 +104,6 @@ func Test(t *testing.T, dsf func() models.Datastore) { t.Fatalf("Test GetCalls(ctx, filter): unexpected error `%v`", err) } if len(calls) != 3 { - t.Log(buf.String()) t.Fatalf("Test GetCalls(ctx, filter): unexpected length `%v`", len(calls)) } @@ -113,10 +114,8 @@ func Test(t *testing.T, dsf func() models.Datastore) { t.Fatalf("Test GetCalls(ctx, filter): unexpected error `%v`", err) } if len(calls) != 1 { - t.Log(buf.String()) t.Fatalf("Test GetCalls(ctx, filter): unexpected length `%v`", len(calls)) } else if calls[0].ID != c3.ID { - t.Log(buf.String()) t.Fatalf("Test GetCalls: call ids not in expected order: %v %v", calls[0].ID, c3.ID) } @@ -127,13 +126,10 @@ func Test(t *testing.T, dsf func() models.Datastore) { t.Fatalf("Test GetCalls(ctx, filter): unexpected error `%v`", err) } if len(calls) != 2 { - t.Log(buf.String()) t.Fatalf("Test GetCalls(ctx, filter): unexpected length `%v`", len(calls)) } else if calls[0].ID != c2.ID { - t.Log(buf.String()) t.Fatalf("Test GetCalls: call ids not in expected order: %v %v", calls[0].ID, c2.ID) } else if calls[1].ID != call.ID { - t.Log(buf.String()) t.Fatalf("Test GetCalls: call ids not in expected order: %v %v", calls[1].ID, call.ID) } @@ -143,7 +139,6 @@ func Test(t *testing.T, dsf 
func() models.Datastore) { t.Fatalf("Test GetCalls(ctx, filter): unexpected error `%v`", err) } if len(calls) != 0 { - t.Log(buf.String()) t.Fatalf("Test GetCalls(ctx, filter): unexpected length `%v`", len(calls)) } @@ -152,7 +147,6 @@ func Test(t *testing.T, dsf func() models.Datastore) { t.Fatalf("Test GetCalls(ctx, filter): unexpected error `%v`", err) } if len(calls) != 0 { - t.Log(buf.String()) t.Fatalf("Test GetCalls(ctx, filter): unexpected length `%v`", len(calls)) } @@ -167,42 +161,35 @@ func Test(t *testing.T, dsf func() models.Datastore) { t.Fatalf("Test GetCalls(ctx, filter): unexpected error `%v`", err) } if len(calls) != 1 { - t.Log(buf.String()) t.Fatalf("Test GetCalls(ctx, filter): unexpected length `%v`", len(calls)) } else if calls[0].ID != c2.ID { - t.Log(buf.String()) t.Fatalf("Test GetCalls: call id not expected %s vs %s", calls[0].ID, c2.ID) } }) t.Run("apps", func(t *testing.T) { - ds := dsf() + ds := dsf(t) // Testing insert app _, err := ds.InsertApp(ctx, nil) if err != models.ErrDatastoreEmptyApp { - t.Log(buf.String()) t.Fatalf("Test InsertApp(nil): expected error `%v`, but it was `%v`", models.ErrDatastoreEmptyApp, err) } _, err = ds.InsertApp(ctx, &models.App{}) if err != models.ErrDatastoreEmptyAppName { - t.Log(buf.String()) t.Fatalf("Test InsertApp(&{}): expected error `%v`, but it was `%v`", models.ErrDatastoreEmptyAppName, err) } inserted, err := ds.InsertApp(ctx, testApp) if err != nil { - t.Log(buf.String()) t.Fatalf("Test InsertApp: error when storing new app: %s", err) } if !reflect.DeepEqual(*inserted, *testApp) { - t.Log(buf.String()) t.Fatalf("Test InsertApp: expected to insert:\n%v\nbut got:\n%v", testApp, inserted) } _, err = ds.InsertApp(ctx, testApp) if err != models.ErrAppsAlreadyExists { - t.Log(buf.String()) t.Fatalf("Test InsertApp duplicated: expected error `%v`, but it was `%v`", models.ErrAppsAlreadyExists, err) } @@ -211,12 +198,10 @@ func Test(t *testing.T, dsf func() models.Datastore) { updated, err := ds.UpdateApp(ctx, &models.App{Name: testApp.Name, Config: map[string]string{"TEST": "1"}}) if err != nil { - t.Log(buf.String()) t.Fatalf("Test UpdateApp: error when updating app: %v", err) } expected := &models.App{Name: testApp.Name, Config: map[string]string{"TEST": "1"}} if !reflect.DeepEqual(*updated, *expected) { - t.Log(buf.String()) t.Fatalf("Test UpdateApp: expected updated `%v` but got `%v`", expected, updated) } @@ -224,12 +209,10 @@ func Test(t *testing.T, dsf func() models.Datastore) { updated, err = ds.UpdateApp(ctx, &models.App{Name: testApp.Name, Config: map[string]string{"OTHER": "TEST"}}) if err != nil { - t.Log(buf.String()) t.Fatalf("Test UpdateApp: error when updating app: %v", err) } expected = &models.App{Name: testApp.Name, Config: map[string]string{"TEST": "1", "OTHER": "TEST"}} if !reflect.DeepEqual(*updated, *expected) { - t.Log(buf.String()) t.Fatalf("Test UpdateApp: expected updated `%v` but got `%v`", expected, updated) } @@ -237,12 +220,10 @@ func Test(t *testing.T, dsf func() models.Datastore) { updated, err = ds.UpdateApp(ctx, &models.App{Name: testApp.Name, Config: map[string]string{"TEST": ""}}) if err != nil { - t.Log(buf.String()) t.Fatalf("Test UpdateApp: error when updating app: %v", err) } expected = &models.App{Name: testApp.Name, Config: map[string]string{"OTHER": "TEST"}} if !reflect.DeepEqual(*updated, *expected) { - t.Log(buf.String()) t.Fatalf("Test UpdateApp: expected updated `%v` but got `%v`", expected, updated) } } @@ -250,31 +231,26 @@ func Test(t *testing.T, dsf func() models.Datastore) 
{ // Testing get app _, err = ds.GetApp(ctx, "") if err != models.ErrDatastoreEmptyAppName { - t.Log(buf.String()) t.Fatalf("Test GetApp: expected error to be %v, but it was %s", models.ErrDatastoreEmptyAppName, err) } app, err := ds.GetApp(ctx, testApp.Name) if err != nil { - t.Log(buf.String()) t.Fatalf("Test GetApp: error: %s", err) } if app.Name != testApp.Name { - t.Log(buf.String()) t.Fatalf("Test GetApp: expected `app.Name` to be `%s` but it was `%s`", app.Name, testApp.Name) } // Testing list apps apps, err := ds.GetApps(ctx, &models.AppFilter{PerPage: 100}) if err != nil { - t.Log(buf.String()) t.Fatalf("Test GetApps: unexpected error %v", err) } if len(apps) == 0 { t.Fatal("Test GetApps: expected result count to be greater than 0") } if apps[0].Name != testApp.Name { - t.Log(buf.String()) t.Fatalf("Test GetApps: expected `app.Name` to be `%s` but it was `%s`", app.Name, testApp.Name) } @@ -292,28 +268,23 @@ func Test(t *testing.T, dsf func() models.Datastore) { apps, err = ds.GetApps(ctx, &models.AppFilter{PerPage: 1}) if err != nil { - t.Log(buf.String()) t.Fatalf("Test GetApps: error: %s", err) } if len(apps) != 1 { t.Fatalf("Test GetApps: expected result count to be 1 but got %d", len(apps)) } else if apps[0].Name != testApp.Name { - t.Log(buf.String()) t.Fatalf("Test GetApps: expected `app.Name` to be `%s` but it was `%s`", testApp.Name, apps[0].Name) } apps, err = ds.GetApps(ctx, &models.AppFilter{PerPage: 100, Cursor: apps[0].Name}) if err != nil { - t.Log(buf.String()) t.Fatalf("Test GetApps: error: %s", err) } if len(apps) != 2 { t.Fatalf("Test GetApps: expected result count to be 2 but got %d", len(apps)) } else if apps[0].Name != a2.Name { - t.Log(buf.String()) t.Fatalf("Test GetApps: expected `app.Name` to be `%s` but it was `%s`", a2.Name, apps[0].Name) } else if apps[1].Name != a3.Name { - t.Log(buf.String()) t.Fatalf("Test GetApps: expected `app.Name` to be `%s` but it was `%s`", a3.Name, apps[1].Name) } @@ -326,20 +297,17 @@ func Test(t *testing.T, dsf func() models.Datastore) { apps, err = ds.GetApps(ctx, &models.AppFilter{PerPage: 100}) if err != nil { - t.Log(buf.String()) t.Fatalf("Test GetApps: error: %s", err) } if len(apps) != 4 { t.Fatalf("Test GetApps: expected result count to be 4 but got %d", len(apps)) } else if apps[0].Name != a4.Name { - t.Log(buf.String()) t.Fatalf("Test GetApps: expected `app.Name` to be `%s` but it was `%s`", a4.Name, apps[0].Name) } // TODO fix up prefix stuff //apps, err = ds.GetApps(ctx, &models.AppFilter{Name: "Tes"}) //if err != nil { - //t.Log(buf.String()) //t.Fatalf("Test GetApps(filter): unexpected error %v", err) //} //if len(apps) != 3 { @@ -349,22 +317,18 @@ func Test(t *testing.T, dsf func() models.Datastore) { // Testing app delete err = ds.RemoveApp(ctx, "") if err != models.ErrDatastoreEmptyAppName { - t.Log(buf.String()) t.Fatalf("Test RemoveApp: expected error `%v`, but it was `%v`", models.ErrDatastoreEmptyAppName, err) } err = ds.RemoveApp(ctx, testApp.Name) if err != nil { - t.Log(buf.String()) t.Fatalf("Test RemoveApp: error: %s", err) } app, err = ds.GetApp(ctx, testApp.Name) if err != models.ErrAppsNotFound { - t.Log(buf.String()) t.Fatalf("Test GetApp(removed): expected error `%v`, but it was `%v`", models.ErrAppsNotFound, err) } if app != nil { - t.Log(buf.String()) t.Fatal("Test RemoveApp: failed to remove the app") } @@ -376,17 +340,15 @@ func Test(t *testing.T, dsf func() models.Datastore) { }, }) if err != models.ErrAppsNotFound { - t.Log(buf.String()) t.Fatalf("Test UpdateApp(inexistent): expected 
error `%v`, but it was `%v`", models.ErrAppsNotFound, err) } }) t.Run("routes", func(t *testing.T) { - ds := dsf() + ds := dsf(t) // Insert app again to test routes _, err := ds.InsertApp(ctx, testApp) if err != nil && err != models.ErrAppsAlreadyExists { - t.Log(buf.String()) t.Fatal("Test InsertRoute Prep: failed to insert app: ", err) } @@ -394,25 +356,21 @@ func Test(t *testing.T, dsf func() models.Datastore) { { _, err = ds.InsertRoute(ctx, nil) if err != models.ErrDatastoreEmptyRoute { - t.Log(buf.String()) t.Fatalf("Test InsertRoute(nil): expected error `%v`, but it was `%v`", models.ErrDatastoreEmptyRoute, err) } _, err = ds.InsertRoute(ctx, &models.Route{AppName: "notreal", Path: "/test"}) if err != models.ErrAppsNotFound { - t.Log(buf.String()) t.Fatalf("Test InsertRoute: expected error `%v`, but it was `%v`", models.ErrAppsNotFound, err) } _, err = ds.InsertRoute(ctx, testRoute) if err != nil { - t.Log(buf.String()) t.Fatalf("Test InsertRoute: error when storing new route: %s", err) } _, err = ds.InsertRoute(ctx, testRoute) if err != models.ErrRoutesAlreadyExists { - t.Log(buf.String()) t.Fatalf("Test InsertRoute duplicated: expected error to be `%v`, but it was `%v`", models.ErrRoutesAlreadyExists, err) } } @@ -421,24 +379,20 @@ func Test(t *testing.T, dsf func() models.Datastore) { { _, err = ds.GetRoute(ctx, "a", "") if err != models.ErrDatastoreEmptyRoutePath { - t.Log(buf.String()) t.Fatalf("Test GetRoute(empty route path): expected error `%v`, but it was `%v`", models.ErrDatastoreEmptyRoutePath, err) } _, err = ds.GetRoute(ctx, "", "a") if err != models.ErrDatastoreEmptyAppName { - t.Log(buf.String()) t.Fatalf("Test GetRoute(empty app name): expected error `%v`, but it was `%v`", models.ErrDatastoreEmptyAppName, err) } route, err := ds.GetRoute(ctx, testApp.Name, testRoute.Path) if err != nil { - t.Log(buf.String()) t.Fatalf("Test GetRoute: unexpected error %v", err) } var expected models.Route = *testRoute if !reflect.DeepEqual(*route, expected) { - t.Log(buf.String()) t.Fatalf("Test InsertApp: expected to insert:\n%v\nbut got:\n%v", expected, *route) } } @@ -462,7 +416,6 @@ func Test(t *testing.T, dsf func() models.Datastore) { }, }) if err != nil { - t.Log(buf.String()) t.Fatalf("Test UpdateRoute: unexpected error: %v", err) } expected := &models.Route{ @@ -488,7 +441,6 @@ func Test(t *testing.T, dsf func() models.Datastore) { }, } if !reflect.DeepEqual(*updated, *expected) { - t.Log(buf.String()) t.Fatalf("Test UpdateRoute: expected updated `%v` but got `%v`", expected, updated) } @@ -507,7 +459,6 @@ func Test(t *testing.T, dsf func() models.Datastore) { }, }) if err != nil { - t.Log(buf.String()) t.Fatalf("Test UpdateRoute: unexpected error: %v", err) } expected = &models.Route{ @@ -531,7 +482,6 @@ func Test(t *testing.T, dsf func() models.Datastore) { }, } if !reflect.DeepEqual(*updated, *expected) { - t.Log(buf.String()) t.Fatalf("Test UpdateRoute: expected updated:\n`%v`\nbut got:\n`%v`", expected, updated) } } @@ -539,39 +489,32 @@ func Test(t *testing.T, dsf func() models.Datastore) { // Testing list routes routes, err := ds.GetRoutesByApp(ctx, testApp.Name, &models.RouteFilter{PerPage: 1}) if err != nil { - t.Log(buf.String()) t.Fatalf("Test GetRoutesByApp: unexpected error %v", err) } if len(routes) == 0 { t.Fatal("Test GetRoutesByApp: expected result count to be greater than 0") } if routes[0] == nil { - t.Log(buf.String()) t.Fatalf("Test GetRoutes: expected non-nil route") } else if routes[0].Path != testRoute.Path { - t.Log(buf.String()) t.Fatalf("Test 
GetRoutes: expected `app.Name` to be `%s` but it was `%s`", testRoute.Path, routes[0].Path) } routes, err = ds.GetRoutesByApp(ctx, testApp.Name, &models.RouteFilter{Image: testRoute.Image, PerPage: 1}) if err != nil { - t.Log(buf.String()) t.Fatalf("Test GetRoutesByApp: unexpected error %v", err) } if len(routes) == 0 { t.Fatal("Test GetRoutesByApp: expected result count to be greater than 0") } if routes[0] == nil { - t.Log(buf.String()) t.Fatalf("Test GetRoutesByApp: expected non-nil route") } else if routes[0].Path != testRoute.Path { - t.Log(buf.String()) t.Fatalf("Test GetRoutesByApp: expected `route.Path` to be `%s` but it was `%s`", testRoute.Path, routes[0].Path) } routes, err = ds.GetRoutesByApp(ctx, "notreal", &models.RouteFilter{PerPage: 1}) if err != nil { - t.Log(buf.String()) t.Fatalf("Test GetRoutesByApp: error: %s", err) } if len(routes) != 0 { @@ -593,28 +536,23 @@ func Test(t *testing.T, dsf func() models.Datastore) { routes, err = ds.GetRoutesByApp(ctx, testApp.Name, &models.RouteFilter{PerPage: 1}) if err != nil { - t.Log(buf.String()) t.Fatalf("Test GetRoutesByApp: error: %s", err) } if len(routes) != 1 { t.Fatalf("Test GetRoutesByApp: expected result count to be 1 but got %d", len(routes)) } else if routes[0].Path != testRoute.Path { - t.Log(buf.String()) t.Fatalf("Test GetRoutesByApp: expected `route.Path` to be `%s` but it was `%s`", testRoute.Path, routes[0].Path) } routes, err = ds.GetRoutesByApp(ctx, testApp.Name, &models.RouteFilter{PerPage: 2, Cursor: routes[0].Path}) if err != nil { - t.Log(buf.String()) t.Fatalf("Test GetRoutesByApp: error: %s", err) } if len(routes) != 2 { t.Fatalf("Test GetRoutesByApp: expected result count to be 2 but got %d", len(routes)) } else if routes[0].Path != r2.Path { - t.Log(buf.String()) t.Fatalf("Test GetRoutesByApp: expected `route.Path` to be `%s` but it was `%s`", r2.Path, routes[0].Path) } else if routes[1].Path != r3.Path { - t.Log(buf.String()) t.Fatalf("Test GetRoutesByApp: expected `route.Path` to be `%s` but it was `%s`", r3.Path, routes[1].Path) } @@ -627,13 +565,11 @@ func Test(t *testing.T, dsf func() models.Datastore) { routes, err = ds.GetRoutesByApp(ctx, testApp.Name, &models.RouteFilter{PerPage: 100}) if err != nil { - t.Log(buf.String()) t.Fatalf("Test GetRoutesByApp: error: %s", err) } if len(routes) != 4 { t.Fatalf("Test GetRoutesByApp: expected result count to be 4 but got %d", len(routes)) } else if routes[0].Path != r4.Path { - t.Log(buf.String()) t.Fatalf("Test GetRoutesByApp: expected `route.Path` to be `%s` but it was `%s`", r4.Path, routes[0].Path) } @@ -643,29 +579,24 @@ func Test(t *testing.T, dsf func() models.Datastore) { // Testing route delete err = ds.RemoveRoute(ctx, "", "") if err != models.ErrDatastoreEmptyAppName { - t.Log(buf.String()) t.Fatalf("Test RemoveRoute(empty app name): expected error `%v`, but it was `%v`", models.ErrDatastoreEmptyAppName, err) } err = ds.RemoveRoute(ctx, "a", "") if err != models.ErrDatastoreEmptyRoutePath { - t.Log(buf.String()) t.Fatalf("Test RemoveRoute(empty route path): expected error `%v`, but it was `%v`", models.ErrDatastoreEmptyRoutePath, err) } err = ds.RemoveRoute(ctx, testRoute.AppName, testRoute.Path) if err != nil { - t.Log(buf.String()) t.Fatalf("Test RemoveApp: unexpected error: %v", err) } route, err := ds.GetRoute(ctx, testRoute.AppName, testRoute.Path) if err != nil && err != models.ErrRoutesNotFound { - t.Log(buf.String()) t.Fatalf("Test GetRoute: expected error `%v`, but it was `%v`", models.ErrRoutesNotFound, err) } if route != nil { - 
t.Log(buf.String()) t.Fatalf("Test RemoveApp: failed to remove the route: %v", route) } @@ -675,7 +606,6 @@ func Test(t *testing.T, dsf func() models.Datastore) { Image: "test", }) if err != models.ErrRoutesNotFound { - t.Log(buf.String()) t.Fatalf("Test UpdateRoute inexistent: expected error to be `%v`, but it was `%v`", models.ErrRoutesNotFound, err) } }) diff --git a/api/datastore/mock_test.go b/api/datastore/mock_test.go index 7e48ea04f..b046fcd81 100644 --- a/api/datastore/mock_test.go +++ b/api/datastore/mock_test.go @@ -4,8 +4,12 @@ import ( "testing" "github.com/fnproject/fn/api/datastore/internal/datastoretest" + "github.com/fnproject/fn/api/models" ) func TestDatastore(t *testing.T) { - datastoretest.Test(t, NewMock) + f := func(t *testing.T) models.Datastore { + return NewMock() + } + datastoretest.Test(t, f) } diff --git a/api/datastore/sql/migrations/1_add_route_created_at.down.sql b/api/datastore/sql/migrations/1_add_route_created_at.down.sql new file mode 100644 index 000000000..836d62f91 --- /dev/null +++ b/api/datastore/sql/migrations/1_add_route_created_at.down.sql @@ -0,0 +1 @@ +ALTER TABLE routes DROP COLUMN created_at; diff --git a/api/datastore/sql/migrations/1_add_route_created_at.up.sql b/api/datastore/sql/migrations/1_add_route_created_at.up.sql new file mode 100644 index 000000000..df65b916c --- /dev/null +++ b/api/datastore/sql/migrations/1_add_route_created_at.up.sql @@ -0,0 +1 @@ +ALTER TABLE routes ADD created_at text; diff --git a/api/datastore/sql/migrations/README.md b/api/datastore/sql/migrations/README.md new file mode 100644 index 000000000..5fd33f2a5 --- /dev/null +++ b/api/datastore/sql/migrations/README.md @@ -0,0 +1,40 @@ +# Migrations How-To + +All migration files should be of the format: + +`[0-9]+_[add|remove]_model[_field]*.[up|down].sql` + +The number at the beginning of the file name should increase monotonically from the highest file number already in this directory. E.g. if there is `11_add_foo_bar.up.sql`, your new file should be `12_add_bar_baz.up.sql`. + +All `*.up.sql` files must have an accompanying `*.down.sql` file in order to pass review. + +Each file should contain exactly one ANSI SQL statement. For help, you may refer to https://github.com/mattes/migrate/blob/master/MIGRATIONS.md, which illustrates some of the finer points. + +After creating the file, run the following in the same directory as this README: + +```sh +$ go generate +``` + +NOTE: You may need to `go get github.com/jteeuwen/go-bindata` before running `go generate` in order for it to work. + +After running `go generate`, the `migrations.go` file should be updated. Check the updated `migrations.go`, along with the new `.sql` files, into git. + +After adding the migration, be sure to update the SQL table definitions in `sql.go`, one package up. For example, if you added a column `foo` to `routes`, add that field to the routes `CREATE TABLE` query, as well as to any queries that should return it. + +After doing this, run the test suite to make sure the SQL queries work as intended. The test suite covers both a fresh db and the up and down migration paths. The down migrations will not be tested against SQLite3, since it does not support `ALTER TABLE DROP COLUMN`, but they will still be tested against Postgres and MySQL.
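To make the naming convention concrete, here is a hypothetical follow-up migration pair. It mirrors the `1_add_route_created_at` pair added above; the `2_` prefix and the `apps.created_at` column are invented purely for illustration:

```sql
-- 2_add_app_created_at.up.sql
-- One ANSI SQL statement per file, continuing the numbering from the
-- highest existing migration (1_add_route_created_at).
ALTER TABLE apps ADD created_at text;

-- 2_add_app_created_at.down.sql
-- The accompanying down migration, required to pass review; it reverts
-- exactly what the up migration did.
ALTER TABLE apps DROP COLUMN created_at;
```

Each statement would live in its own file; they are shown together here only for brevity. After adding such a pair, `go generate` regenerates `migrations.go`, and the corresponding `CREATE TABLE` query in `sql.go` would need the new column as well.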
diff --git a/api/datastore/sql/migrations/index.go b/api/datastore/sql/migrations/index.go new file mode 100644 index 000000000..40c7150d1 --- /dev/null +++ b/api/datastore/sql/migrations/index.go @@ -0,0 +1,12 @@ +package migrations + +//go:generate go-bindata -ignore migrations.go -ignore index.go -o migrations.go -pkg migrations . + +// migrations are generated from this cwd with go generate. +// install https://github.com/jteeuwen/go-bindata for go generate +// command to work properly. + +// this will generate a go file with go-bindata of all the migration +// files in 1 go file, so that migrations can be run remotely without +// having to carry the migration files around (i.e. since they are +// compiled into the go binary) diff --git a/api/datastore/sql/migrations/migrations.go b/api/datastore/sql/migrations/migrations.go new file mode 100644 index 000000000..fca5eeec3 --- /dev/null +++ b/api/datastore/sql/migrations/migrations.go @@ -0,0 +1,258 @@ +// Code generated by go-bindata. +// sources: +// 1_add_route_created_at.down.sql +// 1_add_route_created_at.up.sql +// DO NOT EDIT! + +package migrations + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var __1_add_route_created_atDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\xca\x2f\x2d\x49\x2d\x56\x70\x09\xf2\x0f\x50\x70\xf6\xf7\x09\xf5\xf5\x53\x48\x2e\x4a\x4d\x2c\x49\x4d\x89\x4f\x2c\xb1\xe6\x02\x04\x00\x00\xff\xff\x47\xfd\x3b\xbe\x2b\x00\x00\x00") + +func _1_add_route_created_atDownSqlBytes() ([]byte, error) { + return bindataRead( + __1_add_route_created_atDownSql, + "1_add_route_created_at.down.sql", + ) +} + +func _1_add_route_created_atDownSql() (*asset, error) { + bytes, err := _1_add_route_created_atDownSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "1_add_route_created_at.down.sql", size: 43, mode: os.FileMode(420), modTime: time.Unix(1508386173, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __1_add_route_created_atUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\xca\x2f\x2d\x49\x2d\x56\x70\x74\x71\x51\x48\x2e\x4a\x4d\x2c\x49\x4d\x89\x4f\x2c\x51\x28\x49\xad\x28\xb1\xe6\x02\x04\x00\x00\xff\xff\x3b\x59\x9c\x54\x28\x00\x00\x00") + +func _1_add_route_created_atUpSqlBytes() ([]byte, error) { + return bindataRead( + __1_add_route_created_atUpSql, + "1_add_route_created_at.up.sql", + ) +} + +func _1_add_route_created_atUpSql() 
(*asset, error) { + bytes, err := _1_add_route_created_atUpSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "1_add_route_created_at.up.sql", size: 40, mode: os.FileMode(420), modTime: time.Unix(1508360377, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "1_add_route_created_at.down.sql": _1_add_route_created_atDownSql, + "1_add_route_created_at.up.sql": _1_add_route_created_atUpSql, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "1_add_route_created_at.down.sql": &bintree{_1_add_route_created_atDownSql, map[string]*bintree{}}, + "1_add_route_created_at.up.sql": &bintree{_1_add_route_created_atUpSql, map[string]*bintree{}}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) 
+} diff --git a/api/datastore/sql/sql.go b/api/datastore/sql/sql.go index edcc1d966..03d3f1e62 100644 --- a/api/datastore/sql/sql.go +++ b/api/datastore/sql/sql.go @@ -13,6 +13,7 @@ import ( "strings" "time" + "github.com/fnproject/fn/api/datastore/sql/migrations" "github.com/fnproject/fn/api/models" "github.com/go-sql-driver/mysql" _ "github.com/go-sql-driver/mysql" @@ -21,6 +22,12 @@ import ( _ "github.com/lib/pq" "github.com/mattn/go-sqlite3" _ "github.com/mattn/go-sqlite3" + "github.com/rdallman/migrate" + _ "github.com/rdallman/migrate/database/mysql" + _ "github.com/rdallman/migrate/database/postgres" + _ "github.com/rdallman/migrate/database/sqlite3" + "github.com/rdallman/migrate/source" + "github.com/rdallman/migrate/source/go-bindata" "github.com/sirupsen/logrus" ) @@ -41,6 +48,7 @@ var tables = [...]string{`CREATE TABLE IF NOT EXISTS routes ( type varchar(16) NOT NULL, headers text NOT NULL, config text NOT NULL, + created_at text, PRIMARY KEY (app_name, path) );`, @@ -68,7 +76,7 @@ var tables = [...]string{`CREATE TABLE IF NOT EXISTS routes ( } const ( - routeSelector = `SELECT app_name, path, image, format, memory, type, timeout, idle_timeout, headers, config FROM routes` + routeSelector = `SELECT app_name, path, image, format, memory, type, timeout, idle_timeout, headers, config, created_at FROM routes` callSelector = `SELECT id, created_at, started_at, completed_at, status, app_name, path FROM calls` ) @@ -79,11 +87,16 @@ type sqlStore struct { } // New will open the db specified by url, create any tables necessary // and return a models.Datastore safe for concurrent usage. func New(url *url.URL) (models.Datastore, error) { + return newDS(url) +} + +// for test methods, return concrete type, but don't expose +func newDS(url *url.URL) (*sqlStore, error) { driver := url.Scheme // driver must be one of these for sqlx to work, double check: switch driver { - case "postgres", "pgx", "mysql", "sqlite3", "oci8", "ora", "goracle": + case "postgres", "pgx", "mysql", "sqlite3": default: return nil, errors.New("invalid db driver, refer to the code") } @@ -121,6 +134,12 @@ func New(url *url.URL) (models.Datastore, error) { db.SetMaxIdleConns(maxIdleConns) logrus.WithFields(logrus.Fields{"max_idle_connections": maxIdleConns, "datastore": driver}).Info("datastore dialed") + err = runMigrations(url.String(), checkExistence(db)) // original url string + if err != nil { + logrus.WithError(err).Error("error running migrations") + return nil, err + } + switch driver { case "sqlite3": db.SetMaxOpenConns(1) @@ -135,6 +154,104 @@ func New(url *url.URL) (models.Datastore, error) { return &sqlStore{db: db}, nil } +// checkExistence checks whether tables have been created yet; it is not concerned +// with the existence of the schema migration version (since migrations were +// added to existing dbs, we need to know whether the db exists without migrations +// or if it's brand new). +func checkExistence(db *sqlx.DB) bool { + query := db.Rebind(`SELECT name FROM apps LIMIT 1`) + row := db.QueryRow(query) + + var dummy string + err := row.Scan(&dummy) + if err != nil && err != sql.ErrNoRows { + // TODO we should probably ensure this is a certain 'no such table' error + // and if it's not that or err no rows, we should probably block start up. + // if we return false here spuriously, then migrations could be skipped, + // which would be bad.
+ return false + } + return true +} + +// check whether the db already existed; if the db is brand new then we can skip +// over all the migrations, BUT we must be sure to set the right migration +// number so that only current migrations are skipped, not any future ones. +func runMigrations(url string, exists bool) error { + m, err := migrator(url) + if err != nil { + return err + } + defer m.Close() + + if !exists { + // set to highest and bail + return m.Force(latestVersion(migrations.AssetNames())) + } + + // run any migrations needed to get to latest, if any + err = m.Up() + if err == migrate.ErrNoChange { // we don't care, but want other errors + err = nil + } + return err +} + +func migrator(url string) (*migrate.Migrate, error) { + s := bindata.Resource(migrations.AssetNames(), + func(name string) ([]byte, error) { + return migrations.Asset(name) + }) + + d, err := bindata.WithInstance(s) + if err != nil { + return nil, err + } + + return migrate.NewWithSourceInstance("go-bindata", d, url) +} + +// latestVersion finds the latest version from a list of migration +// names (not from the db). +func latestVersion(migs []string) int { + var highest uint + for _, m := range migs { + mig, err := source.Parse(m) + if err != nil { + continue // skip any asset name that is not a valid migration file + } + if mig.Version > highest { + highest = mig.Version + } + } + + return int(highest) +} + +// clear is for tests only; be careful, it deletes all records. +func (ds *sqlStore) clear() error { + return ds.Tx(func(tx *sqlx.Tx) error { + query := tx.Rebind(`DELETE FROM routes`) + _, err := tx.Exec(query) + if err != nil { + return err + } + + query = tx.Rebind(`DELETE FROM calls`) + _, err = tx.Exec(query) + if err != nil { + return err + } + + query = tx.Rebind(`DELETE FROM apps`) + _, err = tx.Exec(query) + if err != nil { + return err + } + + query = tx.Rebind(`DELETE FROM logs`) + _, err = tx.Exec(query) + return err + }) +} + func (ds *sqlStore) InsertApp(ctx context.Context, app *models.App) (*models.App, error) { query := ds.db.Rebind("INSERT INTO apps (name, config) VALUES (:name, :config);") _, err := ds.db.NamedExecContext(ctx, query, app) @@ -298,7 +415,8 @@ func (ds *sqlStore) InsertRoute(ctx context.Context, route *models.Route) (*mode timeout, idle_timeout, headers, - config + config, + created_at ) VALUES ( :app_name, @@ -310,7 +428,8 @@ func (ds *sqlStore) InsertRoute(ctx context.Context, route *models.Route) (*mode :timeout, :idle_timeout, :headers, - :config + :config, + :created_at );`) _, err = tx.NamedExecContext(ctx, query, route) @@ -348,7 +467,8 @@ func (ds *sqlStore) UpdateRoute(ctx context.Context, newroute *models.Route) (*m timeout = :timeout, idle_timeout = :idle_timeout, headers = :headers, - config = :config + config = :config, + created_at = :created_at WHERE app_name=:app_name AND path=:path;`) res, err := tx.NamedExecContext(ctx, query, &route) diff --git a/api/datastore/sql/sql_test.go b/api/datastore/sql/sql_test.go index bf238e27c..a66d594b4 100644 --- a/api/datastore/sql/sql_test.go +++ b/api/datastore/sql/sql_test.go @@ -10,15 +10,45 @@ import ( "github.com/fnproject/fn/api/models" ) +// since New with fresh dbs skips all migrations: +// * open a fresh db on latest version +// * run all down migrations +// * run all up migrations +// [ then run tests against that db ] +func newWithMigrations(url *url.URL) (*sqlStore, error) { + ds, err := newDS(url) + if err != nil { + return nil, err + } + + m, err := migrator(url.String()) + if err != nil { + return nil, err + } + + err = m.Down() + if err != nil { + return nil, err + } + + // go through New, to
ensure our Up logic works in there... + ds, err = newDS(url) + if err != nil { + return nil, err + } + + return ds, nil +} + func TestDatastore(t *testing.T) { defer os.RemoveAll("sqlite_test_dir") u, err := url.Parse("sqlite3://sqlite_test_dir") if err != nil { t.Fatal(err) } - f := func() models.Datastore { + f := func(t *testing.T) models.Datastore { os.RemoveAll("sqlite_test_dir") - ds, err := New(u) + ds, err := newDS(u) if err != nil { t.Fatal(err) } @@ -26,4 +56,63 @@ func TestDatastore(t *testing.T) { return datastoreutil.NewValidator(ds) } datastoretest.Test(t, f) + + // NOTE: sqlite3 does not like ALTER TABLE DROP COLUMN so do not run + // migration tests against it, only pg and mysql -- should prove UP migrations + // will likely work for sqlite3, but may need separate testing by devs :( + + // if being run from test script (CI) poke around for pg and mysql containers + // to run tests against them too. this runs with a fresh db first run, then + // will down migrate all migrations, up migrate, and run tests again. + + both := func(u *url.URL) { + f := func(t *testing.T) models.Datastore { + ds, err := newDS(u) + if err != nil { + t.Fatal(err) + } + err = ds.clear() + if err != nil { + t.Fatal(err) + } + return datastoreutil.NewValidator(ds) + } + + // test fresh w/o migrations + datastoretest.Test(t, f) + + f = func(t *testing.T) models.Datastore { + t.Log("with migrations now!") + ds, err := newWithMigrations(u) + if err != nil { + t.Fatal(err) + } + err = ds.clear() + if err != nil { + t.Fatal(err) + } + return datastoreutil.NewValidator(ds) + } + + // test that migrations work & things work with them + datastoretest.Test(t, f) + } + + if pg := os.Getenv("POSTGRES_URL"); pg != "" { + u, err := url.Parse(pg) + if err != nil { + t.Fatal(err) + } + + both(u) + } + + if mysql := os.Getenv("MYSQL_URL"); mysql != "" { + u, err := url.Parse(mysql) + if err != nil { + t.Fatal(err) + } + + both(u) + } } diff --git a/api/models/route.go b/api/models/route.go index 6305b6dbf..8d6a82ff0 100644 --- a/api/models/route.go +++ b/api/models/route.go @@ -5,6 +5,8 @@ import ( "net/url" "path" "strings" + + "github.com/go-openapi/strfmt" ) const ( @@ -21,16 +23,17 @@ const ( type Routes []*Route type Route struct { - AppName string `json:"app_name" db:"app_name"` - Path string `json:"path" db:"path"` - Image string `json:"image" db:"image"` - Memory uint64 `json:"memory" db:"memory"` - Headers Headers `json:"headers,omitempty" db:"headers"` - Type string `json:"type" db:"type"` - Format string `json:"format" db:"format"` - Timeout int32 `json:"timeout" db:"timeout"` - IdleTimeout int32 `json:"idle_timeout" db:"idle_timeout"` - Config Config `json:"config,omitempty" db:"config"` + AppName string `json:"app_name" db:"app_name"` + Path string `json:"path" db:"path"` + Image string `json:"image" db:"image"` + Memory uint64 `json:"memory" db:"memory"` + Headers Headers `json:"headers,omitempty" db:"headers"` + Type string `json:"type" db:"type"` + Format string `json:"format" db:"format"` + Timeout int32 `json:"timeout" db:"timeout"` + IdleTimeout int32 `json:"idle_timeout" db:"idle_timeout"` + Config Config `json:"config,omitempty" db:"config"` + CreatedAt strfmt.DateTime `json:"created_at,omitempty" db:"created_at"` } // SetDefaults sets zeroed field to defaults.
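The new `CreatedAt` field rides on `strfmt.DateTime`, which is why the migration above can add `created_at` as a plain `text` column on every backend. A minimal standalone sketch (not part of the patch, assuming go-openapi/strfmt's usual RFC3339 serialization for both JSON and `database/sql`):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/go-openapi/strfmt"
)

func main() {
	// Same conversion the patch performs in submitRoute (later in this diff).
	created := strfmt.DateTime(time.Now())

	// MarshalJSON emits an RFC3339 string, which is what the
	// `json:"created_at,omitempty"` tag on Route exposes to clients.
	b, _ := json.Marshal(created)
	fmt.Println(string(b))

	// Value() yields the same string form, so sqlx can bind the field
	// straight into the `created_at text` column via its db tag.
	v, _ := created.Value()
	fmt.Printf("%T: %v\n", v, v)
}
```

Because the column is added to databases that may already contain rows, it is left nullable rather than `NOT NULL` like the other route columns.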
diff --git a/api/server/apps_test.go b/api/server/apps_test.go index 42cd0a530..f643b5d3b 100644 --- a/api/server/apps_test.go +++ b/api/server/apps_test.go @@ -29,6 +29,12 @@ func setLogBuffer() *bytes.Buffer { func TestAppCreate(t *testing.T) { buf := setLogBuffer() + defer func() { + if t.Failed() { + t.Log(buf.String()) + } + }() + for i, test := range []struct { mock models.Datastore logDB models.LogStore @@ -57,7 +63,6 @@ func TestAppCreate(t *testing.T) { _, rec := routerRequest(t, router, "POST", test.path, body) if rec.Code != test.expectedCode { - t.Log(buf.String()) t.Errorf("Test %d: Expected status code to be %d but was %d", i, test.expectedCode, rec.Code) } @@ -66,7 +71,6 @@ func TestAppCreate(t *testing.T) { resp := getErrorResponse(t, rec) if !strings.Contains(resp.Error.Message, test.expectedError.Error()) { - t.Log(buf.String()) t.Errorf("Test %d: Expected error message to have `%s`", i, test.expectedError.Error()) } @@ -77,6 +81,11 @@ func TestAppCreate(t *testing.T) { func TestAppDelete(t *testing.T) { buf := setLogBuffer() + defer func() { + if t.Failed() { + t.Log(buf.String()) + } + }() for i, test := range []struct { ds models.Datastore @@ -99,7 +108,6 @@ func TestAppDelete(t *testing.T) { _, rec := routerRequest(t, srv.Router, "DELETE", test.path, nil) if rec.Code != test.expectedCode { - t.Log(buf.String()) t.Errorf("Test %d: Expected status code to be %d but was %d", i, test.expectedCode, rec.Code) } @@ -108,7 +116,6 @@ func TestAppDelete(t *testing.T) { resp := getErrorResponse(t, rec) if !strings.Contains(resp.Error.Message, test.expectedError.Error()) { - t.Log(buf.String()) t.Errorf("Test %d: Expected error message to have `%s`", i, test.expectedError.Error()) } @@ -119,6 +126,11 @@ func TestAppDelete(t *testing.T) { func TestAppList(t *testing.T) { buf := setLogBuffer() + defer func() { + if t.Failed() { + t.Log(buf.String()) + } + }() rnr, cancel := testRunner(t) defer cancel() @@ -156,7 +168,6 @@ func TestAppList(t *testing.T) { _, rec := routerRequest(t, srv.Router, "GET", test.path, nil) if rec.Code != test.expectedCode { - t.Log(buf.String()) t.Errorf("Test %d: Expected status code to be %d but was %d", i, test.expectedCode, rec.Code) } @@ -165,7 +176,6 @@ func TestAppList(t *testing.T) { resp := getErrorResponse(t, rec) if !strings.Contains(resp.Error.Message, test.expectedError.Error()) { - t.Log(buf.String()) t.Errorf("Test %d: Expected error message to have `%s`", i, test.expectedError.Error()) } @@ -189,6 +199,11 @@ func TestAppList(t *testing.T) { func TestAppGet(t *testing.T) { buf := setLogBuffer() + defer func() { + if t.Failed() { + t.Log(buf.String()) + } + }() rnr, cancel := testRunner(t) defer cancel() @@ -207,7 +222,6 @@ func TestAppGet(t *testing.T) { _, rec := routerRequest(t, srv.Router, "GET", test.path, nil) if rec.Code != test.expectedCode { - t.Log(buf.String()) t.Errorf("Test %d: Expected status code to be %d but was %d", i, test.expectedCode, rec.Code) } @@ -216,7 +230,6 @@ func TestAppGet(t *testing.T) { resp := getErrorResponse(t, rec) if !strings.Contains(resp.Error.Message, test.expectedError.Error()) { - t.Log(buf.String()) t.Errorf("Test %d: Expected error message to have `%s`", i, test.expectedError.Error()) } @@ -226,6 +239,11 @@ func TestAppGet(t *testing.T) { func TestAppUpdate(t *testing.T) { buf := setLogBuffer() + defer func() { + if t.Failed() { + t.Log(buf.String()) + } + }() for i, test := range []struct { mock models.Datastore @@ -259,7 +277,6 @@ func TestAppUpdate(t *testing.T) { _, rec := routerRequest(t, 
srv.Router, "PATCH", test.path, body) if rec.Code != test.expectedCode { - t.Log(buf.String()) t.Errorf("Test %d: Expected status code to be %d but was %d", i, test.expectedCode, rec.Code) } @@ -268,7 +285,6 @@ func TestAppUpdate(t *testing.T) { resp := getErrorResponse(t, rec) if !strings.Contains(resp.Error.Message, test.expectedError.Error()) { - t.Log(buf.String()) t.Errorf("Test %d: Expected error message to have `%s`", i, test.expectedError.Error()) } diff --git a/api/server/routes_create_update.go b/api/server/routes_create_update.go index be21f33ca..3ef4c637a 100644 --- a/api/server/routes_create_update.go +++ b/api/server/routes_create_update.go @@ -5,10 +5,12 @@ import ( "net/http" "path" "strings" + "time" "github.com/fnproject/fn/api" "github.com/fnproject/fn/api/models" "github.com/gin-gonic/gin" + "github.com/go-openapi/strfmt" ) /* handleRouteCreateOrUpdate is used to handle POST PUT and PATCH for routes. @@ -49,6 +51,7 @@ func (s *Server) handleRoutesPostPutPatch(c *gin.Context) { } func (s *Server) submitRoute(ctx context.Context, wroute *models.RouteWrapper) error { + wroute.Route.CreatedAt = strfmt.DateTime(time.Now()) wroute.Route.SetDefaults() err := wroute.Route.Validate() if err != nil { diff --git a/docs/operating/databases/mysql.md b/docs/operating/databases/mysql.md index 3ae752a31..3c56e5b3e 100644 --- a/docs/operating/databases/mysql.md +++ b/docs/operating/databases/mysql.md @@ -6,7 +6,7 @@ Let's presuppose you don't have even a MySQL DB ready. ``` docker run --name func-mysql \ - -e MYSQL_DATABASE=funcs -e MYSQL_USER=func -e MYSQL_PASSWORD=funcpass -d mysql + -e MYSQL_DATABASE=funcs -e MYSQL_USER=func -e MYSQL_PASSWORD=funcpass -e MYSQL_RANDOM_ROOT_PASSWORD=yes -d mysql ``` For more configuration options, see [docker mysql docs](https://hub.docker.com/_/mysql/). @@ -14,7 +14,7 @@ For more configuration options, see [docker mysql docs](https://hub.docker.com/_ ### 2. Now let's start Functions connecting to our new mysql instance ``` -docker run --rm --privileged --link "iron-mysql:mysql" \ +docker run --rm --privileged --link "func-mysql:mysql" \ -e "DB_URL=mysql://func:funcpass@tcp(mysql:3306)/funcs" \ -it -p 8080:8080 fnproject/fn ``` diff --git a/docs/operating/databases/postgres.md b/docs/operating/databases/postgres.md index b8cfd5ef9..e9caf2af4 100644 --- a/docs/operating/databases/postgres.md +++ b/docs/operating/databases/postgres.md @@ -28,7 +28,7 @@ docker run -it --rm --link func-postgres:postgres postgres \ ### 3. 
Now let's start Functions connecting to our new postgres instance ``` -docker run --rm --privileged --link "iron-postgres:postgres" \ +docker run --rm --privileged --link "func-postgres:postgres" \ -e "DB_URL=postgres://postgres:funcpass@postgres/funcs?sslmode=disable" \ -it -p 8080:8080 fnproject/functions ``` diff --git a/glide.lock b/glide.lock index 65fe4b994..05fff1fc5 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: 9c04b00c52a7378e748a93c062aacd32f39fece29a79b50fddad6aa81e2cbab0 -updated: 2017-09-19T14:13:44.550343214-07:00 +hash: bcface74504c6e95fdb7e0de5d0803ae55c7913ae9c83a293f7b0828313d5f63 +updated: 2017-10-25T14:12:02.585493659-07:00 imports: - name: github.com/amir/raidman version: 1ccc43bfb9c93cb401a4025e49c64ba71e5e668b @@ -168,7 +168,7 @@ imports: - name: github.com/eapache/queue version: 44cc805cf13205b55f69e14bcb69867d1ae92f98 - name: github.com/fnproject/fn_go - version: 418dcd8e37593d86604e89a48d7ee2e109a1d3bf + version: 7ce3bb2e624df60cdfbfc1ee5483f6df80bb2b1b subpackages: - client - client/apps @@ -277,12 +277,6 @@ imports: - json/parser - json/scanner - json/token -- name: github.com/iron-io/iron_go3 - version: ded317cb147d3b52b593da08495bc7d53efa17d8 - subpackages: - - api - - config - - mq - name: github.com/jmoiron/jsonq version: e874b168d07ecc7808bc950a17998a8aa3141d82 - name: github.com/jmoiron/sqlx @@ -305,6 +299,11 @@ imports: - buffer - jlexer - jwriter +- name: github.com/mattes/migrate + version: 5b98c13eff7657ab49a1a5f705b72f961d7fc558 + subpackages: + - database + - source - name: github.com/mattn/go-isatty version: fc9e8d8ef48496124e79ae0df75490096eccf6fe - name: github.com/mattn/go-sqlite3 @@ -362,6 +361,7 @@ imports: version: c5b7fccd204277076155f10851dad72b76a49317 subpackages: - prometheus + - prometheus/promhttp - name: github.com/prometheus/client_model version: 6f3806018612930941127f2a7c6c453ba2c527d2 subpackages: @@ -382,6 +382,14 @@ imports: version: de5bf2ad457846296e2031421a34e2568e304e35 - name: github.com/rcrowley/go-metrics version: 1f30fe9094a513ce4c700b9a54458bbb0c96996c +- name: github.com/rdallman/migrate + version: bc72eeb997c7334cb5f05f5aefd2d70bc34d71ef + subpackages: + - database/mysql + - database/postgres + - database/sqlite3 + - source + - source/go-bindata - name: github.com/Shopify/sarama version: 4704a3a8c95920361c47e9a2adec13c3d757c757 - name: github.com/sirupsen/logrus @@ -405,7 +413,7 @@ imports: subpackages: - codec - name: golang.org/x/crypto - version: 7d9177d70076375b9a59c8fde23d52d9c4a7ecd5 + version: 2509b142fb2b797aa7587dad548f113b2c0f20ce subpackages: - bcrypt - blowfish diff --git a/glide.yaml b/glide.yaml index 42b6e1b1e..198baa639 100644 --- a/glide.yaml +++ b/glide.yaml @@ -32,6 +32,9 @@ import: subpackages: - redis - package: github.com/gin-gonic/gin +- package: github.com/rdallman/migrate + # TODO change to mattes/migrate w/ https://github.com/mattes/migrate/pull/299 + version: bc72eeb997c7334cb5f05f5aefd2d70bc34d71ef - package: github.com/go-openapi/errors - package: github.com/go-openapi/loads subpackages: diff --git a/test.sh b/test.sh index 7a2847979..a7e0b9c2b 100755 --- a/test.sh +++ b/test.sh @@ -1,3 +1,4 @@ +#!/bin/bash # Top level test script to start all other tests set -ex diff --git a/test/fn-api-tests/calls_test.go b/test/fn-api-tests/calls_test.go index 3b6ca1fad..e92a9a4b4 100644 --- a/test/fn-api-tests/calls_test.go +++ b/test/fn-api-tests/calls_test.go @@ -17,7 +17,7 @@ func TestCalls(t *testing.T) { s := SetupDefaultSuite() cfg := &call.GetAppsAppCallsParams{ App: s.AppName, - 
Route: &s.RoutePath, + Path: &s.RoutePath, Context: s.Context, } _, err := s.Client.Call.GetAppsAppCalls(cfg) @@ -62,8 +62,8 @@ func TestCalls(t *testing.T) { time.Sleep(time.Second * 5) _, err := s.Client.Call.GetAppsAppCalls(&call.GetAppsAppCallsParams{ - App: s.AppName, - Route: &s.RoutePath, + App: s.AppName, + Path: &s.RoutePath, }) if err != nil { switch err.(type) { @@ -93,7 +93,7 @@ func TestCalls(t *testing.T) { cfg := &call.GetAppsAppCallsParams{ App: s.AppName, - Route: &s.RoutePath, + Path: &s.RoutePath, Context: s.Context, } calls, err := s.Client.Call.GetAppsAppCalls(cfg) diff --git a/vendor/github.com/fnproject/fn_go/VERSION b/vendor/github.com/fnproject/fn_go/VERSION index 341cf11fa..7dff5b892 100644 --- a/vendor/github.com/fnproject/fn_go/VERSION +++ b/vendor/github.com/fnproject/fn_go/VERSION @@ -1 +1 @@ -0.2.0 \ No newline at end of file +0.2.1 \ No newline at end of file diff --git a/vendor/github.com/fnproject/fn_go/client/apps/apps_client.go b/vendor/github.com/fnproject/fn_go/client/apps/apps_client.go index 61cd89802..af2395ba2 100644 --- a/vendor/github.com/fnproject/fn_go/client/apps/apps_client.go +++ b/vendor/github.com/fnproject/fn_go/client/apps/apps_client.go @@ -57,7 +57,7 @@ func (a *Client) DeleteAppsApp(params *DeleteAppsAppParams) (*DeleteAppsAppOK, e /* GetApps gets all app names -Get a list of all the apps in the system. +Get a list of all the apps in the system, returned in alphabetical order. */ func (a *Client) GetApps(params *GetAppsParams) (*GetAppsOK, error) { // TODO: Validate the params before sending diff --git a/vendor/github.com/fnproject/fn_go/client/apps/get_apps_parameters.go b/vendor/github.com/fnproject/fn_go/client/apps/get_apps_parameters.go index 65dd2d233..8ed16bac5 100644 --- a/vendor/github.com/fnproject/fn_go/client/apps/get_apps_parameters.go +++ b/vendor/github.com/fnproject/fn_go/client/apps/get_apps_parameters.go @@ -14,6 +14,7 @@ import ( "github.com/go-openapi/errors" "github.com/go-openapi/runtime" cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/swag" strfmt "github.com/go-openapi/strfmt" ) @@ -21,7 +22,7 @@ import ( // NewGetAppsParams creates a new GetAppsParams object // with the default values initialized. 
func NewGetAppsParams() *GetAppsParams { - + var () return &GetAppsParams{ timeout: cr.DefaultTimeout, @@ -31,7 +32,7 @@ func NewGetAppsParams() *GetAppsParams { // NewGetAppsParamsWithTimeout creates a new GetAppsParams object // with the default values initialized, and the ability to set a timeout on a request func NewGetAppsParamsWithTimeout(timeout time.Duration) *GetAppsParams { - + var () return &GetAppsParams{ timeout: timeout, @@ -41,7 +42,7 @@ func NewGetAppsParamsWithTimeout(timeout time.Duration) *GetAppsParams { // NewGetAppsParamsWithContext creates a new GetAppsParams object // with the default values initialized, and the ability to set a context for a request func NewGetAppsParamsWithContext(ctx context.Context) *GetAppsParams { - + var () return &GetAppsParams{ Context: ctx, @@ -51,7 +52,7 @@ func NewGetAppsParamsWithContext(ctx context.Context) *GetAppsParams { // NewGetAppsParamsWithHTTPClient creates a new GetAppsParams object // with the default values initialized, and the ability to set a custom HTTPClient for a request func NewGetAppsParamsWithHTTPClient(client *http.Client) *GetAppsParams { - + var () return &GetAppsParams{ HTTPClient: client, } @@ -61,6 +62,18 @@ func NewGetAppsParamsWithHTTPClient(client *http.Client) *GetAppsParams { for the get apps operation typically these are written to a http.Request */ type GetAppsParams struct { + + /*Cursor + Cursor from previous response.next_cursor to begin results after, if any. + + */ + Cursor *string + /*PerPage + Number of results to return, defaults to 30. Max of 100. + + */ + PerPage *int64 + timeout time.Duration Context context.Context HTTPClient *http.Client @@ -99,6 +112,28 @@ func (o *GetAppsParams) SetHTTPClient(client *http.Client) { o.HTTPClient = client } +// WithCursor adds the cursor to the get apps params +func (o *GetAppsParams) WithCursor(cursor *string) *GetAppsParams { + o.SetCursor(cursor) + return o +} + +// SetCursor adds the cursor to the get apps params +func (o *GetAppsParams) SetCursor(cursor *string) { + o.Cursor = cursor +} + +// WithPerPage adds the perPage to the get apps params +func (o *GetAppsParams) WithPerPage(perPage *int64) *GetAppsParams { + o.SetPerPage(perPage) + return o +} + +// SetPerPage adds the perPage to the get apps params +func (o *GetAppsParams) SetPerPage(perPage *int64) { + o.PerPage = perPage +} + // WriteToRequest writes these params to a swagger request func (o *GetAppsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { @@ -107,6 +142,38 @@ func (o *GetAppsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Regis } var res []error + if o.Cursor != nil { + + // query param cursor + var qrCursor string + if o.Cursor != nil { + qrCursor = *o.Cursor + } + qCursor := qrCursor + if qCursor != "" { + if err := r.SetQueryParam("cursor", qCursor); err != nil { + return err + } + } + + } + + if o.PerPage != nil { + + // query param per_page + var qrPerPage int64 + if o.PerPage != nil { + qrPerPage = *o.PerPage + } + qPerPage := swag.FormatInt64(qrPerPage) + if qPerPage != "" { + if err := r.SetQueryParam("per_page", qPerPage); err != nil { + return err + } + } + + } + if len(res) > 0 { return errors.CompositeValidationError(res...) 
} diff --git a/vendor/github.com/fnproject/fn_go/client/apps/patch_apps_app_parameters.go b/vendor/github.com/fnproject/fn_go/client/apps/patch_apps_app_parameters.go index 8b4e4f9e8..3c78e8373 100644 --- a/vendor/github.com/fnproject/fn_go/client/apps/patch_apps_app_parameters.go +++ b/vendor/github.com/fnproject/fn_go/client/apps/patch_apps_app_parameters.go @@ -148,12 +148,10 @@ func (o *PatchAppsAppParams) WriteToRequest(r runtime.ClientRequest, reg strfmt. return err } - if o.Body == nil { - o.Body = new(models.AppWrapper) - } - - if err := r.SetBodyParam(o.Body); err != nil { - return err + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } } if len(res) > 0 { diff --git a/vendor/github.com/fnproject/fn_go/client/apps/post_apps_parameters.go b/vendor/github.com/fnproject/fn_go/client/apps/post_apps_parameters.go index afb3ea89e..1b5575aa5 100644 --- a/vendor/github.com/fnproject/fn_go/client/apps/post_apps_parameters.go +++ b/vendor/github.com/fnproject/fn_go/client/apps/post_apps_parameters.go @@ -127,12 +127,10 @@ func (o *PostAppsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Regi } var res []error - if o.Body == nil { - o.Body = new(models.AppWrapper) - } - - if err := r.SetBodyParam(o.Body); err != nil { - return err + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } } if len(res) > 0 { diff --git a/vendor/github.com/fnproject/fn_go/client/call/call_client.go b/vendor/github.com/fnproject/fn_go/client/call/call_client.go index c0d6172b7..613fc161a 100644 --- a/vendor/github.com/fnproject/fn_go/client/call/call_client.go +++ b/vendor/github.com/fnproject/fn_go/client/call/call_client.go @@ -27,7 +27,7 @@ type Client struct { /* GetAppsAppCalls gets app bound calls -Get app-bound calls can filter to route-bound calls. +Get app-bound calls can filter to route-bound calls, results returned in created_at, descending order (newest first). */ func (a *Client) GetAppsAppCalls(params *GetAppsAppCallsParams) (*GetAppsAppCallsOK, error) { // TODO: Validate the params before sending diff --git a/vendor/github.com/fnproject/fn_go/client/call/get_apps_app_calls_parameters.go b/vendor/github.com/fnproject/fn_go/client/call/get_apps_app_calls_parameters.go index ea653e9ec..0ce363a10 100644 --- a/vendor/github.com/fnproject/fn_go/client/call/get_apps_app_calls_parameters.go +++ b/vendor/github.com/fnproject/fn_go/client/call/get_apps_app_calls_parameters.go @@ -14,6 +14,7 @@ import ( "github.com/go-openapi/errors" "github.com/go-openapi/runtime" cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/swag" strfmt "github.com/go-openapi/strfmt" ) @@ -67,11 +68,31 @@ type GetAppsAppCallsParams struct { */ App string - /*Route - App route. + /*Cursor + Cursor from previous response.next_cursor to begin results after, if any. */ - Route *string + Cursor *string + /*FromTime + Unix timestamp in seconds, of call.created_at to begin the results at, default 0. + + */ + FromTime *int64 + /*Path + Route path to match, exact. + + */ + Path *string + /*PerPage + Number of results to return, defaults to 30. Max of 100. + + */ + PerPage *int64 + /*ToTime + Unix timestamp in seconds, of call.created_at to end the results at, defaults to latest. 
+ + */ + ToTime *int64 timeout time.Duration Context context.Context @@ -122,15 +143,59 @@ func (o *GetAppsAppCallsParams) SetApp(app string) { o.App = app } -// WithRoute adds the route to the get apps app calls params -func (o *GetAppsAppCallsParams) WithRoute(route *string) *GetAppsAppCallsParams { - o.SetRoute(route) +// WithCursor adds the cursor to the get apps app calls params +func (o *GetAppsAppCallsParams) WithCursor(cursor *string) *GetAppsAppCallsParams { + o.SetCursor(cursor) return o } -// SetRoute adds the route to the get apps app calls params -func (o *GetAppsAppCallsParams) SetRoute(route *string) { - o.Route = route +// SetCursor adds the cursor to the get apps app calls params +func (o *GetAppsAppCallsParams) SetCursor(cursor *string) { + o.Cursor = cursor +} + +// WithFromTime adds the fromTime to the get apps app calls params +func (o *GetAppsAppCallsParams) WithFromTime(fromTime *int64) *GetAppsAppCallsParams { + o.SetFromTime(fromTime) + return o +} + +// SetFromTime adds the fromTime to the get apps app calls params +func (o *GetAppsAppCallsParams) SetFromTime(fromTime *int64) { + o.FromTime = fromTime +} + +// WithPath adds the path to the get apps app calls params +func (o *GetAppsAppCallsParams) WithPath(path *string) *GetAppsAppCallsParams { + o.SetPath(path) + return o +} + +// SetPath adds the path to the get apps app calls params +func (o *GetAppsAppCallsParams) SetPath(path *string) { + o.Path = path +} + +// WithPerPage adds the perPage to the get apps app calls params +func (o *GetAppsAppCallsParams) WithPerPage(perPage *int64) *GetAppsAppCallsParams { + o.SetPerPage(perPage) + return o +} + +// SetPerPage adds the perPage to the get apps app calls params +func (o *GetAppsAppCallsParams) SetPerPage(perPage *int64) { + o.PerPage = perPage +} + +// WithToTime adds the toTime to the get apps app calls params +func (o *GetAppsAppCallsParams) WithToTime(toTime *int64) *GetAppsAppCallsParams { + o.SetToTime(toTime) + return o +} + +// SetToTime adds the toTime to the get apps app calls params +func (o *GetAppsAppCallsParams) SetToTime(toTime *int64) { + o.ToTime = toTime } // WriteToRequest writes these params to a swagger request @@ -146,16 +211,80 @@ func (o *GetAppsAppCallsParams) WriteToRequest(r runtime.ClientRequest, reg strf return err } - if o.Route != nil { + if o.Cursor != nil { - // query param route - var qrRoute string - if o.Route != nil { - qrRoute = *o.Route + // query param cursor + var qrCursor string + if o.Cursor != nil { + qrCursor = *o.Cursor } - qRoute := qrRoute - if qRoute != "" { - if err := r.SetQueryParam("route", qRoute); err != nil { + qCursor := qrCursor + if qCursor != "" { + if err := r.SetQueryParam("cursor", qCursor); err != nil { + return err + } + } + + } + + if o.FromTime != nil { + + // query param from_time + var qrFromTime int64 + if o.FromTime != nil { + qrFromTime = *o.FromTime + } + qFromTime := swag.FormatInt64(qrFromTime) + if qFromTime != "" { + if err := r.SetQueryParam("from_time", qFromTime); err != nil { + return err + } + } + + } + + if o.Path != nil { + + // query param path + var qrPath string + if o.Path != nil { + qrPath = *o.Path + } + qPath := qrPath + if qPath != "" { + if err := r.SetQueryParam("path", qPath); err != nil { + return err + } + } + + } + + if o.PerPage != nil { + + // query param per_page + var qrPerPage int64 + if o.PerPage != nil { + qrPerPage = *o.PerPage + } + qPerPage := swag.FormatInt64(qrPerPage) + if qPerPage != "" { + if err := r.SetQueryParam("per_page", qPerPage); err != nil { 
+ return err + } + } + + } + + if o.ToTime != nil { + + // query param to_time + var qrToTime int64 + if o.ToTime != nil { + qrToTime = *o.ToTime + } + qToTime := swag.FormatInt64(qrToTime) + if qToTime != "" { + if err := r.SetQueryParam("to_time", qToTime); err != nil { return err } } diff --git a/vendor/github.com/fnproject/fn_go/client/routes/get_apps_app_routes_parameters.go b/vendor/github.com/fnproject/fn_go/client/routes/get_apps_app_routes_parameters.go index 878241a5e..2a1c6221a 100644 --- a/vendor/github.com/fnproject/fn_go/client/routes/get_apps_app_routes_parameters.go +++ b/vendor/github.com/fnproject/fn_go/client/routes/get_apps_app_routes_parameters.go @@ -14,6 +14,7 @@ import ( "github.com/go-openapi/errors" "github.com/go-openapi/runtime" cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/swag" strfmt "github.com/go-openapi/strfmt" ) @@ -67,6 +68,21 @@ type GetAppsAppRoutesParams struct { */ App string + /*Cursor + Cursor from previous response.next_cursor to begin results after, if any. + + */ + Cursor *string + /*Image + Route image to match, exact. + + */ + Image *string + /*PerPage + Number of results to return, defaults to 30. Max of 100. + + */ + PerPage *int64 timeout time.Duration Context context.Context @@ -117,6 +133,39 @@ func (o *GetAppsAppRoutesParams) SetApp(app string) { o.App = app } +// WithCursor adds the cursor to the get apps app routes params +func (o *GetAppsAppRoutesParams) WithCursor(cursor *string) *GetAppsAppRoutesParams { + o.SetCursor(cursor) + return o +} + +// SetCursor adds the cursor to the get apps app routes params +func (o *GetAppsAppRoutesParams) SetCursor(cursor *string) { + o.Cursor = cursor +} + +// WithImage adds the image to the get apps app routes params +func (o *GetAppsAppRoutesParams) WithImage(image *string) *GetAppsAppRoutesParams { + o.SetImage(image) + return o +} + +// SetImage adds the image to the get apps app routes params +func (o *GetAppsAppRoutesParams) SetImage(image *string) { + o.Image = image +} + +// WithPerPage adds the perPage to the get apps app routes params +func (o *GetAppsAppRoutesParams) WithPerPage(perPage *int64) *GetAppsAppRoutesParams { + o.SetPerPage(perPage) + return o +} + +// SetPerPage adds the perPage to the get apps app routes params +func (o *GetAppsAppRoutesParams) SetPerPage(perPage *int64) { + o.PerPage = perPage +} + // WriteToRequest writes these params to a swagger request func (o *GetAppsAppRoutesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { @@ -130,6 +179,54 @@ func (o *GetAppsAppRoutesParams) WriteToRequest(r runtime.ClientRequest, reg str return err } + if o.Cursor != nil { + + // query param cursor + var qrCursor string + if o.Cursor != nil { + qrCursor = *o.Cursor + } + qCursor := qrCursor + if qCursor != "" { + if err := r.SetQueryParam("cursor", qCursor); err != nil { + return err + } + } + + } + + if o.Image != nil { + + // query param image + var qrImage string + if o.Image != nil { + qrImage = *o.Image + } + qImage := qrImage + if qImage != "" { + if err := r.SetQueryParam("image", qImage); err != nil { + return err + } + } + + } + + if o.PerPage != nil { + + // query param per_page + var qrPerPage int64 + if o.PerPage != nil { + qrPerPage = *o.PerPage + } + qPerPage := swag.FormatInt64(qrPerPage) + if qPerPage != "" { + if err := r.SetQueryParam("per_page", qPerPage); err != nil { + return err + } + } + + } + if len(res) > 0 { return errors.CompositeValidationError(res...) 
} diff --git a/vendor/github.com/fnproject/fn_go/client/routes/patch_apps_app_routes_route_parameters.go b/vendor/github.com/fnproject/fn_go/client/routes/patch_apps_app_routes_route_parameters.go index d59d69f5a..42ac78337 100644 --- a/vendor/github.com/fnproject/fn_go/client/routes/patch_apps_app_routes_route_parameters.go +++ b/vendor/github.com/fnproject/fn_go/client/routes/patch_apps_app_routes_route_parameters.go @@ -164,12 +164,10 @@ func (o *PatchAppsAppRoutesRouteParams) WriteToRequest(r runtime.ClientRequest, return err } - if o.Body == nil { - o.Body = new(models.RouteWrapper) - } - - if err := r.SetBodyParam(o.Body); err != nil { - return err + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } } // path param route diff --git a/vendor/github.com/fnproject/fn_go/client/routes/post_apps_app_routes_parameters.go b/vendor/github.com/fnproject/fn_go/client/routes/post_apps_app_routes_parameters.go index bf8b034ab..800b0649d 100644 --- a/vendor/github.com/fnproject/fn_go/client/routes/post_apps_app_routes_parameters.go +++ b/vendor/github.com/fnproject/fn_go/client/routes/post_apps_app_routes_parameters.go @@ -148,12 +148,10 @@ func (o *PostAppsAppRoutesParams) WriteToRequest(r runtime.ClientRequest, reg st return err } - if o.Body == nil { - o.Body = new(models.RouteWrapper) - } - - if err := r.SetBodyParam(o.Body); err != nil { - return err + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } } if len(res) > 0 { diff --git a/vendor/github.com/fnproject/fn_go/client/routes/put_apps_app_routes_route_parameters.go b/vendor/github.com/fnproject/fn_go/client/routes/put_apps_app_routes_route_parameters.go index 5028c5608..937556aed 100644 --- a/vendor/github.com/fnproject/fn_go/client/routes/put_apps_app_routes_route_parameters.go +++ b/vendor/github.com/fnproject/fn_go/client/routes/put_apps_app_routes_route_parameters.go @@ -164,12 +164,10 @@ func (o *PutAppsAppRoutesRouteParams) WriteToRequest(r runtime.ClientRequest, re return err } - if o.Body == nil { - o.Body = new(models.RouteWrapper) - } - - if err := r.SetBodyParam(o.Body); err != nil { - return err + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } } // path param route diff --git a/vendor/github.com/fnproject/fn_go/client/routes/routes_client.go b/vendor/github.com/fnproject/fn_go/client/routes/routes_client.go index ffc5d95b0..d00faa6a9 100644 --- a/vendor/github.com/fnproject/fn_go/client/routes/routes_client.go +++ b/vendor/github.com/fnproject/fn_go/client/routes/routes_client.go @@ -57,7 +57,7 @@ func (a *Client) DeleteAppsAppRoutesRoute(params *DeleteAppsAppRoutesRouteParams /* GetAppsAppRoutes gets route list by app name -This will list routes for a particular app. +This will list routes for a particular app, returned in alphabetical order. */ func (a *Client) GetAppsAppRoutes(params *GetAppsAppRoutesParams) (*GetAppsAppRoutesOK, error) { // TODO: Validate the params before sending diff --git a/vendor/github.com/fnproject/fn_go/models/app.go b/vendor/github.com/fnproject/fn_go/models/app.go index 64f6489cc..09f1d94d7 100644 --- a/vendor/github.com/fnproject/fn_go/models/app.go +++ b/vendor/github.com/fnproject/fn_go/models/app.go @@ -17,7 +17,7 @@ import ( type App struct { - // Application configuration + // Application configuration, applied to all routes. Config map[string]string `json:"config,omitempty"` // Name of this app. Must be different than the image name. Can only contain alphanumeric, -, and _.
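// The hunks above add cursor- and time-based pagination to the generated
// fn_go clients. Below is a minimal usage sketch, not part of the patch: it
// uses the WithPerPage/WithCursor/WithPath/WithFromTime builders and the
// next_cursor wrapper fields introduced in this diff, and assumes the usual
// go-swagger surface for the pieces the diff does not show (the
// NewGetAppsAppCallsParams/WithApp generated pair, apps.Client.GetApps, and
// the *OK.Payload response fields).
package example

import (
	"time"

	"github.com/fnproject/fn_go/client/apps"
	"github.com/fnproject/fn_go/client/call"
	"github.com/fnproject/fn_go/models"
)

// listAllApps pages through the apps list until next_cursor comes back empty.
func listAllApps(c *apps.Client) ([]*models.App, error) {
	perPage := int64(100) // documented max page size
	var (
		all    []*models.App
		cursor string
	)
	for {
		params := apps.NewGetAppsParams().WithPerPage(&perPage)
		if cursor != "" {
			params = params.WithCursor(&cursor)
		}
		ok, err := c.GetApps(params) // assumed generated operation method
		if err != nil {
			return nil, err
		}
		all = append(all, ok.Payload.Apps...)
		if ok.Payload.NextCursor == "" { // an empty cursor marks the last page
			return all, nil
		}
		cursor = ok.Payload.NextCursor
	}
}

// recentCallsForPath sketches the new exact-path and from_time filters on the
// calls list; results arrive newest first per the updated GetAppsAppCalls doc.
func recentCallsForPath(c *call.Client, app, path string, since time.Time) ([]*models.Call, error) {
	from := since.Unix()
	// NewGetAppsAppCallsParams and WithApp are assumed from the generated
	// pattern (the diff shows their NewGetAppsParams/SetApp siblings).
	params := call.NewGetAppsAppCallsParams().
		WithApp(app).
		WithPath(&path).
		WithFromTime(&from)
	ok, err := c.GetAppsAppCalls(params)
	if err != nil {
		return nil, err
	}
	return ok.Payload.Calls, nil
}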
diff --git a/vendor/github.com/fnproject/fn_go/models/apps_wrapper.go b/vendor/github.com/fnproject/fn_go/models/apps_wrapper.go index f57fefd30..24cc9e756 100644 --- a/vendor/github.com/fnproject/fn_go/models/apps_wrapper.go +++ b/vendor/github.com/fnproject/fn_go/models/apps_wrapper.go @@ -6,8 +6,6 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( - "strconv" - strfmt "github.com/go-openapi/strfmt" "github.com/go-openapi/errors" @@ -22,16 +20,22 @@ type AppsWrapper struct { // apps // Required: true - Apps []*App `json:"apps"` + Apps AppsWrapperApps `json:"apps"` // error Error *ErrorBody `json:"error,omitempty"` + + // cursor to send with subsequent request to receive the next page, if non-empty + // Read Only: true + NextCursor string `json:"next_cursor,omitempty"` } /* polymorph AppsWrapper apps false */ /* polymorph AppsWrapper error false */ +/* polymorph AppsWrapper next_cursor false */ + // Validate validates this apps wrapper func (m *AppsWrapper) Validate(formats strfmt.Registry) error { var res []error @@ -58,24 +62,6 @@ func (m *AppsWrapper) validateApps(formats strfmt.Registry) error { return err } - for i := 0; i < len(m.Apps); i++ { - - if swag.IsZero(m.Apps[i]) { // not required - continue - } - - if m.Apps[i] != nil { - - if err := m.Apps[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("apps" + "." + strconv.Itoa(i)) - } - return err - } - } - - } - return nil } diff --git a/vendor/github.com/fnproject/fn_go/models/apps_wrapper_apps.go b/vendor/github.com/fnproject/fn_go/models/apps_wrapper_apps.go new file mode 100644 index 000000000..d5daeb434 --- /dev/null +++ b/vendor/github.com/fnproject/fn_go/models/apps_wrapper_apps.go @@ -0,0 +1,48 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" +) + +// AppsWrapperApps apps wrapper apps +// swagger:model appsWrapperApps + +type AppsWrapperApps []*App + +// Validate validates this apps wrapper apps +func (m AppsWrapperApps) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/fnproject/fn_go/models/calls_wrapper.go b/vendor/github.com/fnproject/fn_go/models/calls_wrapper.go index e7fceef44..f8d005a8b 100644 --- a/vendor/github.com/fnproject/fn_go/models/calls_wrapper.go +++ b/vendor/github.com/fnproject/fn_go/models/calls_wrapper.go @@ -6,8 +6,6 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( - "strconv" - strfmt "github.com/go-openapi/strfmt" "github.com/go-openapi/errors" @@ -22,16 +20,22 @@ type CallsWrapper struct { // calls // Required: true - Calls []*Call `json:"calls"` + Calls CallsWrapperCalls `json:"calls"` // error Error *ErrorBody `json:"error,omitempty"` + + // cursor to send with subsequent request to receive the next page, if non-empty + // Read Only: true + NextCursor string `json:"next_cursor,omitempty"` } /* polymorph CallsWrapper calls false */ /* polymorph CallsWrapper error false */ +/* polymorph CallsWrapper next_cursor false */ + // Validate validates this calls wrapper func (m *CallsWrapper) Validate(formats strfmt.Registry) error { var res []error @@ -58,24 +62,6 @@ func (m *CallsWrapper) validateCalls(formats strfmt.Registry) error { return err } - for i := 0; i < len(m.Calls); i++ { - - if swag.IsZero(m.Calls[i]) { // not required - continue - } - - if m.Calls[i] != nil { - - if err := m.Calls[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("calls" + "." + strconv.Itoa(i)) - } - return err - } - } - - } - return nil } diff --git a/vendor/github.com/fnproject/fn_go/models/calls_wrapper_calls.go b/vendor/github.com/fnproject/fn_go/models/calls_wrapper_calls.go new file mode 100644 index 000000000..2862dfff4 --- /dev/null +++ b/vendor/github.com/fnproject/fn_go/models/calls_wrapper_calls.go @@ -0,0 +1,48 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" +) + +// CallsWrapperCalls calls wrapper calls +// swagger:model callsWrapperCalls + +type CallsWrapperCalls []*Call + +// Validate validates this calls wrapper calls +func (m CallsWrapperCalls) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/fnproject/fn_go/models/routes_wrapper.go b/vendor/github.com/fnproject/fn_go/models/routes_wrapper.go index f6c2b8e30..ce13373ff 100644 --- a/vendor/github.com/fnproject/fn_go/models/routes_wrapper.go +++ b/vendor/github.com/fnproject/fn_go/models/routes_wrapper.go @@ -6,8 +6,6 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( - "strconv" - strfmt "github.com/go-openapi/strfmt" "github.com/go-openapi/errors" @@ -23,13 +21,19 @@ type RoutesWrapper struct { // error Error *ErrorBody `json:"error,omitempty"` + // cursor to send with subsequent request to receive the next page, if non-empty + // Read Only: true + NextCursor string `json:"next_cursor,omitempty"` + // routes // Required: true - Routes []*Route `json:"routes"` + Routes RoutesWrapperRoutes `json:"routes"` } /* polymorph RoutesWrapper error false */ +/* polymorph RoutesWrapper next_cursor false */ + /* polymorph RoutesWrapper routes false */ // Validate validates this routes wrapper @@ -77,24 +81,6 @@ func (m *RoutesWrapper) validateRoutes(formats strfmt.Registry) error { return err } - for i := 0; i < len(m.Routes); i++ { - - if swag.IsZero(m.Routes[i]) { // not required - continue - } - - if m.Routes[i] != nil { - - if err := m.Routes[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("routes" + "." + strconv.Itoa(i)) - } - return err - } - } - - } - return nil } diff --git a/vendor/github.com/fnproject/fn_go/models/routes_wrapper_routes.go b/vendor/github.com/fnproject/fn_go/models/routes_wrapper_routes.go new file mode 100644 index 000000000..5bc07812b --- /dev/null +++ b/vendor/github.com/fnproject/fn_go/models/routes_wrapper_routes.go @@ -0,0 +1,48 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + strfmt "github.com/go-openapi/strfmt" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" +) + +// RoutesWrapperRoutes routes wrapper routes +// swagger:model routesWrapperRoutes + +type RoutesWrapperRoutes []*Route + +// Validate validates this routes wrapper routes +func (m RoutesWrapperRoutes) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/iron-io/iron_go3/.gitignore b/vendor/github.com/iron-io/iron_go3/.gitignore deleted file mode 100644 index f3172d5ac..000000000 --- a/vendor/github.com/iron-io/iron_go3/.gitignore +++ /dev/null @@ -1,28 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -*.sublime* -.idea/ -*.iml - -iron.json diff --git a/vendor/github.com/iron-io/iron_go3/README.md b/vendor/github.com/iron-io/iron_go3/README.md deleted file mode 100644 index d683cf448..000000000 --- a/vendor/github.com/iron-io/iron_go3/README.md +++ /dev/null @@ -1,530 +0,0 @@ -go.iron -======= - -[Iron.io](http://www.iron.io) Go (golang) API libraries - -Go docs: http://godoc.org/github.com/iron-io/iron_go3 - - -Iron.io Go Client Library -------------- - -# IronMQ - -[IronMQ](http://www.iron.io/products/mq) is an elastic message queue for managing data and event flow within cloud applications and between systems. - -The [full API documentation is here](http://dev.iron.io/mq/reference/api/) and this client tries to stick to the API as -much as possible so if you see an option in the API docs, you can use it in the methods below. - -You can find [Go docs here](http://godoc.org/github.com/iron-io/iron_go3). - -## Getting Started - -### Get credentials - -To start using iron_go, you need to sign up and get an oauth token. - -1. Go to http://iron.io/ and sign up. -2. Create new project at http://hud.iron.io/dashboard -3. Download the iron.json file from "Credentials" block of project - --- - -### Configure - -1\. Reference the library: - -```go -import "github.com/iron-io/iron_go3/mq" -``` - -2\. [Setup your Iron.io credentials](http://dev.iron.io/mq/3/reference/configuration/) - -3\. Create an IronMQ client object: - -```go -queue := mq.New("test_queue"); -``` - -Or use initializer with settings specified in code: - -```go -settings := &config.Settings { - Token: "l504pLkINUWYDSO9YW4m", - ProjectId: "53ec6fc95e8edd2884000003", - Host: "localhost", - Scheme: "http", - Port: 8080, -} -queue := mq.ConfigNew("test_queue", settings); -``` - -Push queues must be explicitly created. There's no changing a queue's type. - -```go -subscribers := []mq.QueueSubscriber{mq.QueueSubscriber{Name: "sub1", URL: "wwww.subscriber1.com"}, mq.QueueSubscriber{Name: "sub2", URL: "wwww.subscriber2.com"}} -subscription := mq.PushInfo { - Retries: 3, - RetriesDelay: 60, - ErrorQueue: "error_queue", - Subscribers: subscribers, -} -queue_type := "multicast"; -queueInfo := mq.QueueInfo{ Type: &queue_type, MessageExpiration: 60, MessageTimeout: 56, Push: &subscription} -result, err := mq.CreateQueue("test_queue", queueInfo); -``` - -## The Basics - -### Get Queues List - -```go -queues, err := mq.ListQueues(0, 100); -for _, element := range queues { - fmt.Println(element.Name); -} -``` - -Request URL Query Parameters: - -* per_page - number of elements in response, default is 30. -* previous - this is the last queue on the previous page, it will start from the next one. If queue with specified - name doesn’t exist result will contain first per_page queues that lexicographically greater than previous. -* prefix - an optional queue prefix to search on. e.g., prefix=ca could return queues ["cars", "cats", etc.] - -FilterPage will return the list of queues with the specified options. 
- -```go -queues := mq.FilterPage(prefix, prev string, perPage int) -``` --- - -### Get a Queue Object - -You can have as many queues as you want, each with their own unique set of messages. - -```go -queue := mq.New("test_queue"); -``` - -Now you can use it. - --- - -### Post a Message on a Queue - -Messages are placed on the queue in a FIFO arrangement. -If a queue does not exist, it will be created upon the first posting of a message. - -```go -id, err := q.PushString("Hello, World!") -``` - --- - -### Retrieve Queue Information - -```go -info, err := q.Info() -fmt.Println(info.Name); -``` - --- - -### Reserve/Get a Message off a Queue - -```go -msg, err := q.Reserve() -fmt.Printf("The message says: %q\n", msg.Body) -``` - --- - -### Delete a Message from a Queue - -```go -msg, _ := q.Reserve() -// perform some actions with a message here -msg.Delete() -``` - -Be sure to delete a message from the queue when you're done with it. - - -```go -msg, _ := q.Reserve() -// perform some actions with a message here -msg.Delete() -``` -Delete multiple messages from the queue: - -```go -ids, err := queue.PushStrings("more", "and more", "and more") -queue.DeleteMessages(ids) -``` -Delete multiple reserved messages: - -```go -messages, err := queue.ReserveN(3) -queue.DeleteReservedMessages(messages) -``` --- - -## Queues - -### Retrieve Queue Information - -```go -info, err := q.Info() -fmt.Println(info.Name); -fmt.Println(info.Size); -``` - -QueueInfo struct consists of the following fields: - -```go -type QueueInfo struct { - Id string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - PushType string `json:"push_type,omitempty"` - Reserved int `json:"reserved,omitempty"` - RetriesDelay int `json:"retries,omitempty"` - Retries int `json:"retries_delay,omitempty"` - Size int `json:"size,omitempty"` - Subscribers []QueueSubscriber `json:"subscribers,omitempty"` - TotalMessages int `json:"total_messages,omitempty"` - ErrorQueue string `json:"error_queue,omitempty"` -} -``` - --- - -### Delete a Message Queue - -```go -deleted, err := q.Delete() -if(deleted) { - fmt.Println("Successfully deleted") -} else { - fmt.Println("Cannot delete, because of error: ", err) -} -``` - --- - -### Post Messages to a Queue - -**Single message:** - -```go -id, err := q.PushString("Hello, World!") -// To control parameters like timeout and delay, construct your own message. -id, err := q.PushMessage(&mq.Message{Delay: 0, Body: "Hi there"}) -``` - -**Multiple messages:** - -You can also pass multiple messages in a single call. - -```go -ids, err := q.PushStrings("Message 1", "Message 2") -``` - -To control parameters like timeout and delay, construct your own message. - -```go -ids, err = q.PushMessages( - &mq.Message{Delay: 0, Body: "The first"}, - &mq.Message{Delay: 10, Body: "The second"}, - &mq.Message{Delay: 10, Body: "The third"}, - &mq.Message{Delay: 0, Body: "The fifth"}, -) -``` - -**Parameters:** - -* `Delay`: The item will not be available on the queue until this many seconds have passed. -Default is 0 seconds. Maximum is 604,800 seconds (7 days). - --- - -### Get Messages from a Queue - -```go -msg, err := q.Reserve() -fmt.Printf("The message says: %q\n", msg.Body) -``` - -When you reserve a message from the queue, it is no longer on the queue but it still exists within the system. -You have to explicitly delete the message or else it will go back onto the queue after the `timeout`. -The default `timeout` is 60 seconds. Minimal `timeout` is 30 seconds. 
- -You also can get several messages at a time: - -```go -// get 5 messages -msgs, err := q.ReserveN(5) -``` - -And with timeout param: - -```go -messages, err := q.GetNWithTimeout(4, 600) -``` - -### Touch a Message on a Queue - -Touching a reserved message extends its timeout by the duration specified when the message was created, which is 60 seconds by default. - -```go -msg, _ := q.Reserve() -err := msg.Touch() // new reservation id will be assigned to current message -``` - -There is another way to touch a message without getting it: - -```go -newReservationId, err := q.TouchMessage(messageId, reservationId) -``` - -#### Specifiying timeout - -```go -msg, _ := q.Reserve() -err := msg.TouchFor(10) // new reservation id will be assigned to current message -``` - -or - -```go -newReservationId, err := q.TouchMessageFor(messageId, reservationId, 10) -``` - --- - -### Release Message - -```go -msg, _ := q.Reserve() -delay := 30 -err := msg.release(delay) -``` - -Or another way to release a message without creation of message object: - -```go -delay := 30 -err := q.ReleaseMessage("5987586196292186572", delay) -``` - -**Optional parameters:** - -* `delay`: The item will not be available on the queue until this many seconds have passed. -Default is 0 seconds. Maximum is 604,800 seconds (7 days). - --- - -### Delete a Message from a Queue - -```go -msg, _ := q.Reserve() -// perform some actions with a message here -err := msg.Delete() -``` - -Or - -```go -err := q.DeleteMessage("5987586196292186572") -``` - -Be sure to delete a message from the queue when you're done with it. - --- - -### Peek Messages from a Queue - -Peeking at a queue returns the next messages on the queue, but it does not reserve them. - -```go -message, err := q.Peek() -``` - -There is a way to get several messages not reserving them: - -```go -messages, err := q.PeekN(50) -for _, m := range messages { - fmt.Println(m.Body) -} -``` - -And with timeout param: - -```go -messages, err := q.PeekNWithTimeout(4, 600) -``` - --- - -### Clear a Queue - -```go -err := q.Clear() -``` - -### Add an Alert to a Queue - -[Check out our Blog Post on Queue Alerts](http://blog.iron.io). - -Alerts have now been incorporated into IronMQ. This feature lets developers control actions based on the activity within a queue. With alerts, actions can be triggered when the number of messages in a queue reach a certain threshold. These actions can include things like auto-scaling, failure detection, load-monitoring, and system health. - -You may add up to 5 alerts per queue. - -**Required parameters:** -* `type`: required - "fixed" or "progressive". In case of alert's type set to "fixed", alert will be triggered when queue size pass value set by trigger parameter. When type set to "progressive", alert will be triggered when queue size pass any of values, calculated by trigger * N where N >= 1. For example, if trigger set to 10, alert will be triggered at queue sizes 10, 20, 30, etc. -* `direction`: required - "asc" or "desc". Set direction in which queue size must be changed when pass trigger value. If direction set to "asc" queue size must growing to trigger alert. When direction is "desc" queue size must decreasing to trigger alert. -* `trigger`: required. It will be used to calculate actual values of queue size when alert must be triggered. See type field description. Trigger must be integer value greater than 0. -* `queue`: required. Name of queue which will be used to post alert messages. 
- -```go -err := q.AddAlerts( - &mq.Alert{Queue: "new_milestone_queue", Trigger: 10, Direction: "asc", Type: "progressive"}, - &mq.Alert{Queue: "low_level_queue", Trigger: 5, Direction: "desc", Type: "fixed" }) -``` - -#### Update alerts in a queue -```go -err := q.AddAlerts( - &mq.Alert{Queue: "milestone_queue", Trigger: 100, Direction: "asc", Type: "progressive"}) -``` - -#### Remove alerts from a queue - -You can delete an alert from a queue by id: - -```go -err := q.RemoveAlert("532fdf593663ed6afa06ed16") -``` - -Or delete several alerts by ids: - -```go -err := q.RemoveAlerts("532f59663ed6afed16483052", "559663ed6af6483399b3400a") -``` - -Also you can delete all alerts - -```go -err := q.RemoveAllAlerts() -``` - -Please, remember, that passing zero of alerts while update process will lead to deleating of all previously added alerts. - -```go -q.AddAlerts( - &mq.Alert{Queue: "alert1", Trigger: 10, Direction: "asc", Type: "progressive"}, - &mq.Alert{Queue: "alert2", Trigger: 5, Direction: "desc", Type: "fixed" }) -info, _ := q.Info() // 2 - -q.UpdateAlerts() -info, _ = q.Info() // 0 -``` - --- - -## Push Queues - -IronMQ push queues allow you to setup a queue that will push to an endpoint, rather than having to poll the endpoint. -[Here's the announcement for an overview](http://blog.iron.io/2013/01/ironmq-push-queues-reliable-message.html). - -### Update a Message Queue - -Same as create queue, all QueueInfo fields are optional. Queue type cannot be changed. - -```go -info, err := q.Update(...) -``` - - -QueueInfo struct consists of following fields: - -```go -type QueueInfo struct { - PushType string `json:"push_type,omitempty"` - RetriesDelay int `json:"retries,omitempty"` - Retries int `json:"retries_delay,omitempty"` - Subscribers []QueueSubscriber `json:"subscribers,omitempty"` - // and some other fields not related to push queues -} -``` - -**The following parameters are all related to Push Queues:** - -* `type`: Either `multicast` to push to all subscribers or `unicast` to push to one and only one subscriber. Default is `multicast`. -* `retries`: How many times to retry on failure. Default is 3. Maximum is 100. -* `retries_delay`: Delay between each retry in seconds. Default is 60. -* `subscribers`: An array of `QueueSubscriber` -This set of subscribers will replace the existing subscribers. -To add or remove subscribers, see the add subscribers endpoint or the remove subscribers endpoint. - -QueueSubscriber has the following structure: - -```go -type QueueSubscriber struct { - URL string `json:"url"` - Headers map[string]string `json:"headers,omitempty"` -} -``` - --- - -### Set Subscribers on a Queue - -Subscribers can be any HTTP endpoint. `push_type` is one of: - -* `multicast`: will push to all endpoints/subscribers -* `unicast`: will push to one and only one endpoint/subscriber - -Subscribers could be added only to push queue (unicast or multicast). 
It's possible to set it while creating a queue: - -```go -queueType := "multicast" -subscribers := []mq.QueueSubscriber{ - mq.QueueSubscriber{Name: "test3", URL: "http://mysterious-brook-1807.herokuapp.com/ironmq_push_3"}, - mq.QueueSubscriber{Name: "test4", URL: "http://mysterious-brook-1807.herokuapp.com/ironmq_push_4"}, -} -pushInfo := mq.PushInfo{RetriesDelay: 45, Retries: 2, Subscribers: subscribers} -info, err := mq.CreateQueue(qn, mq.QueueInfo{Type: &queueType, Push: &pushInfo}) -``` - -It's also possible to manage subscribers for existing push queue using the following methods: - -* `AddSubscribers` - adds subscribers and replaces existing (if name of old one is equal to name of new one) -* `ReplaceSubscribers` - adds new collection of subscribers instead of existing -* `RemoveSubscribers` and `RemoveSubscribersCollection` - remove specified subscribers - --- - -### Get Message Push Status - -After pushing a message: - -```go -subscribers, err := message.Subscribers() -``` - -Returns an array of subscribers with status. - --- - -## Further Links - -* [IronMQ Overview](http://dev.iron.io/mq/3/) -* [IronMQ REST/HTTP API](http://dev.iron.io/mq/3/reference/api/) -* [Push Queues](http://dev.iron.io/mq/reference/push_queues/) -* [Other Client Libraries](http://dev.iron.io/mq/3/libraries/) -* [Live Chat, Support & Fun](http://get.iron.io/chat) - -------------- -© 2011 - 2014 Iron.io Inc. All Rights Reserved. diff --git a/vendor/github.com/iron-io/iron_go3/api/api.go b/vendor/github.com/iron-io/iron_go3/api/api.go deleted file mode 100644 index 989f16119..000000000 --- a/vendor/github.com/iron-io/iron_go3/api/api.go +++ /dev/null @@ -1,289 +0,0 @@ -// api provides common functionality for all the iron.io APIs -package api - -import ( - "bytes" - "crypto/tls" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "log" - "net" - "net/http" - "net/url" - "os" - "strings" - "time" - - "github.com/iron-io/iron_go3/config" -) - -type DefaultResponseBody struct { - Msg string `json:"msg"` -} - -type URL struct { - URL url.URL - ContentType string - Settings config.Settings -} - -var ( - Debug bool - DebugOnErrors bool - DefaultCacheSize = 8192 - - // HttpClient is the client used by iron_go to make each http request. It is exported in case - // the client would like to modify it from the default behavior from http.DefaultClient. - // This uses the DefaultTransport modified to enable TLS Session Client caching. - HttpClient = &http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - MaxIdleConnsPerHost: 512, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: &tls.Config{ - ClientSessionCache: tls.NewLRUClientSessionCache(DefaultCacheSize), - }, - }, - } -) - -func dbg(v ...interface{}) { - if Debug { - log.Println(v...) - } -} - -func dbgerr(v ...interface{}) { - if DebugOnErrors && !Debug { - log.Println(v...) - } -} - -func init() { - if os.Getenv("IRON_API_DEBUG") != "" { - Debug = true - dbg("debugging of api enabled") - } - if os.Getenv("IRON_API_DEBUG_ON_ERRORS") != "" { - DebugOnErrors = true - dbg("debugging of api on errors enabled") - } -} - -func Action(cs config.Settings, prefix string, suffix ...string) *URL { - parts := append([]string{prefix}, suffix...) - return ActionEndpoint(cs, strings.Join(parts, "/")) -} - -func RootAction(cs config.Settings, prefix string, suffix ...string) *URL { - parts := append([]string{prefix}, suffix...) 
- return RootActionEndpoint(cs, strings.Join(parts, "/")) -} - -func ActionEndpoint(cs config.Settings, endpoint string) *URL { - u := &URL{Settings: cs, URL: url.URL{}} - u.URL.Scheme = cs.Scheme - u.URL.Host = fmt.Sprintf("%s:%d", cs.Host, cs.Port) - u.URL.Path = fmt.Sprintf("/%s/projects/%s/%s", cs.ApiVersion, cs.ProjectId, endpoint) - return u -} - -func RootActionEndpoint(cs config.Settings, endpoint string) *URL { - u := &URL{Settings: cs, URL: url.URL{}} - u.URL.Scheme = cs.Scheme - u.URL.Host = fmt.Sprintf("%s:%d", cs.Host, cs.Port) - u.URL.Path = fmt.Sprintf("/%s/%s", cs.ApiVersion, endpoint) - return u -} - -func VersionAction(cs config.Settings) *URL { - u := &URL{Settings: cs, URL: url.URL{Scheme: cs.Scheme}} - u.URL.Host = fmt.Sprintf("%s:%d", cs.Host, cs.Port) - u.URL.Path = "/version" - return u -} - -func (u *URL) QueryAdd(key string, format string, value interface{}) *URL { - query := u.URL.Query() - query.Add(key, fmt.Sprintf(format, value)) - u.URL.RawQuery = query.Encode() - return u -} - -func (u *URL) SetContentType(t string) *URL { - u.ContentType = t - return u -} - -func (u *URL) Req(method string, in, out interface{}) error { - var body io.ReadSeeker - switch in := in.(type) { - case io.ReadSeeker: - // ready to send (zips uses this) - body = in - default: - if in == nil { - in = struct{}{} - } - data, err := json.Marshal(in) - if err != nil { - return err - } - dbg("request body:", in) - body = bytes.NewReader(data) - } - - response, err := u.req(method, body) - if response != nil && response.Body != nil { - defer response.Body.Close() - } - - if err != nil { - dbg("ERROR!", err, err.Error()) - body := "" - if response != nil && response.Body != nil { - binary, _ := ioutil.ReadAll(response.Body) - body = string(binary) - } - dbgerr("ERROR!", err, err.Error(), "Request:", body, " Response:", body) - return err - } - dbg("response:", response) - if out != nil { - return json.NewDecoder(response.Body).Decode(out) - } - - // throw it away - io.Copy(ioutil.Discard, response.Body) - return nil -} - -// returned body must be closed by caller if non-nil -func (u *URL) Request(method string, body io.Reader) (response *http.Response, err error) { - var byts []byte - if body != nil { - byts, err = ioutil.ReadAll(body) - if err != nil { - return nil, err - } - } - return u.req(method, bytes.NewReader(byts)) -} - -var MaxRequestRetries = 5 - -func (u *URL) req(method string, body io.ReadSeeker) (response *http.Response, err error) { - request, err := http.NewRequest(method, u.URL.String(), nil) - if err != nil { - return nil, err - } - - // body=bytes.Reader implements `Len() int`. 
if this changes for some reason, looky here - if s, ok := body.(interface { - Len() int - }); ok { - request.ContentLength = int64(s.Len()) - } - request.Header.Set("Authorization", "OAuth "+u.Settings.Token) - request.Header.Set("Accept", "application/json") - request.Header.Set("Accept-Encoding", "gzip/deflate") - request.Header.Set("User-Agent", u.Settings.UserAgent) - - if u.ContentType != "" { - request.Header.Set("Content-Type", u.ContentType) - } else if body != nil { - request.Header.Set("Content-Type", "application/json") - } - - if rc, ok := body.(io.ReadCloser); ok { // stdlib doesn't have ReadSeekCloser :( - request.Body = rc - } else { - request.Body = ioutil.NopCloser(body) - } - - dbg("URL:", request.URL.String()) - dbg("request:", fmt.Sprintf("%#v\n", request)) - - for tries := 0; tries < MaxRequestRetries; tries++ { - body.Seek(0, 0) // set back to beginning for retries - response, err = HttpClient.Do(request) - if err != nil { - if response != nil && response.Body != nil { - response.Body.Close() // make sure to close since we won't return it - } - if err == io.EOF { - continue - } - return nil, err - } - - if response.StatusCode == http.StatusServiceUnavailable { - delay := (tries + 1) * 10 // smooth out delays from 0-2 - time.Sleep(time.Duration(delay*delay) * time.Millisecond) - continue - } - - break - } - - if err != nil { // for that one lucky case where io.EOF reaches MaxRetries - return nil, err - } - - if err = ResponseAsError(response); err != nil { - return nil, err - } - - return response, nil -} - -var HTTPErrorDescriptions = map[int]string{ - http.StatusUnauthorized: "The OAuth token is either not provided or invalid", - http.StatusNotFound: "The resource, project, or endpoint being requested doesn't exist.", - http.StatusMethodNotAllowed: "This endpoint doesn't support that particular verb", - http.StatusNotAcceptable: "Required fields are missing", -} - -func ResponseAsError(response *http.Response) HTTPResponseError { - if response.StatusCode == http.StatusOK || response.StatusCode == http.StatusCreated { - return nil - } - - if response == nil { - return resErr{statusCode: http.StatusTeapot, error: fmt.Sprint("response nil but no errors. 
beware unicorns, this shouldn't happen")} - } - - if response.Body != nil { - defer response.Body.Close() - } - - var out DefaultResponseBody - err := json.NewDecoder(response.Body).Decode(&out) - if err != nil { - return resErr{statusCode: response.StatusCode, error: fmt.Sprint(response.Status, ": ", err.Error())} - } - if out.Msg != "" { - return resErr{statusCode: response.StatusCode, error: fmt.Sprint(response.Status, ": ", out.Msg)} - } - - return resErr{statusCode: response.StatusCode, error: response.Status + ": Unknown API Response"} -} - -type HTTPResponseError interface { - Error() string - StatusCode() int -} - -type resErr struct { - error string - statusCode int -} - -func (h resErr) Error() string { return h.error } -func (h resErr) StatusCode() int { return h.statusCode } diff --git a/vendor/github.com/iron-io/iron_go3/cache/cache.go b/vendor/github.com/iron-io/iron_go3/cache/cache.go deleted file mode 100644 index ca4d0c139..000000000 --- a/vendor/github.com/iron-io/iron_go3/cache/cache.go +++ /dev/null @@ -1,235 +0,0 @@ -// IronCache (cloud k/v store) client library -package cache - -import ( - "bytes" - "encoding/gob" - "encoding/json" - "fmt" - "time" - - "github.com/iron-io/iron_go3/api" - "github.com/iron-io/iron_go3/config" -) - -var ( - JSON = Codec{Marshal: json.Marshal, Unmarshal: json.Unmarshal} - Gob = Codec{Marshal: gobMarshal, Unmarshal: gobUnmarshal} -) - -type Cache struct { - Settings config.Settings - Name string -} - -type Item struct { - // Value is the Item's value - Value interface{} - // Object is the Item's value for use with a Codec. - Object interface{} - // Number of seconds until expiration. The zero value defaults to 7 days, - // maximum is 30 days. - Expiration time.Duration - // Caches item only if the key is currently cached. - Replace bool - // Caches item only if the key isn't currently cached. - Add bool -} - -// New returns a struct ready to make requests with. -// The cacheName argument is used as namespace. -func New(cacheName string) *Cache { - return &Cache{Settings: config.Config("iron_cache"), Name: cacheName} -} - -func (c *Cache) caches(suffix ...string) *api.URL { - return api.Action(c.Settings, "caches", suffix...) -} - -func (c *Cache) ListCaches(page, perPage int) (caches []*Cache, err error) { - out := []struct { - Project_id string - Name string - }{} - - err = c.caches(). - QueryAdd("page", "%d", page). - QueryAdd("per_page", "%d", perPage). - Req("GET", nil, &out) - if err != nil { - return - } - - caches = make([]*Cache, 0, len(out)) - for _, item := range out { - caches = append(caches, &Cache{ - Settings: c.Settings, - Name: item.Name, - }) - } - - return -} - -func (c *Cache) ServerVersion() (version string, err error) { - out := map[string]string{} - err = api.VersionAction(c.Settings).Req("GET", nil, &out) - if err != nil { - return - } - return out["version"], nil -} - -func (c *Cache) Clear() (err error) { - return c.caches(c.Name, "clear").Req("POST", nil, nil) -} - -// Put adds an Item to the cache, overwriting any existing key of the same name. 
-func (c *Cache) Put(key string, item *Item) (err error) { - in := struct { - Value interface{} `json:"value"` - ExpiresIn int `json:"expires_in,omitempty"` - Replace bool `json:"replace,omitempty"` - Add bool `json:"add,omitempty"` - }{ - Value: item.Value, - ExpiresIn: int(item.Expiration.Seconds()), - Replace: item.Replace, - Add: item.Add, - } - - return c.caches(c.Name, "items", key).Req("PUT", &in, nil) -} - -func anyToString(value interface{}) (str interface{}, err error) { - switch v := value.(type) { - case string: - str = v - case uint, uint8, uint16, uint32, uint64, int, int8, int16, int32, int64: - str = v - case float32, float64: - str = v - case bool: - str = v - case fmt.Stringer: - str = v.String() - default: - var bytes []byte - if bytes, err = json.Marshal(value); err == nil { - str = string(bytes) - } - } - - return -} - -func (c *Cache) Set(key string, value interface{}, ttl ...int) (err error) { - str, err := anyToString(value) - if err == nil { - if len(ttl) > 0 { - err = c.Put(key, &Item{Value: str, Expiration: time.Duration(ttl[0]) * time.Second}) - } else { - err = c.Put(key, &Item{Value: str}) - } - } - return -} -func (c *Cache) Add(key string, value ...interface{}) (err error) { - str, err := anyToString(value) - if err == nil { - err = c.Put(key, &Item{ - Value: str, Expiration: time.Duration(123) * time.Second, Add: true, - }) - } - return -} -func (c *Cache) Replace(key string, value ...interface{}) (err error) { - str, err := anyToString(value) - if err == nil { - err = c.Put(key, &Item{ - Value: str, Expiration: time.Duration(123) * time.Second, Replace: true, - }) - } - return -} - -// Increment increments the corresponding item's value. -func (c *Cache) Increment(key string, amount int64) (value interface{}, err error) { - in := map[string]int64{"amount": amount} - - out := struct { - Message string `json:"msg"` - Value interface{} `json:"value"` - }{} - if err = c.caches(c.Name, "items", key, "increment").Req("POST", &in, &out); err == nil { - value = out.Value - } - return -} - -// Get gets an item from the cache. -func (c *Cache) Get(key string) (value interface{}, err error) { - out := struct { - Cache string `json:"cache"` - Key string `json:"key"` - Value interface{} `json:"value"` - }{} - if err = c.caches(c.Name, "items", key).Req("GET", nil, &out); err == nil { - value = out.Value - } - return -} - -func (c *Cache) GetMeta(key string) (value map[string]interface{}, err error) { - value = map[string]interface{}{} - err = c.caches(c.Name, "items", key).Req("GET", nil, &value) - return -} - -// Delete removes an item from the cache. 
-func (c *Cache) Delete(key string) (err error) { - return c.caches(c.Name, "items", key).Req("DELETE", nil, nil) -} - -type Codec struct { - Marshal func(interface{}) ([]byte, error) - Unmarshal func([]byte, interface{}) error -} - -func (cd Codec) Put(c *Cache, key string, item *Item) (err error) { - bytes, err := cd.Marshal(item.Object) - if err != nil { - return - } - - item.Value = string(bytes) - - return c.Put(key, item) -} - -func (cd Codec) Get(c *Cache, key string, object interface{}) (err error) { - value, err := c.Get(key) - if err != nil { - return - } - - err = cd.Unmarshal([]byte(value.(string)), object) - if err != nil { - return - } - - return -} - -func gobMarshal(v interface{}) ([]byte, error) { - writer := bytes.Buffer{} - enc := gob.NewEncoder(&writer) - err := enc.Encode(v) - return writer.Bytes(), err -} - -func gobUnmarshal(marshalled []byte, v interface{}) error { - reader := bytes.NewBuffer(marshalled) - dec := gob.NewDecoder(reader) - return dec.Decode(v) -} diff --git a/vendor/github.com/iron-io/iron_go3/cache/cache_examples_test.go b/vendor/github.com/iron-io/iron_go3/cache/cache_examples_test.go deleted file mode 100644 index 6a4bcca2c..000000000 --- a/vendor/github.com/iron-io/iron_go3/cache/cache_examples_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package cache_test - -import ( - "fmt" - "github.com/iron-io/iron_go3/cache" -) - -func p(a ...interface{}) { fmt.Println(a...) } - -func Example1StoringData() { - // For configuration info, see http://dev.iron.io/articles/configuration - c := cache.New("test_cache") - - // Numbers will get stored as numbers - c.Set("number_item", 42) - - // Strings get stored as strings - c.Set("string_item", "Hello, IronCache") - - // Objects and dicts get JSON-encoded and stored as strings - c.Set("complex_item", map[string]interface{}{ - "test": "this is a dict", - "args": []string{"apples", "oranges"}, - }) - - p("all stored") - // Output: - // all stored -} - -func Example2Incrementing() { - c := cache.New("test_cache") - - p(c.Increment("number_item", 10)) - p(c.Get("number_item")) - - p(c.Increment("string_item", 10)) - - p(c.Increment("complex_item", 10)) - - // Output: - // - // 52 - // 400 Bad Request: Cannot increment or decrement non-numeric value - // 400 Bad Request: Cannot increment or decrement non-numeric value -} - -func Example3Decrementing() { - c := cache.New("test_cache") - - p(c.Increment("number_item", -10)) - p(c.Get("number_item")) - - p(c.Increment("string_item", -10)) - - p(c.Increment("complex_item", -10)) - - // Output: - // - // 42 - // 400 Bad Request: Cannot increment or decrement non-numeric value - // 400 Bad Request: Cannot increment or decrement non-numeric value -} - -func Example4RetrievingData() { - c := cache.New("test_cache") - - value, err := c.Get("number_item") - fmt.Printf("%#v (%#v)\n", value, err) - - value, err = c.Get("string_item") - fmt.Printf("%#v (%#v)\n", value, err) - - // JSON is returned as strings - value, err = c.Get("complex_item") - fmt.Printf("%#v (%#v)\n", value, err) - - // You can use the JSON codec to deserialize it. 
- obj := struct { - Args []string - Test string - }{} - err = cache.JSON.Get(c, "complex_item", &obj) - fmt.Printf("%#v (%#v)\n", obj, err) - // Output: - // 42 () - // "Hello, IronCache" () - // "{\"args\":[\"apples\",\"oranges\"],\"test\":\"this is a dict\"}" () - // struct { Args []string; Test string }{Args:[]string{"apples", "oranges"}, Test:"this is a dict"} () -} - -func Example5DeletingData() { - c := cache.New("test_cache") - - // Immediately delete an item - c.Delete("string_item") - - p(c.Get("string_item")) - // Output: - // 404 Not Found: The resource, project, or endpoint being requested doesn't exist. -} diff --git a/vendor/github.com/iron-io/iron_go3/cache/cache_test.go b/vendor/github.com/iron-io/iron_go3/cache/cache_test.go deleted file mode 100644 index 03700723c..000000000 --- a/vendor/github.com/iron-io/iron_go3/cache/cache_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package cache_test - -import ( - "testing" - "time" - - "github.com/iron-io/iron_go3/cache" - . "github.com/jeffh/go.bdd" -) - -func TestEverything(t *testing.T) {} - -func init() { - defer PrintSpecReport() - - Describe("IronCache", func() { - c := cache.New("cachename") - - It("Lists all caches", func() { - _, err := c.ListCaches(0, 100) // can't check the caches value just yet. - Expect(err, ToBeNil) - }) - - It("Puts a value into the cache", func() { - err := c.Put("keyname", &cache.Item{ - Value: "value", - Expiration: 2 * time.Second, - }) - Expect(err, ToBeNil) - }) - - It("Gets a value from the cache", func() { - value, err := c.Get("keyname") - Expect(err, ToBeNil) - Expect(value, ToEqual, "value") - }) - - It("Gets meta-information about an item", func() { - err := c.Put("forever", &cache.Item{Value: "and ever", Expiration: 0}) - Expect(err, ToBeNil) - value, err := c.GetMeta("forever") - Expect(err, ToBeNil) - Expect(value["key"], ToEqual, "forever") - Expect(value["value"], ToEqual, "and ever") - Expect(value["cache"], ToEqual, "cachename") - Expect(value["expires"], ToEqual, "9999-01-01T00:00:00Z") - Expect(value["flags"], ToEqual, 0.0) - }) - - It("Sets numeric items", func() { - err := c.Set("number", 42) - Expect(err, ToBeNil) - value, err := c.Get("number") - Expect(err, ToBeNil) - Expect(value.(float64), ToEqual, 42.0) - }) - }) -} diff --git a/vendor/github.com/iron-io/iron_go3/circle.yml b/vendor/github.com/iron-io/iron_go3/circle.yml deleted file mode 100644 index 365f5f550..000000000 --- a/vendor/github.com/iron-io/iron_go3/circle.yml +++ /dev/null @@ -1,31 +0,0 @@ -machine: - environment: - GOPATH: $HOME - GOROOT: $HOME/go - PATH: $GOROOT/bin:$PATH - GO15VENDOREXPERIMENT: 1 - CHECKOUT_DIR: $HOME/$CIRCLE_PROJECT_REPONAME - GH_IRON: $HOME/src/github.com/iron-io - GO_PROJECT: ../src/github.com/iron-io - -checkout: - post: - - mkdir -p "$GH_IRON" - - cp -R "$CHECKOUT_DIR" "$GH_IRON/$CIRCLE_PROJECT_REPONAME": - pwd: $HOME - -dependencies: - pre: - # install go1.5 - - wget https://storage.googleapis.com/golang/go1.5.linux-amd64.tar.gz - - tar -C $HOME -xvzf go1.5.linux-amd64.tar.gz - override: - # this was being dumb, don't want it to auto detect we are a go repo b/c vendoring - - which go - -test: - override: - - go get github.com/jeffh/go.bdd: - pwd: $GO_PROJECT/$CIRCLE_PROJECT_REPONAME - - go test ./mq: - pwd: $GO_PROJECT/$CIRCLE_PROJECT_REPONAME diff --git a/vendor/github.com/iron-io/iron_go3/config/config.go b/vendor/github.com/iron-io/iron_go3/config/config.go deleted file mode 100644 index aaa2973ff..000000000 --- a/vendor/github.com/iron-io/iron_go3/config/config.go +++ /dev/null @@ 
-1,283 +0,0 @@ -// config helper for cache, mq, and worker -package config - -import ( - "encoding/json" - "io/ioutil" - "log" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" -) - -// Contains the configuration for an iron.io service. -// An empty instance is not usable -type Settings struct { - Token string `json:"token,omitempty"` - ProjectId string `json:"project_id,omitempty"` - Host string `json:"host,omitempty"` - Scheme string `json:"scheme,omitempty"` - Port uint16 `json:"port,omitempty"` - ApiVersion string `json:"api_version,omitempty"` - UserAgent string `json:"user_agent,omitempty"` -} - -var ( - debug = false - goVersion = runtime.Version() - Presets = map[string]Settings{ - "worker": Settings{ - Scheme: "https", - Port: 443, - ApiVersion: "2", - Host: "worker-aws-us-east-1.iron.io", - UserAgent: "iron_go3/worker 2.0 (Go " + goVersion + ")", - }, - "mq": Settings{ - Scheme: "https", - Port: 443, - ApiVersion: "3", - Host: "mq-aws-us-east-1-1.iron.io", - UserAgent: "iron_go3/mq 3.0 (Go " + goVersion + ")", - }, - "cache": Settings{ - Scheme: "https", - Port: 443, - ApiVersion: "1", - Host: "cache-aws-us-east-1.iron.io", - UserAgent: "iron_go3/cache 1.0 (Go " + goVersion + ")", - }, - } -) - -func dbg(v ...interface{}) { - if debug { - log.Println(v...) - } -} - -// ManualConfig gathers configuration from env variables, json config files -// and finally overwrites it with specified instance of Settings. -// Examples of fullProduct are "iron_worker", "iron_cache", "iron_mq" and -func ManualConfig(fullProduct string, configuration *Settings) (settings Settings) { - return config(fullProduct, "", configuration) -} - -// Config gathers configuration from env variables and json config files. -// Examples of fullProduct are "iron_worker", "iron_cache", "iron_mq". -func Config(fullProduct string) (settings Settings) { - return config(fullProduct, "", nil) -} - -// Like Config, but useful for keeping multiple dev environment information in -// one iron.json config file. If env="", works same as Config. -// -// e.g. -// { -// "production": { -// "token": ..., -// "project_id": ... -// }, -// "test": { -// ... -// } -// } -func ConfigWithEnv(fullProduct, env string) (settings Settings) { - return config(fullProduct, env, nil) -} - -func config(fullProduct, env string, configuration *Settings) Settings { - if os.Getenv("IRON_CONFIG_DEBUG") != "" { - debug = true - dbg("debugging of config enabled") - } - pair := strings.SplitN(fullProduct, "_", 2) - if len(pair) != 2 { - panic("Invalid product name, has to use prefix.") - } - family, product := pair[0], pair[1] - - base, found := Presets[product] - - if !found { - base = Settings{ - Scheme: "https", - Port: 443, - ApiVersion: "1", - Host: product + "-aws-us-east-1.iron.io", - UserAgent: "iron_go", - } - } - - base.globalConfig(family, product, env) - base.globalEnv(family, product) - base.productEnv(family, product) - base.localConfig(family, product, env) - base.manualConfig(configuration) - - if base.Token == "" || base.ProjectId == "" { - panic("Didn't find token or project_id in configs. 
Check your environment or iron.json.") - } - - return base -} - -func (s *Settings) globalConfig(family, product, env string) { - home, err := homeDir() - if err != nil { - log.Println("Error getting home directory:", err) - return - } - path := filepath.Join(home, ".iron.json") - s.UseConfigFile(family, product, path, env) -} - -// The environment variables the scheme looks for are all of the same formula: -// the camel-cased product name is switched to an underscore (“IronWorker” -// becomes “iron_worker”) and converted to be all capital letters. For the -// global environment variables, “IRON” is used by itself. The value being -// loaded is then joined by an underscore to the name, and again capitalised. -// For example, to retrieve the OAuth token, the client looks for “IRON_TOKEN”. -func (s *Settings) globalEnv(family, product string) { - eFamily := strings.ToUpper(family) + "_" - s.commonEnv(eFamily) -} - -// In the case of product-specific variables (which override global variables), -// it would be “IRON_WORKER_TOKEN” (for IronWorker). -func (s *Settings) productEnv(family, product string) { - eProduct := strings.ToUpper(family) + "_" + strings.ToUpper(product) + "_" - s.commonEnv(eProduct) -} - -func (s *Settings) localConfig(family, product, env string) { - s.UseConfigFile(family, product, "iron.json", env) -} - -func (s *Settings) manualConfig(settings *Settings) { - if settings != nil { - s.UseSettings(settings) - } -} - -func (s *Settings) commonEnv(prefix string) { - if token := os.Getenv(prefix + "TOKEN"); token != "" { - s.Token = token - dbg("env has TOKEN:", s.Token) - } - if pid := os.Getenv(prefix + "PROJECT_ID"); pid != "" { - s.ProjectId = pid - dbg("env has PROJECT_ID:", s.ProjectId) - } - if host := os.Getenv(prefix + "HOST"); host != "" { - s.Host = host - dbg("env has HOST:", s.Host) - } - if scheme := os.Getenv(prefix + "SCHEME"); scheme != "" { - s.Scheme = scheme - dbg("env has SCHEME:", s.Scheme) - } - if port := os.Getenv(prefix + "PORT"); port != "" { - n, err := strconv.ParseUint(port, 10, 16) - if err != nil { - panic(err) - } - s.Port = uint16(n) - dbg("env has PORT:", s.Port) - } - if vers := os.Getenv(prefix + "API_VERSION"); vers != "" { - s.ApiVersion = vers - dbg("env has API_VERSION:", s.ApiVersion) - } -} - -// Load and merge the given JSON config file. -func (s *Settings) UseConfigFile(family, product, path, env string) { - content, err := ioutil.ReadFile(path) - if err != nil { - dbg("tried to", err, ": skipping") - return - } - - data := map[string]interface{}{} - err = json.Unmarshal(content, &data) - if err != nil { - panic("Invalid JSON in " + path + ": " + err.Error()) - } - - dbg("config in", path, "found") - - if env != "" { - envdata, ok := data[env].(map[string]interface{}) - if !ok { - return // bail, they specified an env but we couldn't find one, so error out. - } - data = envdata - } - s.UseConfigMap(data) - - ipData, found := data[family+"_"+product] - if found { - pData := ipData.(map[string]interface{}) - s.UseConfigMap(pData) - } -} - -// Merge the given data into the settings. 
-func (s *Settings) UseConfigMap(data map[string]interface{}) { - if token, found := data["token"]; found { - s.Token = token.(string) - dbg("config has token:", s.Token) - } - if projectId, found := data["project_id"]; found { - s.ProjectId = projectId.(string) - dbg("config has project_id:", s.ProjectId) - } - if host, found := data["host"]; found { - s.Host = host.(string) - dbg("config has host:", s.Host) - } - if prot, found := data["scheme"]; found { - s.Scheme = prot.(string) - dbg("config has scheme:", s.Scheme) - } - if port, found := data["port"]; found { - s.Port = uint16(port.(float64)) - dbg("config has port:", s.Port) - } - if vers, found := data["api_version"]; found { - s.ApiVersion = vers.(string) - dbg("config has api_version:", s.ApiVersion) - } - if agent, found := data["user_agent"]; found { - s.UserAgent = agent.(string) - dbg("config has user_agent:", s.UserAgent) - } -} - -// Merge the given instance into the settings. -func (s *Settings) UseSettings(settings *Settings) { - if settings.Token != "" { - s.Token = settings.Token - } - if settings.ProjectId != "" { - s.ProjectId = settings.ProjectId - } - if settings.Host != "" { - s.Host = settings.Host - } - if settings.Scheme != "" { - s.Scheme = settings.Scheme - } - if settings.ApiVersion != "" { - s.ApiVersion = settings.ApiVersion - } - if settings.UserAgent != "" { - s.UserAgent = settings.UserAgent - } - if settings.Port > 0 { - s.Port = settings.Port - } -} diff --git a/vendor/github.com/iron-io/iron_go3/config/config_test.go b/vendor/github.com/iron-io/iron_go3/config/config_test.go deleted file mode 100644 index 160af5018..000000000 --- a/vendor/github.com/iron-io/iron_go3/config/config_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package config_test - -import ( - "github.com/iron-io/iron_go3/config" - . "github.com/jeffh/go.bdd" - "testing" -) - -func init() { - defer PrintSpecReport() - Describe("gets config", func() { - It("gets default configs", func() { - s := config.Config("iron_undefined") - Expect(s.Host, ToEqual, "undefined-aws-us-east-1.iron.io") - }) - }) -} - -func TestEverything(t *testing.T) {} diff --git a/vendor/github.com/iron-io/iron_go3/config/homedir.go b/vendor/github.com/iron-io/iron_go3/config/homedir.go deleted file mode 100644 index 65aa85f37..000000000 --- a/vendor/github.com/iron-io/iron_go3/config/homedir.go +++ /dev/null @@ -1,84 +0,0 @@ -//The MIT License (MIT) -// -//Copyright (c) 2013 Mitchell Hashimoto -// -//Permission is hereby granted, free of charge, to any person obtaining a copy -//of this software and associated documentation files (the "Software"), to deal -//in the Software without restriction, including without limitation the rights -//to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -//copies of the Software, and to permit persons to whom the Software is -//furnished to do so, subject to the following conditions: -// -//The above copyright notice and this permission notice shall be included in -//all copies or substantial portions of the Software. -// -//THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -//IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -//FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -//AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -//LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -//OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -//THE SOFTWARE. 
-// -// This file is a wholesale copy of https://github.com/mitchellh/go-homedir@1f6da4a72e57d4e7edd4a7295a585e0a3999a2d4 -// with Dir() renamed to homeDir() and Expand() deleted. - -package config - -import ( - "bytes" - "errors" - "os" - "os/exec" - "runtime" - "strings" -) - -// homeDir returns the home directory for the executing user. -// -// This uses an OS-specific method for discovering the home directory. -// An error is returned if a home directory cannot be detected. -func homeDir() (string, error) { - if runtime.GOOS == "windows" { - return dirWindows() - } - - // Unix-like system, so just assume Unix - return dirUnix() -} - -func dirUnix() (string, error) { - // First prefer the HOME environmental variable - if home := os.Getenv("HOME"); home != "" { - return home, nil - } - - // If that fails, try the shell - var stdout bytes.Buffer - cmd := exec.Command("sh", "-c", "eval echo ~$USER") - cmd.Stdout = &stdout - if err := cmd.Run(); err != nil { - return "", err - } - - result := strings.TrimSpace(stdout.String()) - if result == "" { - return "", errors.New("blank output when reading home directory") - } - - return result, nil -} - -func dirWindows() (string, error) { - drive := os.Getenv("HOMEDRIVE") - path := os.Getenv("HOMEPATH") - home := drive + path - if drive == "" || path == "" { - home = os.Getenv("USERPROFILE") - } - if home == "" { - return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank") - } - - return home, nil -} diff --git a/vendor/github.com/iron-io/iron_go3/examples/codepackages/info/main.go b/vendor/github.com/iron-io/iron_go3/examples/codepackages/info/main.go deleted file mode 100644 index 8be7e2501..000000000 --- a/vendor/github.com/iron-io/iron_go3/examples/codepackages/info/main.go +++ /dev/null @@ -1,108 +0,0 @@ -/* - This code sample demonstrates how to get info about a code package - - http://dev.iron.io/worker/reference/api/ - http://dev.iron.io/worker/reference/api/#get_info_about_a_code_package -*/ -package main - -import ( - "bytes" - "encoding/json" - "github.com/iron-io/iron_go3/api" - "github.com/iron-io/iron_go3/config" - "io/ioutil" - "log" - "text/template" - "time" -) - -type ( - Code struct { - Id string `json:"id"` - ProjectId string `json:"project_id"` - Name string `json:"name"` - Runtime string `json:"runtime"` - LatestChecksum string `json:"latest_checksum"` - Revision int `json:"rev"` - LatestHistoryId string `json:"latest_history_id"` - LatestChange time.Time `json:"latest_change"` - } -) - -func main() { - // Create your configuration for iron_worker - // Find these values in your credentials - config := config.Config("iron_worker") - config.ProjectId = "your_project_id" - config.Token = "your_token" - - // Capture info for this code - codeId := "522d160a91c530531f6f528d" - - // Create your endpoint url for codes - url := api.Action(config, "codes", codeId) - log.Printf("Url: %s\n", url.URL.String()) - - // Send the request to Iron.io - resp, err := url.Request("GET", nil) - if err != nil { - log.Println(err) - return - } - defer resp.Body.Close() - - // Check the status code - if resp.StatusCode != 200 { - log.Printf("%v\n", resp) - return - } - - // Capture the response - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - log.Println(err) - return - } - - // Unmarshal to struct - code := &Code{} - err = json.Unmarshal(body, code) - if err != nil { - log.Printf("%v\n", err) - return - } - - // Or you can unmarshal to a map - results := map[string]interface{}{} - err = json.Unmarshal(body, &results) - if err != nil { - log.Printf("%v\n", err) - return - } - - // Pretty print the response - prettyPrint(code) -} - -func prettyPrint(code *Code) { - prettyTemplate := template.Must(template.New("pretty").Parse(prettyPrintFormat())) - - display := new(bytes.Buffer) - - prettyTemplate.Execute(display, code) - log.Printf("%s,\n", display.String()) -} - -func prettyPrintFormat() string { - return `{ - "id": "{{.Id}}", - "project_id": "{{.ProjectId}}", - "name": "{{.Name}}", - "runtime": "{{.Runtime}}", - "latest_checksum": "{{.LatestChecksum}}", - "rev": {{.Revision}}, - "latest_history_id": "{{.LatestHistoryId}}", - "latest_change": "{{.LatestChange}}", -}` -} diff --git a/vendor/github.com/iron-io/iron_go3/examples/codepackages/list/main.go b/vendor/github.com/iron-io/iron_go3/examples/codepackages/list/main.go deleted file mode 100644 index e47ea9888..000000000 --- a/vendor/github.com/iron-io/iron_go3/examples/codepackages/list/main.go +++ /dev/null @@ -1,116 +0,0 @@ -/* - This code sample demonstrates how to get a list of code packages - - http://dev.iron.io/worker/reference/api/ - http://dev.iron.io/worker/reference/api/#list_code_packages -*/ -package main - -import ( - "bytes" - "encoding/json" - "fmt" - "github.com/iron-io/iron_go3/api" - "github.com/iron-io/iron_go3/config" - "io/ioutil" - "log" - "text/template" - "time" -) - -type ( - CodeResponse struct { - Codes []Code `json:"codes"` - } - - Code struct { - Id string `json:"id"` - ProjectId string `json:"project_id"` - Name string `json:"name"` - Runtime string `json:"runtime"` - LatestChecksum string `json:"latest_checksum"` - Revision int `json:"rev"` - LatestHistoryId string `json:"latest_history_id"` - LatestChange time.Time `json:"latest_change"` - } -) - -func main() { - // Create your configuration for iron_worker - // Find these values in your credentials - config := config.Config("iron_worker") - config.ProjectId = "your_project_id" - config.Token = "your_token" - - // Create your endpoint url for codes - url := api.ActionEndpoint(config, "codes") - log.Printf("Url: %s\n", url.URL.String()) - - // Send the request to Iron.io - resp, err := url.Request("GET", nil) - if err != nil { - log.Println(err) - return - } - defer resp.Body.Close() - - // Check the status code - if resp.StatusCode != 200 { - log.Printf("%v\n", resp) - return - } - - // Capture the response - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - log.Println(err) - return - } - - // Unmarshal to struct - codeResponse := &CodeResponse{} - err = json.Unmarshal(body, codeResponse) - if err != nil { - log.Printf("%v\n", err) - return - } - - // Or you can unmarshal to a map - results := map[string]interface{}{} - err = json.Unmarshal(body, &results) - if err != nil { - log.Printf("%v\n", err) - return - } - - // Pretty print the response - prettyPrint(codeResponse) -} - -func prettyPrint(codeResponse *CodeResponse) { - prettyTemplate := template.Must(template.New("pretty").Parse(prettyPrintFormat())) - - codes := "\n" - display := new(bytes.Buffer) - - for _, code := range codeResponse.Codes { - display.Reset() - prettyTemplate.Execute(display, code) - codes += fmt.Sprintf("%s,\n", display.String()) - } - - log.Print(codes) -} - -func prettyPrintFormat() string { - return `{ - "id": "{{.Id}}", - "project_id": "{{.ProjectId}}", - "name": "{{.Name}}", - "runtime": "{{.Runtime}}", - "latest_checksum": "{{.LatestChecksum}}", - "rev": {{.Revision}}, - "latest_history_id": "{{.LatestHistoryId}}", - "latest_change": "{{.LatestChange}}", -}` -} diff --git a/vendor/github.com/iron-io/iron_go3/examples/tasks/info/main.go b/vendor/github.com/iron-io/iron_go3/examples/tasks/info/main.go deleted file mode 100644 index f52fcb364..000000000 --- a/vendor/github.com/iron-io/iron_go3/examples/tasks/info/main.go +++ /dev/null @@ -1,120 +0,0 @@ -/* - This code sample demonstrates how to get info about a task - - http://dev.iron.io/worker/reference/api/ - http://dev.iron.io/worker/reference/api/#get_info_about_a_task -*/ -package main - -import ( - "bytes" - "encoding/json" - "github.com/iron-io/iron_go3/api" - "github.com/iron-io/iron_go3/config" - "io/ioutil" - "log" - "text/template" - "time" -) - -type ( - Task struct { - Id string `json:"id"` - ProjectId string `json:"project_id"` - CodeId string `json:"code_id"` - CodeHistoryId string `json:"code_history_id"` - Status string `json:"status"` - CodeName string `json:"code_name"` - CodeRevision string `json:"code_rev"` - StartTime time.Time `json:"start_time"` - EndTime time.Time `json:"end_time"` - Duration int `json:"duration"` - Timeout int `json:"timeout"` - Payload string `json:"payload"` - UpdatedAt time.Time `json:"updated_at"` - CreatedAt time.Time `json:"created_at"` - } -) - -func main() { - // Create your configuration for iron_worker - // Find these values in your credentials - config := config.Config("iron_worker") - config.ProjectId = "your_project_id" - config.Token = "your_token" - - // Capture info for this task - taskId := "52b45b17a31186632b00da4c" - - // Create your endpoint url for tasks - url := api.Action(config, "tasks", taskId) - log.Printf("Url: %s\n", url.URL.String()) - - // Send the request to Iron.io - resp, err := url.Request("GET", nil) - if err != nil { - log.Println(err) - return - } - defer resp.Body.Close() - - // Check the status code - if resp.StatusCode != 200 { - log.Printf("%v\n", resp) - return - } - - // Capture the response - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - log.Println(err) - return - } - - // Unmarshal to struct - task := &Task{} - err = json.Unmarshal(body, task) - if err != nil { - log.Printf("%v\n", err) - return - } - - // Or you can unmarshal to a map - results := map[string]interface{}{} - err = json.Unmarshal(body, &results) - if err != nil { - log.Printf("%v\n", err) - return - } - - // Pretty print the response - prettyPrint(task) -} - -func prettyPrint(task *Task) { - prettyTemplate := template.Must(template.New("pretty").Parse(prettyPrintFormat())) - - display := new(bytes.Buffer) - - prettyTemplate.Execute(display, task) - log.Printf("%s,\n", display.String()) -} - -func prettyPrintFormat() string { - return `{ - "id": "{{.Id}}", - "project_id": "{{.ProjectId}}", - "code_id": "{{.CodeId}}", - "code_history_id": "{{.CodeHistoryId}}", - "status": "{{.Status}}", - "code_name": "{{.CodeName}}", - "code_revision": "{{.CodeRevision}}", - "start_time": "{{.StartTime}}", - "end_time": "{{.EndTime}}", - "duration": {{.Duration}}, - "timeout": {{.Timeout}}, - "payload": {{.Payload}}, - "created_at": "{{.CreatedAt}}", - "updated_at": "{{.UpdatedAt}}", -}` -} diff --git a/vendor/github.com/iron-io/iron_go3/examples/tasks/list/main.go b/vendor/github.com/iron-io/iron_go3/examples/tasks/list/main.go deleted file mode 100644 index 78b957226..000000000 --- a/vendor/github.com/iron-io/iron_go3/examples/tasks/list/main.go +++ /dev/null @@ -1,129 +0,0 @@ -/* - This code sample demonstrates how to get a list of existing tasks - - http://dev.iron.io/worker/reference/api/ - http://dev.iron.io/worker/reference/api/#list_tasks -*/ -package main - -import ( - "bytes" - "encoding/json" - "fmt" - "github.com/iron-io/iron_go3/api" - "github.com/iron-io/iron_go3/config" - "io/ioutil" - "log" - "text/template" - "time" -) - -type ( - TaskResponse struct { - Tasks []Task `json:"tasks"` - } - - Task struct { - Id string `json:"id"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` - ProjectId string `json:"project_id"` - CodeId string `json:"code_id"` - Status string `json:"status"` - Message string `json:"msg"` - CodeName string `json:"code_name"` - StartTime time.Time `json:"start_time"` - EndTime time.Time `json:"end_time"` - Duration int `json:"duration"` - RunTimes int `json:"run_times"` - Timeout int `json:"timeout"` - Percent int `json:"percent"` - } -) - -func main() { - // Create your configuration for iron_worker - // Find these values in your credentials - config := config.Config("iron_worker") - config.ProjectId = "your_project_id" - config.Token = "your_token" - - // Create your endpoint url for tasks - url := api.ActionEndpoint(config, "tasks") - url.QueryAdd("code_name", "%s", "task") - log.Printf("Url: %s\n", url.URL.String()) - - // Send the request to Iron.io - resp, err := url.Request("GET", nil) - if err != nil { - log.Println(err) - return - } - defer resp.Body.Close() - - // Check the status code - if resp.StatusCode != 200 { - log.Printf("%v\n", resp) - return - } - - // Capture the response - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - log.Println(err) - return - } - - // Unmarshal to struct - taskResponse := &TaskResponse{} - err = json.Unmarshal(body, taskResponse) - if err != nil { - log.Printf("%v\n", err) - return - } - - // Or you can unmarshal to a map - results := map[string]interface{}{} - err = json.Unmarshal(body, &results) - if err != nil { - log.Printf("%v\n", err) - return - } - - // Pretty print the response - prettyPrint(taskResponse) -} - -func prettyPrint(taskResponse *TaskResponse) { - prettyTemplate := template.Must(template.New("pretty").Parse(prettyPrintFormat())) - - tasks := "\n" - display := new(bytes.Buffer) - - for _, task := range taskResponse.Tasks { - display.Reset() - prettyTemplate.Execute(display, task) - tasks += fmt.Sprintf("%s,\n", display.String()) - } - - log.Print(tasks) -} - -func prettyPrintFormat() string { - return `{ - "id": "{{.Id}}", - "created_at": "{{.CreatedAt}}", - "updated_at": "{{.UpdatedAt}}", - "project_id": "{{.ProjectId}}", - "code_id": "{{.CodeId}}", - "status": "{{.Status}}", - "msg": "{{.Message}}", - "code_name": "{{.CodeName}}", - "start_time": "{{.StartTime}}", - "end_time": "{{.EndTime}}", - "duration": {{.Duration}}, - "run_times": {{.RunTimes}}, - "timeout": {{.Timeout}}, - "percent": {{.Percent}} -}` -} diff --git a/vendor/github.com/iron-io/iron_go3/examples/tasks/log/main.go b/vendor/github.com/iron-io/iron_go3/examples/tasks/log/main.go deleted file mode 100644 index 542045c82..000000000 --- a/vendor/github.com/iron-io/iron_go3/examples/tasks/log/main.go +++ /dev/null @@ -1,73 +0,0 @@ -/* - This code sample demonstrates how to get the log for a task - - http://dev.iron.io/worker/reference/api/ - http://dev.iron.io/worker/reference/api/#get_a_tasks_log -*/ -package main - -import ( - "github.com/iron-io/iron_go3/api" - "github.com/iron-io/iron_go3/config" - "io/ioutil" - "log" - "time" -) - -type ( - Task struct { - Id string `json:"id"` - ProjectId string `json:"project_id"` - CodeId string `json:"code_id"` - CodeHistoryId string `json:"code_history_id"` - Status string `json:"status"` - CodeName string `json:"code_name"` - CodeRevision string `json:"code_rev"` - StartTime time.Time `json:"start_time"` - EndTime time.Time `json:"end_time"` - Duration int `json:"duration"` - Timeout int `json:"timeout"` - Payload string `json:"payload"` - UpdatedAt time.Time `json:"updated_at"` - CreatedAt time.Time `json:"created_at"` - } -) - -func main() { - // Create your configuration for iron_worker - // Find these values in your credentials - config := config.Config("iron_worker") - config.ProjectId = "your_project_id" - config.Token = "your_token" - - // Capture info for this task - taskId := "52b45b17a31186632b00da4c" - - // Create your endpoint url for tasks - url := api.Action(config, "tasks", taskId, "log") - log.Printf("Url: %s\n", url.URL.String()) - - // Send the request to Iron.io - resp, err := url.Request("GET", nil) - if err != nil { - log.Println(err) - return - } - defer resp.Body.Close() - - // Check the status code - if resp.StatusCode != 200 { - log.Printf("%v\n", resp) - return - } - - // Capture the response - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - log.Println(err) - return - } - - // Display the log - log.Printf("\n%s\n", string(body)) -} diff --git a/vendor/github.com/iron-io/iron_go3/examples/tasks/queue/main.go b/vendor/github.com/iron-io/iron_go3/examples/tasks/queue/main.go deleted file mode 100644 index db7ba02b4..000000000 --- a/vendor/github.com/iron-io/iron_go3/examples/tasks/queue/main.go +++ /dev/null @@ -1,109 +0,0 @@ -/* - This code sample demonstrates how to queue a worker from your existing - task list. - - http://dev.iron.io/worker/reference/api/ - http://dev.iron.io/worker/reference/api/#queue_a_task -*/ -package main - -import ( - "bytes" - "encoding/json" - "fmt" - "github.com/iron-io/iron_go3/api" - "github.com/iron-io/iron_go3/config" - "io/ioutil" - "log" - "text/template" -) - -type ( - TaskResponse struct { - Message string `json:"msg"` - Tasks []Task `json:"tasks"` - } - - Task struct { - Id string `json:"id"` - } -) - -// payload defines a sample payload document -var payload = `{"tasks":[ -{ - "code_name" : "Worker-Name", - "timeout" : 20, - "payload" : "{ \"key1\" : \"value1\", \"key2\" : \"value2\" }" -}]}` - -func main() { - // Create your configuration for iron_worker - // Find these values in your credentials - config := config.Config("iron_worker") - config.ProjectId = "your_project_id" - config.Token = "your_token" - - // Create your endpoint url for tasks - url := api.ActionEndpoint(config, "tasks") - log.Printf("Url: %s\n", url.URL.String()) - - // Convert the payload to a slice of bytes - postData := bytes.NewBufferString(payload) - - // Post the request to Iron.io - resp, err := url.Request("POST", postData) - if err != nil { - log.Println(err) - return - } - defer resp.Body.Close() - - // Capture the response - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - log.Println(err) - return - } - - // Unmarshal to struct - taskResponse := &TaskResponse{} - err = json.Unmarshal(body, taskResponse) - if err != nil { - log.Printf("%v\n", err) - return - } - - // Or you can unmarshal to a map - results := map[string]interface{}{} - err = json.Unmarshal(body, &results) - if err != nil { - log.Printf("%v\n", err) - return - } - - // Pretty print the response - prettyPrint(taskResponse) -} - -func prettyPrint(taskResponse *TaskResponse) { - prettyTemplate := template.Must(template.New("pretty").Parse(prettyPrintFormat())) - - tasks := "\n" - tasks += "\"msg\": " + taskResponse.Message + "\n" - display := new(bytes.Buffer) - - for
_, task := range taskResponse.Tasks { - display.Reset() - prettyTemplate.Execute(display, task) - tasks += fmt.Sprintf("%s,\n", display.String()) - } - - log.Printf(tasks) -} - -func prettyPrintFormat() string { - return `{ - "id": "{{.Id}}", -}` -} diff --git a/vendor/github.com/iron-io/iron_go3/mq/example_test.go b/vendor/github.com/iron-io/iron_go3/mq/example_test.go deleted file mode 100644 index 54cef1080..000000000 --- a/vendor/github.com/iron-io/iron_go3/mq/example_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package mq_test - -import ( - "errors" - - "github.com/iron-io/iron_go3/mq" -) - -func ExampleQueue() error { - // Standard way of using a queue will be to just start pushing or - // getting messages, q.Upsert isn't necessary unless you explicitly - // need to create a queue with custom settings. - - q := mq.New("my_queue2") - // Simply pushing messages will create a queue if it doesn't exist, with defaults. - _, err := q.PushStrings("msg1", "msg2") - if err != nil { - return err - } - msgs, err := q.GetN(2) - if err != nil { - return err - } - if len(msgs) != 2 { - return errors.New("not good") - } - - return nil -} - -func ExampleQueue_Upsert() error { - // Prepare a Queue from configs - q := mq.New("my_queue") - // Upsert will create the queue on the server or update its message_timeout - // to 120 if it already exists. - - // Let's just make sure we don't have a queue, because we can. - if _, err := q.Info(); mq.ErrQueueNotFound(err) { - _, err := q.Update(mq.QueueInfo{MessageTimeout: 120}) // ok, we'll make one. - if err != nil { - return err - } - } - // Definitely exists now. - - // Let's just add some messages. - _, err := q.PushStrings("msg1", "msg2") - if err != nil { - return err - } - msgs, err := q.Peek() - if len(msgs) != 2 { - // and it has messages already... - } - return nil -} - -func ExampleList() error { - qs, err := mq.List() // Will get up to 30 queues. All ready to use. - if err != nil { - return err - } - - // Pop a message off of each queue. - for _, q := range qs { - _, err := q.Pop() - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/iron-io/iron_go3/mq/mq.go b/vendor/github.com/iron-io/iron_go3/mq/mq.go deleted file mode 100644 index 0803fca50..000000000 --- a/vendor/github.com/iron-io/iron_go3/mq/mq.go +++ /dev/null @@ -1,563 +0,0 @@ -// IronMQ (elastic message queue) client library -package mq - -import ( - "encoding/json" - "errors" - "time" - - "github.com/iron-io/iron_go3/api" - "github.com/iron-io/iron_go3/config" -) - -type Timestamped struct { - CreatedAt time.Time `json:"created_at,omitempty"` - UpdatedAt time.Time `json:"updated_at,omitempty"` -} - -// A Queue is the client's idea of a queue, sufficient for getting -// information for the queue with given Name at the server configured -// with Settings. See mq.New() -type Queue struct { - Settings config.Settings `json:"-"` - Name string `json:"name"` -} - -// When used for create/update, Size and TotalMessages will be omitted. 
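// NOTE (editor's example, not part of the original diff): as the comment above
// says, only the configuration fields of QueueInfo (defined just below) matter
// on the create/update path; Size and TotalMessages are server-side counters.
// A small sketch, with the queue name and timeout values as assumed placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/iron-io/iron_go3/mq"
)

func main() {
	info, err := mq.CreateQueue("example_queue", mq.QueueInfo{
		MessageTimeout:    120,  // seconds a reserved message stays invisible
		MessageExpiration: 3600, // seconds before an unconsumed message expires
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created queue:", info.Name)
}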
-type QueueInfo struct { - Name string `json:"name"` - - Size int `json:"size"` - TotalMessages int `json:"total_messages"` - - MessageExpiration int `json:"message_expiration"` - MessageTimeout int `json:"message_timeout"` - Type string `json:"type,omitempty"` - Push *PushInfo `json:"push,omitempty"` - Alerts []Alert `json:"alerts,omitempty"` -} - -type PushInfo struct { - RetriesDelay int `json:"retries_delay,omitempty"` - Retries int `json:"retries,omitempty"` - Subscribers []QueueSubscriber `json:"subscribers,omitempty"` - ErrorQueue string `json:"error_queue,omitempty"` -} - -type QueueSubscriber struct { - Name string `json:"name"` - URL string `json:"url"` - Headers map[string]string `json:"headers,omitempty"` // HTTP headers -} - -type Alert struct { - Type string `json:"type"` - Trigger int `json:"trigger"` - Direction string `json:"direction"` - Queue string `json:"queue"` - Snooze int `json:"snooze"` -} - -// Message is dual purpose, as it represents a returned message and also -// can be used for creation. For creation, only Body and Delay are valid. -// Delay will not be present in returned message. -type Message struct { - Id string `json:"id,omitempty"` - Body string `json:"body"` - Delay int64 `json:"delay,omitempty"` // time in seconds to wait before enqueue, default 0 - ReservedUntil time.Time `json:"reserved_until,omitempty"` - ReservedCount int `json:"reserved_count,omitempty"` - ReservationId string `json:"reservation_id,omitempty"` - q Queue // todo: shouldn't this be a pointer? -} - -type PushStatus struct { - Retried int `json:"retried"` - StatusCode int `json:"status_code"` - Status string `json:"status"` -} - -type Subscriber struct { - Retried int `json:"retried"` - StatusCode int `json:"status_code"` - Status string `json:"status"` - URL string `json:"url"` -} - -type Subscription struct { - PushType string - Retries int - RetriesDelay int -} - -func ErrQueueNotFound(err error) bool { - return err.Error() == "404 Not Found: Queue not found" -} - -// New uses the configuration specified in an iron.json file or environment variables -// to return a Queue object capable of acquiring information about or modifying the queue -// specified by queueName. -func New(queueName string) Queue { - return Queue{Settings: config.Config("iron_mq"), Name: queueName} -} - -// ConfigNew uses the specified settings over configuration specified in an iron.json file or -// environment variables to return a Queue object capable of acquiring information about or -// modifying the queue specified by queueName. -func ConfigNew(queueName string, settings *config.Settings) Queue { - return Queue{Settings: config.ManualConfig("iron_mq", settings), Name: queueName} -} - -// Will create a new queue, all fields are optional. -// Queue type cannot be changed. -func CreateQueue(queueName string, queueInfo QueueInfo) (QueueInfo, error) { - info := queueInfo - info.Name = queueName - return ConfigCreateQueue(info, nil) -} - -// Will create a new queue, all fields are optional. -// Queue type cannot be changed. 
-func ConfigCreateQueue(queueInfo QueueInfo, settings *config.Settings) (QueueInfo, error) { - if queueInfo.Name == "" { - return QueueInfo{}, errors.New("queue name is empty") - } - - url := api.Action(config.ManualConfig("iron_mq", settings), "queues", queueInfo.Name) - - in := struct { - Queue QueueInfo `json:"queue"` - }{ - Queue: queueInfo, - } - - var out struct { - Queue QueueInfo `json:"queue"` - } - - err := url.Req("PUT", in, &out) - return out.Queue, err -} - -// List will get a list of all queues for the configured project, paginated 30 at a time. -// For paging or filtering, see ListPage and Filter. -func List() ([]Queue, error) { - return ListQueues(config.Config("iron_mq"), "", "", 0) -} - -// ListPage is like List, but will allow specifying a page length and pagination. -// To get the first page, let prev = "". -// To get the second page, use the name of the last queue on the first page as "prev". -func ListPage(prev string, perPage int) ([]Queue, error) { - return ListQueues(config.Config("iron_mq"), "", prev, perPage) -} - -// Filter is like List, but will only return queues with the specified prefix. -func Filter(prefix string) ([]Queue, error) { - return ListQueues(config.Config("iron_mq"), prefix, "", 0) -} - -// Like ListPage, but with an added filter. -func FilterPage(prefix, prev string, perPage int) ([]Queue, error) { - return ListQueues(config.Config("iron_mq"), prefix, prev, perPage) -} - -func ListQueues(s config.Settings, prefix, prev string, perPage int) ([]Queue, error) { - var out struct { - Queues []Queue `json:"queues"` - } - - url := api.Action(s, "queues") - - if prev != "" { - url.QueryAdd("previous", "%v", prev) - } - if prefix != "" { - url.QueryAdd("prefix", "%v", prefix) - } - if perPage != 0 { - url.QueryAdd("per_page", "%d", perPage) - } - - err := url.Req("GET", nil, &out) - if err != nil { - return nil, err - } - - for idx := range out.Queues { - out.Queues[idx].Settings = s - } - - return out.Queues, nil -} - -func (q Queue) queues(s ...string) *api.URL { return api.Action(q.Settings, "queues", s...) } - -func (q *Queue) UnmarshalJSON(data []byte) error { - var name struct { - Name string `json:"name"` - } - err := json.Unmarshal(data, &name) - q.Name = name.Name - return err -} - -// Will return information about a queue; it can also be used to check existence. -// TODO make QueueNotExist err -func (q Queue) Info() (QueueInfo, error) { - var out struct { - QI QueueInfo `json:"queue"` - } - err := q.queues(q.Name).Req("GET", nil, &out) - return out.QI, err -} - -// Will create or update a queue, all QueueInfo fields are optional. -// Queue type cannot be changed. -func (q Queue) Update(queueInfo QueueInfo) (QueueInfo, error) { - var out struct { - QI QueueInfo `json:"queue"` - } - in := struct { - QI QueueInfo `json:"queue"` - }{ - QI: queueInfo, - } - - err := q.queues(q.Name).Req("PATCH", in, &out) - return out.QI, err -} - -func (q Queue) Delete() error { - return q.queues(q.Name).Req("DELETE", nil, nil) -} - -// PushString enqueues a message with body specified and no delay. -func (q Queue) PushString(body string) (id string, err error) { - ids, err := q.PushStrings(body) - if err != nil { - return - } - return ids[0], nil -} - -// PushStrings enqueues messages with specified bodies and no delay. -func (q Queue) PushStrings(bodies ...string) (ids []string, err error) { - msgs := make([]Message, len(bodies)) - for i, body := range bodies { - msgs[i] = Message{Body: body} - } - - return q.PushMessages(msgs...)
-} - -// PushMessage enqueues a message. -func (q Queue) PushMessage(msg Message) (id string, err error) { - ids, err := q.PushMessages(msg) - if err != nil { - return "", err - } else if len(ids) < 1 { - return "", errors.New("didn't receive message ID for pushing message") - } - return ids[0], err -} - -// PushMessages enqueues each message in order. -func (q Queue) PushMessages(msgs ...Message) (ids []string, err error) { - in := struct { - Messages []Message `json:"messages"` - }{ - Messages: msgs, - } - - var out struct { - IDs []string `json:"ids"` - Msg string `json:"msg"` // TODO get rid of this on server and here, too. - } - - err = q.queues(q.Name, "messages").Req("POST", &in, &out) - return out.IDs, err -} - -// Peek first 30 messages on queue. -func (q Queue) Peek() ([]Message, error) { - return q.PeekN(30) -} - -// Peek with N, max 100. -func (q Queue) PeekN(n int) ([]Message, error) { - var out struct { - Messages []Message `json:"messages"` - } - - err := q.queues(q.Name, "messages"). - QueryAdd("n", "%d", n). - Req("GET", nil, &out) - - for i, _ := range out.Messages { - out.Messages[i].q = q - } - - return out.Messages, err -} - -// Reserves a message from the queue. -// The message will not be deleted, but will be reserved until the timeout -// expires. If the timeout expires before the message is deleted, the message -// will be placed back onto the queue. -// As a result, be sure to Delete a message after you're done with it. - -func (q Queue) Reserve() (msg *Message, err error) { - msgs, err := q.GetN(1) - if len(msgs) > 0 { - return &msgs[0], err - } - return nil, err -} - -// ReserveN reserves multiple messages from the queue. -func (q Queue) ReserveN(n int) ([]Message, error) { - return q.LongPoll(n, 60, 0, false) -} - -// Get reserves a message from the queue. -// Deprecated, use Reserve instead. -func (q Queue) Get() (msg *Message, err error) { - return q.Reserve() -} - -// GetN is Get for N. -// Deprecated, use ReserveN instead. -func (q Queue) GetN(n int) ([]Message, error) { - return q.ReserveN(n) -} - -// TODO deprecate for LongPoll? -func (q Queue) GetNWithTimeout(n, timeout int) ([]Message, error) { - return q.LongPoll(n, timeout, 0, false) -} - -// Pop will get and delete a message from the queue. -func (q Queue) Pop() (msg Message, err error) { - msgs, err := q.PopN(1) - if len(msgs) > 0 { - msg = msgs[0] - } - return msg, err -} - -// PopN is Pop for N. -func (q Queue) PopN(n int) ([]Message, error) { - return q.LongPoll(n, 0, 0, true) -} - -// LongPoll is the long form for Get, Pop, with all options available. -// If wait = 0, then LongPoll is simply a get, otherwise, the server -// will poll for n messages up to wait seconds (max 30). -// If delete is specified, then each message will be deleted instead -// of being put back onto the queue. 
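// NOTE (editor's example, not part of the original diff): a sketch of the
// LongPoll call documented above and defined just below — wait up to 20s for
// up to 10 messages, reserve each for 60s, and delete them once processed.
// The queue name is an assumed placeholder:

package main

import (
	"log"

	"github.com/iron-io/iron_go3/mq"
)

func main() {
	q := mq.New("example_queue")
	// delete=false: messages are only reserved, so we must Delete them ourselves.
	msgs, err := q.LongPoll(10, 60, 20, false)
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range msgs {
		log.Println("got:", m.Body)
		if err := m.Delete(); err != nil {
			log.Println("delete failed:", err)
		}
	}
}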
-func (q Queue) LongPoll(n, timeout, wait int, delete bool) ([]Message, error) { - in := struct { - N int `json:"n"` - Timeout int `json:"timeout"` - Wait int `json:"wait"` - Delete bool `json:"delete"` - }{ - N: n, - Timeout: timeout, - Wait: wait, - Delete: delete, - } - var out struct { - Messages []Message `json:"messages"` // TODO don't think we need pointer here - } - - err := q.queues(q.Name, "reservations").Req("POST", &in, &out) - - for i, _ := range out.Messages { - out.Messages[i].q = q - } - - return out.Messages, err -} - -// Delete all messages in the queue -func (q Queue) Clear() (err error) { - return q.queues(q.Name, "messages").Req("DELETE", &struct{}{}, nil) -} - -// Delete message from queue -func (q Queue) DeleteMessage(msgId, reservationId string) (err error) { - body := struct { - Res string `json:"reservation_id"` - }{Res: reservationId} - return q.queues(q.Name, "messages", msgId).Req("DELETE", body, nil) -} - -// Delete multiple messages by id -func (q Queue) DeleteMessages(ids []string) error { - in := struct { - Ids []delmsg `json:"ids"` - }{Ids: make([]delmsg, len(ids))} - - for i, val := range ids { - in.Ids[i].Id = val - } - return q.queues(q.Name, "messages").Req("DELETE", in, nil) -} - -type delmsg struct { - Id string `json:"id"` - Res string `json:"reservation_id"` -} - -// Delete multiple reserved messages from the queue -func (q Queue) DeleteReservedMessages(messages []Message) error { - ids := struct { - Ids []delmsg `json:"ids"` - }{Ids: make([]delmsg, len(messages))} - - for i, val := range messages { - ids.Ids[i].Id = val.Id - ids.Ids[i].Res = val.ReservationId - } - return q.queues(q.Name, "messages").Req("DELETE", ids, nil) -} - -// Reset timeout of message to keep it reserved -func (q Queue) TouchMessage(msgId, reservationId string) (string, error) { - return q.TouchMessageFor(msgId, reservationId, 0) -} - -// Reset timeout of message to keep it reserved -func (q Queue) TouchMessageFor(msgId, reservationId string, timeout int) (string, error) { - in := struct { - Timeout int `json:"timeout,omitempty"` - ReservationId string `json:"reservation_id,omitempty"` - }{ReservationId: reservationId} - if timeout > 0 { - in.Timeout = timeout - } - out := &Message{} - err := q.queues(q.Name, "messages", msgId, "touch").Req("POST", in, out) - return out.ReservationId, err -} - -// Put message back in the queue, message will be available after +delay+ seconds. 
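// NOTE (editor's example, not part of the original diff): the release path
// documented above — a reserved message goes back onto the queue and becomes
// visible again after the given delay. Queue name and delay are placeholders:

package main

import (
	"log"

	"github.com/iron-io/iron_go3/mq"
)

func main() {
	q := mq.New("example_queue")
	msg, err := q.Reserve()
	if err != nil || msg == nil {
		log.Fatal("nothing to release: ", err)
	}
	// Put it back; it becomes deliverable again after 30 seconds.
	if err := q.ReleaseMessage(msg.Id, msg.ReservationId, 30); err != nil {
		log.Fatal(err)
	}
}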
-func (q Queue) ReleaseMessage(msgId, reservationId string, delay int64) (err error) { - body := struct { - Delay int64 `json:"delay"` - ReservationId string `json:"reservation_id"` - }{Delay: delay, ReservationId: reservationId} - return q.queues(q.Name, "messages", msgId, "release").Req("POST", &body, nil) -} - -func (q Queue) MessageSubscribers(msgId string) ([]Subscriber, error) { - out := struct { - Subscribers []Subscriber `json:"subscribers"` - }{} - err := q.queues(q.Name, "messages", msgId, "subscribers").Req("GET", nil, &out) - return out.Subscribers, err -} - -func (q Queue) AddSubscribers(subscribers ...QueueSubscriber) error { - collection := struct { - Subscribers []QueueSubscriber `json:"subscribers,omitempty"` - }{ - Subscribers: subscribers, - } - return q.queues(q.Name, "subscribers").Req("POST", &collection, nil) -} - -func (q Queue) ReplaceSubscribers(subscribers ...QueueSubscriber) error { - collection := struct { - Subscribers []QueueSubscriber `json:"subscribers,omitempty"` - }{ - Subscribers: subscribers, - } - return q.queues(q.Name, "subscribers").Req("PUT", &collection, nil) -} - -func (q Queue) RemoveSubscribers(subscribers ...string) error { - collection := make([]QueueSubscriber, len(subscribers)) - for i, subscriber := range subscribers { - collection[i].Name = subscriber - } - return q.RemoveSubscribersCollection(collection...) -} - -func (q Queue) RemoveSubscribersCollection(subscribers ...QueueSubscriber) error { - collection := struct { - Subscribers []QueueSubscriber `json:"subscribers,omitempty"` - }{ - Subscribers: subscribers, - } - return q.queues(q.Name, "subscribers").Req("DELETE", &collection, nil) -} - -func (q Queue) MessageSubscribersPollN(msgId string, n int) ([]Subscriber, error) { - subs, err := q.MessageSubscribers(msgId) - for { - time.Sleep(100 * time.Millisecond) - subs, err = q.MessageSubscribers(msgId) - if err != nil { - return subs, err - } - if len(subs) >= n && actualPushStatus(subs) { - return subs, nil - } - } - return subs, err -} - -func (q Queue) AddAlerts(alerts ...*Alert) (err error) { - var queue struct { - QI QueueInfo `json:"queue"` - } - in := QueueInfo{ - Alerts: make([]Alert, len(alerts)), - } - - for i, alert := range alerts { - in.Alerts[i] = *alert - } - queue.QI = in - - return q.queues(q.Name).Req("PATCH", &queue, nil) -} - -func actualPushStatus(subs []Subscriber) bool { - for _, sub := range subs { - if sub.Status == "queued" { - return false - } - } - - return true -} - -// Delete message from queue -func (m Message) Delete() (err error) { - return m.q.DeleteMessage(m.Id, m.ReservationId) -} - -// Reset timeout of message to keep it reserved -func (m *Message) Touch() (err error) { - return m.TouchFor(0) -} - -// Reset timeout of message to keep it reserved -func (m *Message) TouchFor(timeout int) (err error) { - reservationId, error := m.q.TouchMessageFor(m.Id, m.ReservationId, timeout) - m.ReservationId = reservationId - return error -} - -// Put message back in the queue, message will be available after +delay+ seconds. 
-func (m Message) Release(delay int64) (err error) { - return m.q.ReleaseMessage(m.Id, m.ReservationId, delay) -} - -func (m Message) Subscribers() (interface{}, error) { - return m.q.MessageSubscribers(m.Id) -} diff --git a/vendor/github.com/iron-io/iron_go3/mq/mq_test.go b/vendor/github.com/iron-io/iron_go3/mq/mq_test.go deleted file mode 100644 index 84cd1a5ed..000000000 --- a/vendor/github.com/iron-io/iron_go3/mq/mq_test.go +++ /dev/null @@ -1,142 +0,0 @@ -package mq - -import ( - "fmt" - "testing" - "time" - - . "github.com/jeffh/go.bdd" -) - -func TestEverything(t *testing.T) {} - -func q(name string) Queue { - c := New(name) - return c -} - -func init() { - defer PrintSpecReport() - - Describe("IronMQ", func() { - It("Deletes all existing messages", func() { - c := q("queuename") - - _, err := c.PushString("just a little test") - Expect(err, ToBeNil) - - Expect(c.Clear(), ToBeNil) - - info, err := c.Info() - Expect(err, ToBeNil) - Expect(info.Size, ToEqual, 0) - }) - - It("Pushes and gets a message", func() { - c := q("queuename") - id1, err := c.PushString("just a little test") - Expect(err, ToBeNil) - - msg, err := c.Get() - Expect(err, ToBeNil) - - Expect(msg, ToNotBeNil) - Expect(msg.Id, ToDeepEqual, id1) - Expect(msg.Body, ToDeepEqual, "just a little test") - - err = c.DeleteMessage(msg.Id, msg.ReservationId) - Expect(err, ToBeNil) - - info, err := c.Info() - Expect(err, ToBeNil) - Expect(info.Size, ToEqual, 0) - - }) - - It("clears the queue", func() { - q := q("queuename") - - strings := []string{} - for n := 0; n < 100; n++ { - strings = append(strings, fmt.Sprint("test: ", n)) - } - - _, err := q.PushStrings(strings...) - Expect(err, ToBeNil) - - info, err := q.Info() - Expect(err, ToBeNil) - Expect(info.Size, ToEqual, 100) - - Expect(q.Clear(), ToBeNil) - - info, err = q.Info() - Expect(err, ToBeNil) - Expect(info.Size, ToEqual, 0) - }) - - It("Lists all queues", func() { - c := q("queuename") - queues, err := ListQueues(c.Settings, "", "", 101) // can't check the cache's value just yet.
- Expect(err, ToBeNil) - l := len(queues) - t := l >= 1 - Expect(t, ToBeTrue) - found := false - for _, queue := range queues { - if queue.Name == "queuename" { - found = true - break - } - } - Expect(found, ToEqual, true) - }) - - It("releases a message", func() { - c := q("queuename") - - id, err := c.PushString("trying") - Expect(err, ToBeNil) - - msg, err := c.Get() - Expect(err, ToBeNil) - - err = msg.Release(3) - Expect(err, ToBeNil) - - msg, err = c.Get() - Expect(msg, ToBeNil) - - time.Sleep(4 * time.Second) - - msg, err = c.Get() - Expect(err, ToBeNil) - Expect(msg, ToNotBeNil) - Expect(msg.Id, ToEqual, id) - }) - - It("updates a queue", func() { - name := "pushqueue" + time.Now().String() - - _, err := CreateQueue(name, QueueInfo{Type: "multicast", Push: &PushInfo{ - Subscribers: []QueueSubscriber{{Name: "first", URL: "http://hit.me.with.a.message"}}}}) - Expect(err, ToBeNil) - - c := q(name) - - info, err := c.Info() - Expect(err, ToBeNil) - - qi := QueueInfo{Type: "multicast", Push: &PushInfo{ - Subscribers: []QueueSubscriber{{Name: "first", URL: "http://hit.me.with.another.message"}}}} - rc, err := c.Update(qi) - Expect(err, ToBeNil) - info, err = c.Info() - Expect(err, ToBeNil) - Expect(info.Name, ToEqual, rc.Name) - - err = c.Delete() - Expect(err, ToBeNil) - }) - }) -} diff --git a/vendor/github.com/iron-io/iron_go3/worker/methods.go b/vendor/github.com/iron-io/iron_go3/worker/methods.go deleted file mode 100644 index 4c16ceb14..000000000 --- a/vendor/github.com/iron-io/iron_go3/worker/methods.go +++ /dev/null @@ -1,616 +0,0 @@ -package worker - -import ( - "archive/zip" - "crypto/aes" - "crypto/cipher" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/x509" - "encoding/base64" - "encoding/json" - "encoding/pem" - "fmt" - "io" - "io/ioutil" - "mime/multipart" - "time" -) - -type Schedule struct { - CodeName string `json:"code_name"` - Delay *time.Duration `json:"delay"` - EndAt *time.Time `json:"end_at"` - MaxConcurrency *int `json:"max_concurrency"` - Name string `json:"name"` - Payload string `json:"payload"` - Priority *int `json:"priority"` - RunEvery *int `json:"run_every"` - RunTimes *int `json:"run_times"` - StartAt *time.Time `json:"start_at"` - Cluster string `json:"cluster"` - Label string `json:"label"` -} - -type ScheduleInfo struct { - CodeName string `json:"code_name"` - CreatedAt time.Time `json:"created_at"` - EndAt time.Time `json:"end_at"` - Id string `json:"id"` - LastRunTime time.Time `json:"last_run_time"` - MaxConcurrency int `json:"max_concurrency"` - Msg string `json:"msg"` - NextStart time.Time `json:"next_start"` - ProjectId string `json:"project_id"` - RunCount int `json:"run_count"` - RunTimes int `json:"run_times"` - StartAt time.Time `json:"start_at"` - Status string `json:"status"` - UpdatedAt time.Time `json:"updated_at"` -} - -type Task struct { - CodeName string `json:"code_name"` - Payload string `json:"payload"` - Priority int `json:"priority"` - Timeout *time.Duration `json:"timeout"` - Delay *time.Duration `json:"delay"` - Cluster string `json:"cluster"` - Label string `json:"label"` -} - -type TaskInfo struct { - CodeHistoryId string `json:"code_history_id"` - CodeId string `json:"code_id"` - CodeName string `json:"code_name"` - CodeRev string `json:"code_rev"` - Id string `json:"id"` - Payload string `json:"payload"` - ProjectId string `json:"project_id"` - Status string `json:"status"` - Msg string `json:"msg,omitempty"` - ScheduleId string `json:"schedule_id"` - Duration int `json:"duration"` - RunTimes int `json:"run_times"` 
- Timeout int `json:"timeout"` - Percent int `json:"percent,omitempty"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` - StartTime time.Time `json:"start_time"` - EndTime time.Time `json:"end_time"` -} - -type CodeSource map[string][]byte // map[pathInZip]code - -type Code struct { - Id string `json:"id,omitempty"` - Name string `json:"name"` - Runtime string `json:"runtime"` - FileName string `json:"file_name"` - Config string `json:"config,omitempty"` - MaxConcurrency int `json:"max_concurrency,omitempty"` - Retries *int `json:"retries,omitempty"` - RetriesDelay *int `json:"retries_delay,omitempty"` // seconds - Stack string `json:"stack"` - Image string `json:"image"` - Command string `json:"command"` - Host string `json:"host,omitempty"` // PaaS router thing - EnvVars map[string]string `json:"env_vars"` - Source CodeSource `json:"-"` - DefaultPriority int `json:"default_priority,omitempty"` -} - -type CodeInfo struct { - Id string `json:"id"` - LatestChecksum string `json:"latest_checksum"` - LatestHistoryId string `json:"latest_history_id"` - Name string `json:"name"` - ProjectId string `json:"project_id"` - Runtime *string `json:"runtime"` - Rev int `json:"rev"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` - LatestChange time.Time `json:"latest_change"` -} - -// CodePackageList lists code packages. -// -// The page argument decides the page of code packages you want to retrieve, starting from 0; the maximum is 100. -// -// The perPage argument determines the number of code packages to return. Note -// this is a maximum value, so there may be fewer packages returned if there -// aren’t enough results. If this is < 1, 1 will be the default. Maximum is 100. -func (w *Worker) CodePackageList(page, perPage int) (codes []CodeInfo, err error) { - out := map[string][]CodeInfo{} - - err = w.codes(). - QueryAdd("page", "%d", page). - QueryAdd("per_page", "%d", perPage). - Req("GET", nil, &out) - if err != nil { - return - } - - return out["codes"], nil -} - -// CodePackageInfo gets info about a code package -func (w *Worker) CodePackageInfo(codeId string) (code CodeInfo, err error) { - out := CodeInfo{} - err = w.codes(codeId).Req("GET", nil, &out) - return out, err -} - -// CodePackageDelete deletes a code package -func (w *Worker) CodePackageDelete(codeId string) (err error) { - return w.codes(codeId).Req("DELETE", nil, nil) -} - -// CodePackageDownload downloads a code package -func (w *Worker) CodePackageDownload(codeId string) (code Code, err error) { - out := Code{} - err = w.codes(codeId, "download").Req("GET", nil, &out) - return out, err -} - -// CodePackageRevisions lists the revisions of a code package -func (w *Worker) CodePackageRevisions(codeId string) (code Code, err error) { - out := Code{} - err = w.codes(codeId, "revisions").Req("GET", nil, &out) - return out, err -} - -// CodePackageZipUpload can be used to upload a code package from a zip -// archive, where zipName is the filepath of the zip. If zipName is an empty -// string, then the code package will be uploaded without a zip package (see -// CodePackageUpload). -func (w *Worker) CodePackageZipUpload(zipName string, args Code) (*Code, error) { - return w.codePackageUpload(zipName, args) -} - -// CodePackageUpload uploads a code package without a zip file.
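// NOTE (editor's example, not part of the original diff): a sketch of uploading
// a zipped code package via CodePackageZipUpload above (the zip-less
// CodePackageUpload variant follows this note). worker.New as the constructor,
// plus the runtime and file names, are assumptions about the deleted API rather
// than verified values:

package main

import (
	"log"

	"github.com/iron-io/iron_go3/worker"
)

func main() {
	w := worker.New() // assumed constructor from the deleted worker.go
	code, err := w.CodePackageZipUpload("hello.zip", worker.Code{
		Name:     "hello",
		Runtime:  "binary",
		FileName: "hello.sh", // entry point inside the zip
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("uploaded code package:", code.Id)
}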
-func (w *Worker) CodePackageUpload(args Code) (*Code, error) { - return w.codePackageUpload("", args) -} - -func (w *Worker) codePackageUpload(zipName string, args Code) (*Code, error) { - b := randomBoundary() - r := &streamZipPipe{zipName: zipName, args: args, boundary: b} - defer r.Close() - - var out Code - err := w.codes(). - SetContentType("multipart/form-data; boundary="+b). - Req("POST", r, &out) - - return &out, err -} - -func randomBoundary() string { - var buf [30]byte - _, err := io.ReadFull(rand.Reader, buf[:]) - if err != nil { - panic(err) - } - return fmt.Sprintf("%x", buf[:]) -} - -// implement seek so that we can retry it. not thread safe, -// Read and Seek must be called in same thread. -type streamZipPipe struct { - zipName string - args Code - boundary string - - r io.ReadCloser - w io.WriteCloser - once bool - err chan error -} - -// safe to call multiple times, implement io.Closer so http will call this -func (s *streamZipPipe) Close() error { - if s.r != nil { - return s.r.Close() - } - return nil -} - -// only seeks to beginning, ignores parameters -func (s *streamZipPipe) Seek(offset int64, whence int) (int64, error) { - // just restart the whole thing, the last pipe should have errored out and been closed - s.r, s.w = io.Pipe() - s.err = make(chan error, 1) - s.once = true - go s.pipe() - return 0, nil -} - -func (s *streamZipPipe) Read(b []byte) (int, error) { - if !s.once { - s.once = true - s.r, s.w = io.Pipe() - s.err = make(chan error, 1) - go s.pipe() - } - - select { - case err := <-s.err: - if err != nil { - return 0, err // n should get ignored - } - default: - } - return s.r.Read(b) -} - -func (s *streamZipPipe) pipe() { - defer s.w.Close() - - mWriter := multipart.NewWriter(s.w) - mWriter.SetBoundary(s.boundary) - mMetaWriter, err := mWriter.CreateFormField("data") - if err != nil { - s.err <- err - return - } - - if err := json.NewEncoder(mMetaWriter).Encode(s.args); err != nil { - s.err <- err - return - } - - if s.zipName != "" { - r, err := zip.OpenReader(s.zipName) - if err != nil { - s.err <- err - return - } - defer r.Close() - - mFileWriter, err := mWriter.CreateFormFile("file", "worker.zip") - if err != nil { - s.err <- err - return - } - zWriter := zip.NewWriter(mFileWriter) - - for _, f := range r.File { - fWriter, err := zWriter.Create(f.Name) - if err != nil { - s.err <- err - return - } - rc, err := f.Open() - if err != nil { - s.err <- err - return - } - _, err = io.Copy(fWriter, rc) - rc.Close() - if err != nil { - s.err <- err - return - } - } - - if err := zWriter.Close(); err != nil { - s.err <- err - return - } - } - if err := mWriter.Close(); err != nil { - s.err <- err - } -} - -func (w *Worker) TaskList() (tasks []TaskInfo, err error) { - out := map[string][]TaskInfo{} - err = w.tasks().Req("GET", nil, &out) - if err != nil { - return - } - return out["tasks"], nil -} - -type TaskListParams struct { - CodeName string - Label string - Page int - PerPage int - FromTime time.Time - ToTime time.Time - Statuses []string -} - -func (w *Worker) FilteredTaskList(params TaskListParams) (tasks []TaskInfo, err error) { - out := map[string][]TaskInfo{} - url := w.tasks() - - url.QueryAdd("code_name", "%s", params.CodeName) - - if params.Label != "" { - url.QueryAdd("label", "%s", params.Label) - } - - if params.Page > 0 { - url.QueryAdd("page", "%d", params.Page) - } - - if params.PerPage > 0 { - url.QueryAdd("per_page", "%d", params.PerPage) - } - - if fromTimeSeconds := params.FromTime.Unix(); fromTimeSeconds > 0 { - 
url.QueryAdd("from_time", "%d", fromTimeSeconds) - } - - if toTimeSeconds := params.ToTime.Unix(); toTimeSeconds > 0 { - url.QueryAdd("to_time", "%d", toTimeSeconds) - } - - for _, status := range params.Statuses { - url.QueryAdd(status, "%d", true) - } - - err = url.Req("GET", nil, &out) - - if err != nil { - return - } - - return out["tasks"], nil -} - -// TaskQueue queues a task -func (w *Worker) TaskQueue(tasks ...Task) (taskIds []string, err error) { - outTasks := make([]map[string]interface{}, 0, len(tasks)) - - for _, task := range tasks { - thisTask := map[string]interface{}{ - "code_name": task.CodeName, - "payload": task.Payload, - "priority": task.Priority, - "cluster": task.Cluster, - "label": task.Label, - } - if task.Timeout != nil { - thisTask["timeout"] = (*task.Timeout).Seconds() - } - if task.Delay != nil { - thisTask["delay"] = int64((*task.Delay).Seconds()) - } - - outTasks = append(outTasks, thisTask) - } - - in := map[string][]map[string]interface{}{"tasks": outTasks} - out := struct { - Tasks []struct { - Id string `json:"id"` - } `json:"tasks"` - Msg string `json:"msg"` - }{} - - err = w.tasks().Req("POST", &in, &out) - if err != nil { - return - } - - taskIds = make([]string, 0, len(out.Tasks)) - for _, task := range out.Tasks { - taskIds = append(taskIds, task.Id) - } - - return -} - -// TaskInfo gives info about a given task -func (w *Worker) TaskInfo(taskId string) (task TaskInfo, err error) { - out := TaskInfo{} - err = w.tasks(taskId).Req("GET", nil, &out) - return out, err -} - -func (w *Worker) TaskLog(taskId string) (log []byte, err error) { - response, err := w.tasks(taskId, "log").Request("GET", nil) - if err != nil { - return - } - - log, err = ioutil.ReadAll(response.Body) - return -} - -// TaskCancel cancels a Task -func (w *Worker) TaskCancel(taskId string) (err error) { - _, err = w.tasks(taskId, "cancel").Request("POST", nil) - return err -} - -// TaskProgress sets a Task's Progress -func (w *Worker) TaskProgress(taskId string, progress int, msg string) (err error) { - payload := map[string]interface{}{ - "msg": msg, - "percent": progress, - } - - err = w.tasks(taskId, "progress").Req("POST", payload, nil) - return -} - -// TaskQueueWebhook queues a Task from a Webhook -func (w *Worker) TaskQueueWebhook() (err error) { return } - -// ScheduleList lists Scheduled Tasks -func (w *Worker) ScheduleList() (schedules []ScheduleInfo, err error) { - out := map[string][]ScheduleInfo{} - err = w.schedules().Req("GET", nil, &out) - if err != nil { - return - } - return out["schedules"], nil -} - -// Schedule a Task -func (w *Worker) Schedule(schedules ...Schedule) (scheduleIds []string, err error) { - outSchedules := make([]map[string]interface{}, 0, len(schedules)) - - for _, schedule := range schedules { - sm := map[string]interface{}{ - "code_name": schedule.CodeName, - "name": schedule.Name, - "payload": schedule.Payload, - "label": schedule.Label, - "cluster": schedule.Cluster, - } - if schedule.Delay != nil { - sm["delay"] = (*schedule.Delay).Seconds() - } - if schedule.EndAt != nil { - sm["end_at"] = *schedule.EndAt - } - if schedule.MaxConcurrency != nil { - sm["max_concurrency"] = *schedule.MaxConcurrency - } - if schedule.Priority != nil { - sm["priority"] = *schedule.Priority - } - if schedule.RunEvery != nil { - sm["run_every"] = *schedule.RunEvery - } - if schedule.RunTimes != nil { - sm["run_times"] = *schedule.RunTimes - } - if schedule.StartAt != nil { - sm["start_at"] = *schedule.StartAt - } - outSchedules = append(outSchedules, sm) - } - - in 
:= map[string][]map[string]interface{}{"schedules": outSchedules} - out := struct { - Schedules []struct { - Id string `json:"id"` - } `json:"schedules"` - Msg string `json:"msg"` - }{} - - err = w.schedules().Req("POST", &in, &out) - if err != nil { - return - } - - scheduleIds = make([]string, 0, len(out.Schedules)) - - for _, schedule := range out.Schedules { - scheduleIds = append(scheduleIds, schedule.Id) - } - - return -} - -// ScheduleInfo gets info about a scheduled task -func (w *Worker) ScheduleInfo(scheduleId string) (info ScheduleInfo, err error) { - info = ScheduleInfo{} - err = w.schedules(scheduleId).Req("GET", nil, &info) - return info, err -} - -// ScheduleCancel cancels a scheduled task -func (w *Worker) ScheduleCancel(scheduleId string) (err error) { - _, err = w.schedules(scheduleId, "cancel").Request("POST", nil) - return -} - -// TODO we should probably support other crypto functions at some point so that people have a choice. - -// - expects a PEM-encoded PKIX rsa public key (i.e. "-----BEGIN PUBLIC KEY-----", as parsed by x509.ParsePKIXPublicKey) -// - returns a base64 ciphertext with an rsa encrypted aes-128 session key stored in the -// first modulus-length bytes (i.e. 2048 bit RSA = first 256 bytes), followed by -// the aes-gcm cipher, then the 16 byte auth tag, with a new, random iv in the -// last 12 bytes (see the layout below). -// - must have RSA key >= 1024 -// - end format w/ RSA of 2048 for display purposes, all base64 encoded: -// [ 256 byte RSA encrypted AES key | len(payload) AES-GCM cipher | 16 bytes AES tag | 12 bytes AES nonce ] -// - each task will be encrypted with a different AES session key -// -// EncryptPayloads will return a copy of the input tasks with the Payload field modified -// to be encrypted as described above. Upon any error, the tasks returned will be nil. -func EncryptPayloads(publicKey []byte, in ...Task) ([]Task, error) { - rsablock, _ := pem.Decode(publicKey) - if rsablock == nil { - return nil, fmt.Errorf("worker: no PEM block found in public key") - } - rsaKey, err := x509.ParsePKIXPublicKey(rsablock.Bytes) - if err != nil { - return nil, err - } - rsaPublicKey := rsaKey.(*rsa.PublicKey) - - tasks := make([]Task, len(in)) - copy(tasks, in) - - for i := range tasks { - // get a random aes-128 session key to encrypt - aesKey := make([]byte, 128/8) - if _, err := rand.Read(aesKey); err != nil { - return nil, err - } - - // have to use sha1 b/c ruby openssl picks it for OAEP: https://www.openssl.org/docs/manmaster/crypto/RSA_public_encrypt.html - aesKeyCipher, err := rsa.EncryptOAEP(sha1.New(), rand.Reader, rsaPublicKey, aesKey, nil) - if err != nil { - return nil, err - } - - block, err := aes.NewCipher(aesKey) - if err != nil { - return nil, err - } - gcm, err := cipher.NewGCM(block) - if err != nil { - return nil, err - } - - pbytes := []byte(tasks[i].Payload) - // The IV needs to be unique, but not secure. last 12 bytes are IV. - ciphertext := make([]byte, len(pbytes)+gcm.Overhead()+gcm.NonceSize()) - nonce := ciphertext[len(ciphertext)-gcm.NonceSize():] - if _, err := rand.Read(nonce); err != nil { - return nil, err - } - - // tag is appended to cipher as last 16 bytes.
https://golang.org/src/crypto/cipher/gcm.go?s=2318:2357#L145 - gcm.Seal(ciphertext[:0], nonce, pbytes, nil) - - // base64 the whole thing - tasks[i].Payload = base64.StdEncoding.EncodeToString(append(aesKeyCipher, ciphertext...)) - } - return tasks, nil -} - -type Cluster struct { - Id string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - Memory int64 `json:"memory,omitempty"` - DiskSpace int64 `json:"disk_space,omitempty"` - CpuShare *int32 `json:"cpu_share,omitempty"` -} - -func (w *Worker) ClusterCreate(c Cluster) (Cluster, error) { - var out struct { - C Cluster `json:"cluster"` - } - err := w.clusters().Req("POST", c, &out) - return out.C, err -} - -func (w *Worker) ClusterDelete(id string) error { - return w.clusters(id).Req("DELETE", nil, nil) -} - -func (w *Worker) ClusterToken(id string) (string, error) { - var out struct { - Token string `json:"token"` - } - err := w.clusters(id, "credentials").Req("GET", nil, &out) - return out.Token, err -} diff --git a/vendor/github.com/iron-io/iron_go3/worker/remote.go b/vendor/github.com/iron-io/iron_go3/worker/remote.go deleted file mode 100644 index e9ad0b056..000000000 --- a/vendor/github.com/iron-io/iron_go3/worker/remote.go +++ /dev/null @@ -1,101 +0,0 @@ -package worker - -import ( - "encoding/json" - "flag" - "io" - "io/ioutil" - "os" -) - -var ( - TaskDir string - envFlag string - payloadFlag string - TaskId string - configFlag string -) - -// call this to parse flags before using the other methods. -func ParseFlags() { - flag.StringVar(&TaskDir, "d", "", "task dir") - flag.StringVar(&envFlag, "e", "", "environment type") - flag.StringVar(&payloadFlag, "payload", "", "payload file") - flag.StringVar(&TaskId, "id", "", "task id") - flag.StringVar(&configFlag, "config", "", "config file") - flag.Parse() - if os.Getenv("TASK_ID") != "" { - TaskId = os.Getenv("TASK_ID") - } - if os.Getenv("TASK_DIR") != "" { - TaskDir = os.Getenv("TASK_DIR") - } - if os.Getenv("PAYLOAD_FILE") != "" { - payloadFlag = os.Getenv("PAYLOAD_FILE") - } - if os.Getenv("CONFIG_FILE") != "" { - configFlag = os.Getenv("CONFIG_FILE") - } -} - -func PayloadReader() (io.ReadCloser, error) { - return os.Open(payloadFlag) -} - -func PayloadFromJSON(v interface{}) error { - reader, err := PayloadReader() - if err != nil { - return err - } - defer reader.Close() - return json.NewDecoder(reader).Decode(v) -} - -func PayloadAsString() (string, error) { - reader, err := PayloadReader() - if err != nil { - return "", err - } - defer reader.Close() - - b, err := ioutil.ReadAll(reader) - if err != nil { - return "", err - } - return string(b), nil -} - -func ConfigReader() (io.ReadCloser, error) { - return os.Open(configFlag) -} - -func ConfigFromJSON(v interface{}) error { - reader, err := ConfigReader() - if err != nil { - return err - } - defer reader.Close() - return json.NewDecoder(reader).Decode(v) -} - -func ConfigAsString() (string, error) { - reader, err := ConfigReader() - if err != nil { - return "", err - } - defer reader.Close() - - b, err := ioutil.ReadAll(reader) - if err != nil { - return "", err - } - return string(b), nil -} - -func IronTaskId() string { - return TaskId -} - -func IronTaskDir() string { - return TaskDir -} diff --git a/vendor/github.com/iron-io/iron_go3/worker/worker.go b/vendor/github.com/iron-io/iron_go3/worker/worker.go deleted file mode 100644 index 9ba778244..000000000 --- a/vendor/github.com/iron-io/iron_go3/worker/worker.go +++ /dev/null @@ -1,105 +0,0 @@ -// IronWorker (elastic computing) client library -package worker - 
-import ( - "time" - - "github.com/iron-io/iron_go3/api" - "github.com/iron-io/iron_go3/config" -) - -type Worker struct { - Settings config.Settings -} - -func New() *Worker { - return &Worker{Settings: config.Config("iron_worker")} -} - -func (w *Worker) codes(s ...string) *api.URL { return api.Action(w.Settings, "codes", s...) } -func (w *Worker) tasks(s ...string) *api.URL { return api.Action(w.Settings, "tasks", s...) } -func (w *Worker) schedules(s ...string) *api.URL { return api.Action(w.Settings, "schedules", s...) } -func (w *Worker) clusters(s ...string) *api.URL { return api.RootAction(w.Settings, "clusters", s...) } - -// exponential sleep between retries, replace this with your own preferred strategy -func sleepBetweenRetries(previousDuration time.Duration) time.Duration { - if previousDuration >= 60*time.Second { - return previousDuration - } - return previousDuration + previousDuration -} - -var GoCodeRunner = []byte(`#!/bin/sh -root() { - while [ $# -gt 0 ]; do - if [ "$1" = "-d" ]; then - printf "%s\n" "$2" - break - fi - done -} -cd "$(root "$@")" -chmod +x worker -./worker "$@" -`) - -// WaitForTask returns a channel that will receive the completed task and is closed afterwards. -// If an error occured during the wait, the channel will be closed. -func (w *Worker) WaitForTask(taskId string) chan TaskInfo { - out := make(chan TaskInfo) - go func() { - defer close(out) - retryDelay := 100 * time.Millisecond - - for { - info, err := w.TaskInfo(taskId) - if err != nil { - return - } - - if info.Status == "queued" || info.Status == "running" { - time.Sleep(retryDelay) - retryDelay = sleepBetweenRetries(retryDelay) - } else { - out <- info - return - } - } - }() - - return out -} - -func (w *Worker) WaitForTaskLog(taskId string) chan []byte { - out := make(chan []byte) - - go func() { - defer close(out) - retryDelay := 100 * time.Millisecond - - for { - log, err := w.TaskLog(taskId) - if err != nil { - e, ok := err.(api.HTTPResponseError) - if ok && e.StatusCode() == 404 { - time.Sleep(retryDelay) - retryDelay = sleepBetweenRetries(retryDelay) - continue - } - return - } - out <- log - return - } - }() - return out -} - -func clamp(value, min, max int) int { - if value < min { - return min - } else if value > max { - return max - } - return value -} diff --git a/vendor/github.com/iron-io/iron_go3/worker/worker_test.go b/vendor/github.com/iron-io/iron_go3/worker/worker_test.go deleted file mode 100644 index 6ffc96bab..000000000 --- a/vendor/github.com/iron-io/iron_go3/worker/worker_test.go +++ /dev/null @@ -1,151 +0,0 @@ -package worker - -import ( - "io/ioutil" - "os" - "testing" - "time" - - // "github.com/iron-io/iron_go/worker" - . 
"github.com/jeffh/go.bdd" -) - -func TestEverything(*testing.T) { - defer PrintSpecReport() - - Describe("iron.io worker", func() { - w := New() - - It("Prepares the specs by deleting all existing code packages", func() { - codes, err := w.CodePackageList(0, 100) - Expect(err, ToBeNil) - for _, code := range codes { - err = w.CodePackageDelete(code.Id) - Expect(err, ToBeNil) - } - - codes, err = w.CodePackageList(0, 100) - Expect(err, ToBeNil) - Expect(len(codes), ToEqual, 0) - }) - - It("Creates a code package", func() { - tempDir, err := ioutil.TempDir("", "iron-worker") - Expect(err, ToBeNil) - defer os.RemoveAll(tempDir) - - fd, err := os.Create(tempDir + "/main.go") - Expect(err, ToBeNil) - - n, err := fd.WriteString(`package main; func main(){ println("Hello world!") }`) - Expect(err, ToBeNil) - Expect(n, ToEqual, 52) - - Expect(fd.Close(), ToBeNil) - - pkg, err := NewGoCodePackage("GoFun", fd.Name()) - Expect(err, ToBeNil) - - id, err := w.CodePackageUpload(pkg) - Expect(err, ToBeNil) - - info, err := w.CodePackageInfo(id) - Expect(err, ToBeNil) - Expect(info.Id, ToEqual, id) - Expect(info.Name, ToEqual, "GoFun") - Expect(info.Rev, ToEqual, 1) - }) - - It("Queues a Task", func() { - ids, err := w.TaskQueue(Task{CodeName: "GoFun"}) - Expect(err, ToBeNil) - - id := ids[0] - info, err := w.TaskInfo(id) - Expect(err, ToBeNil) - Expect(info.CodeName, ToEqual, "GoFun") - - select { - case info = <-w.WaitForTask(id): - Expect(info.Status, ToEqual, "complete") - case <-time.After(5 * time.Second): - panic("info timed out") - } - - log, err := w.TaskLog(id) - Expect(err, ToBeNil) - Expect(log, ToDeepEqual, []byte("Hello world!\n")) - }) - - It("Cancels a task", func() { - delay := 10 * time.Second - ids, err := w.TaskQueue(Task{CodeName: "GoFun", Delay: &delay}) - Expect(err, ToBeNil) - - id := ids[0] - err = w.TaskCancel(id) - Expect(err, ToBeNil) - - info, err := w.TaskInfo(id) - Expect(info.Status, ToEqual, "cancelled") - }) - - It("Queues a lot of tasks and lists them", func() { - delay := 100 * time.Second - ids, err := w.TaskQueue(Task{CodeName: "GoFun", Delay: &delay}) - Expect(err, ToBeNil) - firstId := ids[0] - time.Sleep(1 * time.Second) - - ids, err = w.TaskQueue(Task{CodeName: "GoFun", Delay: &delay}) - Expect(err, ToBeNil) - secondId := ids[0] - - tasks, err := w.TaskList() - Expect(err, ToBeNil) - - Expect(tasks[0].CreatedAt.After(tasks[1].CreatedAt), ToEqual, true) - Expect(tasks[0].Id, ToEqual, secondId) - Expect(tasks[1].Id, ToEqual, firstId) - }) - - It("Schedules a Task ", func() { - delay := 10 * time.Second - ids, err := w.Schedule(Schedule{ - Name: "ScheduledGoFun", - CodeName: "GoFun", - Payload: "foobar", - Delay: &delay, - }) - - Expect(err, ToBeNil) - id := ids[0] - - info, err := w.ScheduleInfo(id) - Expect(err, ToBeNil) - Expect(info.CodeName, ToEqual, "GoFun") - Expect(info.Status, ToEqual, "scheduled") - }) - - It("Cancels a scheduled task", func() { - delay := 10 * time.Second - ids, err := w.Schedule(Schedule{ - Name: "ScheduledGoFun", - CodeName: "GoFun", - Payload: "foobar", - Delay: &delay, - }) - - Expect(err, ToBeNil) - id := ids[0] - - err = w.ScheduleCancel(id) - Expect(err, ToBeNil) - - info, err := w.ScheduleInfo(id) - Expect(err, ToBeNil) - Expect(info.CodeName, ToEqual, "GoFun") - Expect(info.Status, ToEqual, "cancelled") - }) - }) -} diff --git a/vendor/github.com/mattes/migrate/.gitignore b/vendor/github.com/mattes/migrate/.gitignore new file mode 100644 index 000000000..938901207 --- /dev/null +++ b/vendor/github.com/mattes/migrate/.gitignore @@ 
-0,0 +1,6 @@ +.DS_Store +cli/build +cli/cli +cli/migrate +.coverage +.godoc.pid diff --git a/vendor/github.com/mattes/migrate/.travis.yml b/vendor/github.com/mattes/migrate/.travis.yml new file mode 100644 index 000000000..9535b3658 --- /dev/null +++ b/vendor/github.com/mattes/migrate/.travis.yml @@ -0,0 +1,62 @@ +language: go +sudo: required + +go: + - 1.7 + - 1.8 + - 1.9 + +env: + - MIGRATE_TEST_CONTAINER_BOOT_DELAY=10 + +# TODO: https://docs.docker.com/engine/installation/linux/ubuntu/ +# pre-provision with travis docker setup and pin down docker version in install step +services: + - docker + +install: + - make deps + - (cd $GOPATH/src/github.com/docker/docker && git fetch --all --tags --prune && git checkout v17.05.0-ce) + - sudo apt-get update && sudo apt-get install docker-ce=17.05.0* + - go get github.com/mattn/goveralls + +script: + - make test + +after_success: + - goveralls -service=travis-ci -coverprofile .coverage/combined.txt + - make list-external-deps > dependency_tree.txt && cat dependency_tree.txt + +before_deploy: + - make build-cli + - gem install --no-ri --no-rdoc fpm + - fpm -s dir -t deb -n migrate -v "$(git describe --tags 2>/dev/null | cut -c 2-)" --license MIT -m matthias.kadenbach@gmail.com --url https://github.com/mattes/migrate --description='Database migrations' -a amd64 -p migrate.$(git describe --tags 2>/dev/null | cut -c 2-).deb --deb-no-default-config-files -f -C cli/build migrate.linux-amd64=/usr/bin/migrate + +deploy: + - provider: releases + api_key: + secure: EFow50BI448HVb/uQ1Kk2Kq0xzmwIYq3V67YyymXIuqSCodvXEsMiBPUoLrxEknpPEIc67LEQTNdfHBgvyHk6oRINWAfie+7pr5tKrpOTF9ghyxoN1PlO8WKQCqwCvGMBCnc5ur5rvzp0bqfpV2rs5q9/nngy3kBuEvs12V7iho= + skip_cleanup: true + on: + go: 1.8 + repo: mattes/migrate + tags: true + file: + - cli/build/migrate.linux-amd64.tar.gz + - cli/build/migrate.darwin-amd64.tar.gz + - cli/build/migrate.windows-amd64.exe.tar.gz + - cli/build/sha256sum.txt + - dependency_tree.txt + - provider: packagecloud + repository: migrate + username: mattes + token: + secure: RiHJ/+J9DvXUah/APYdWySWZ5uOOISYJ0wS7xddc7/BNStRVjzFzvJ9zmb67RkyZZrvGuVjPiL4T8mtDyCJCj47RmU/56wPdEHbar/FjsiUCgwvR19RlulkgbV4okBCePbwzMw6HNHRp14TzfQCPtnN4kef0lOI4gZJkImN7rtQ= + dist: ubuntu/xenial + package_glob: '*.deb' + skip_cleanup: true + on: + go: 1.8 + repo: mattes/migrate + tags: true + diff --git a/vendor/github.com/mattes/migrate/CONTRIBUTING.md b/vendor/github.com/mattes/migrate/CONTRIBUTING.md new file mode 100644 index 000000000..fcf82a42e --- /dev/null +++ b/vendor/github.com/mattes/migrate/CONTRIBUTING.md @@ -0,0 +1,22 @@ +# Development, Testing and Contributing + + 1. Make sure you have a running Docker daemon + (Install for [MacOS](https://docs.docker.com/docker-for-mac/)) + 2. Fork this repo and `git clone` somewhere to `$GOPATH/src/github.com/%you%/migrate` + 3. `make rewrite-import-paths` to update imports to your local fork + 4. Confirm tests are working: `make test-short` + 5. Write awesome code ... + 6. `make test` to run all tests against all database versions + 7. `make restore-import-paths` to restore import paths + 8. Push code and open Pull Request + +Some more helpful commands: + + * You can specify which database/ source tests to run: + `make test-short SOURCE='file go-bindata' DATABASE='postgres cassandra'` + * After `make test`, run `make html-coverage` which opens a shiny test coverage overview. + * Missing imports? `make deps` + * `make build-cli` builds the CLI in directory `cli/build/`. 
+ * `make list-external-deps` lists all external dependencies for each package + * `make docs && make open-docs` opens godoc in your browser, `make kill-docs` kills the godoc server. + Repeatedly call `make docs` to refresh the server. diff --git a/vendor/github.com/mattes/migrate/FAQ.md b/vendor/github.com/mattes/migrate/FAQ.md new file mode 100644 index 000000000..f8bb9a85b --- /dev/null +++ b/vendor/github.com/mattes/migrate/FAQ.md @@ -0,0 +1,67 @@ +# FAQ + +#### How is the code base structured? + ``` + / package migrate (the heart of everything) + /cli the CLI wrapper + /database database driver and sub directories have the actual driver implementations + /source source driver and sub directories have the actual driver implementations + ``` + +#### Why is there no `source/driver.go:Last()`? + It's not needed. And unless the source has a "native" way to read a directory in reversed order, + it might be expensive to do a full directory scan in order to get the last element. + +#### What is a NilMigration? NilVersion? + NilMigration defines a migration without a body. NilVersion is defined as const -1. + +#### What is the difference between uint(version) and int(targetVersion)? + version refers to an existing migration version coming from a source and therefore can never be negative. + targetVersion can either be a version OR represent a NilVersion, which equals -1. + +#### What's the difference between Next/Previous and Up/Down? + ``` + 1_first_migration.up.extension next -> 2_second_migration.up.extension ... + 1_first_migration.down.extension <- previous 2_second_migration.down.extension ... + ``` + +#### Why two separate files (up and down) for a migration? + It makes all of our lives easier. No new markup/syntax to learn for users + and existing database utility tools continue to work as expected. + +#### How many migrations can migrate handle? + Whatever the maximum positive signed integer value is for your platform. + For 32-bit it would be 2,147,483,647 migrations. Migrate only keeps references to + the currently run and pre-fetched migrations in memory. Please note that some + source drivers need to build a full "directory" tree first, which increases + memory consumption. + +#### Are the table tests in migrate_test.go bloated? + Yes and no. There are duplicate test cases for sure but they don't hurt here. In fact + the tests are very visual now and might help new users understand expected behaviors quickly. + Migrate from version x to y and y is the last migration? Just check out the test for + that particular case and know what's going on instantly. + +#### What is Docker being used for? + Only for testing. See [testing/docker.go](testing/docker.go) + +#### Why not just use docker-compose? + It doesn't give us enough runtime control for testing. We want to be able to bring up containers fast + and whenever we want, not just once at the beginning of all tests. + +#### Can I maintain my driver in my own repository? + Yes, technically that's possible. We want to encourage you to contribute your driver to this repository though. + The driver's functionality is dictated by migrate's interfaces. That means there should really + just be one driver for a database/ source. We want to prevent a future where several drivers doing the exact same thing, + just implemented a bit differently, co-exist somewhere on Github.
If users have to do research first to find the + "best" available driver for a database in order to get started, we would have failed as an open source community. + +#### Can I mix multiple sources during a batch of migrations? + No. + +#### What does "dirty" database mean? + Before a migration runs, each database sets a dirty flag. Execution stops if a migration fails and the dirty state persists, + which prevents attempts to run more migrations on top of a failed migration. You need to manually fix the error + and then "force" the expected version. + + diff --git a/vendor/github.com/mattes/migrate/LICENSE b/vendor/github.com/mattes/migrate/LICENSE new file mode 100644 index 000000000..62efa3670 --- /dev/null +++ b/vendor/github.com/mattes/migrate/LICENSE @@ -0,0 +1,23 @@ +The MIT License (MIT) + +Copyright (c) 2016 Matthias Kadenbach + +https://github.com/mattes/migrate + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mattes/migrate/MIGRATIONS.md b/vendor/github.com/mattes/migrate/MIGRATIONS.md new file mode 100644 index 000000000..fbefb9278 --- /dev/null +++ b/vendor/github.com/mattes/migrate/MIGRATIONS.md @@ -0,0 +1,81 @@ +# Migrations + +## Migration Filename Format + +A single logical migration is represented as two separate migration files, one +to migrate "up" to the specified version from the previous version, and a second +to migrate back "down" to the previous version. These migrations can be provided +by any one of the supported [migration sources](./README.md#migration-sources). + +The ordering and direction of the migration files is determined by the filenames +used for them. `migrate` expects the filenames of migrations to have the format: + + {version}_{title}.up.{extension} + {version}_{title}.down.{extension} + +The `title` of each migration is unused, and is only for readability. Similarly, +the `extension` of the migration files is not checked by the library, and should +be an appropriate format for the database in use (`.sql` for SQL variants, for +instance). + +Versions of migrations may be represented as any 64 bit unsigned integer. +All migrations are applied upward in order of increasing version number, and +downward by decreasing version number. + +Common versioning schemes include incrementing integers: + + 1_initialize_schema.down.sql + 1_initialize_schema.up.sql + 2_add_table.down.sql + 2_add_table.up.sql + ... 
+ +Or timestamps at an appropriate resolution: + + 1500360784_initialize_schema.down.sql + 1500360784_initialize_schema.up.sql + 1500445949_add_table.down.sql + 1500445949_add_table.up.sql + ... + +But any scheme resulting in distinct, incrementing integers as versions is valid. + +It is suggested that the version number of corresponding `up` and `down` migration +files be equivalent for clarity, but they are allowed to differ so long as the +relative ordering of the migrations is preserved. + +The migration files are permitted to be empty, so in the event that a migration +is a no-op or is irreversible, it is recommended to still include both migration +files, either leaving them empty or adding a comment as appropriate. + +## Migration Content Format + +The format of the migration files themselves varies between database systems. +Different databases have different semantics around schema changes and when and +how they are allowed to occur (for instance, if schema changes can occur within +a transaction). + +As such, the `migrate` library has little to no checking around the format of +migration sources. The migration files are generally processed directly by the +drivers as raw operations. + +## Reversibility of Migrations + +Best practice for writing schema migrations is that all migrations should be +reversible. It should in theory be possible to run migrations down and back up +through any and all versions with the state being fully cleaned and recreated +by doing so. + +By adhering to this recommended practice, development and deployment of new code +is cleaner and easier (cleaning database state for a new feature should be as +easy as migrating down to a prior version, and back up to the latest). + +As opposed to some other migration libraries, `migrate` represents up and down +migrations as separate files. This prevents any non-standard file syntax from +being introduced which may result in unintended behavior or errors, depending +on what database is processing the file. + +While it is technically possible for an up or down migration to exist on its own +without an equivalently versioned counterpart, it is strongly recommended to +always include a down migration which cleans up the state of the corresponding +up migration. diff --git a/vendor/github.com/mattes/migrate/Makefile b/vendor/github.com/mattes/migrate/Makefile new file mode 100644 index 000000000..e36394bed --- /dev/null +++ b/vendor/github.com/mattes/migrate/Makefile @@ -0,0 +1,123 @@ +SOURCE ?= file go-bindata github aws-s3 google-cloud-storage +DATABASE ?= postgres mysql redshift cassandra sqlite3 spanner cockroachdb clickhouse +VERSION ?= $(shell git describe --tags 2>/dev/null | cut -c 2-) +TEST_FLAGS ?= +REPO_OWNER ?= $(shell cd .. && basename "$$(pwd)") + + +build-cli: clean + -mkdir ./cli/build + cd ./cli && CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -a -o build/migrate.linux-amd64 -ldflags='-X main.Version=$(VERSION)' -tags '$(DATABASE) $(SOURCE)' . + cd ./cli && CGO_ENABLED=1 GOOS=darwin GOARCH=amd64 go build -a -o build/migrate.darwin-amd64 -ldflags='-X main.Version=$(VERSION)' -tags '$(DATABASE) $(SOURCE)' . + cd ./cli && CGO_ENABLED=1 GOOS=windows GOARCH=amd64 go build -a -o build/migrate.windows-amd64.exe -ldflags='-X main.Version=$(VERSION)' -tags '$(DATABASE) $(SOURCE)' . + cd ./cli/build && find .
-name 'migrate*' | xargs -I{} tar czf {}.tar.gz {} + cd ./cli/build && shasum -a 256 * > sha256sum.txt + cat ./cli/build/sha256sum.txt + + +clean: + -rm -r ./cli/build + + +test-short: + make test-with-flags --ignore-errors TEST_FLAGS='-short' + + +test: + @-rm -r .coverage + @mkdir .coverage + make test-with-flags TEST_FLAGS='-v -race -covermode atomic -coverprofile .coverage/_$$(RAND).txt -bench=. -benchmem' + @echo 'mode: atomic' > .coverage/combined.txt + @cat .coverage/*.txt | grep -v 'mode: atomic' >> .coverage/combined.txt + + +test-with-flags: + @echo SOURCE: $(SOURCE) + @echo DATABASE: $(DATABASE) + + @go test $(TEST_FLAGS) . + @go test $(TEST_FLAGS) ./cli/... + @go test $(TEST_FLAGS) ./testing/... + + @echo -n '$(SOURCE)' | tr -s ' ' '\n' | xargs -I{} go test $(TEST_FLAGS) ./source/{} + @go test $(TEST_FLAGS) ./source/testing/... + @go test $(TEST_FLAGS) ./source/stub/... + + @echo -n '$(DATABASE)' | tr -s ' ' '\n' | xargs -I{} go test $(TEST_FLAGS) ./database/{} + @go test $(TEST_FLAGS) ./database/testing/... + @go test $(TEST_FLAGS) ./database/stub/... + + +kill-orphaned-docker-containers: + docker rm -f $(shell docker ps -aq --filter label=migrate_test) + + +html-coverage: + go tool cover -html=.coverage/combined.txt + + +deps: + -go get -v -u ./... + -go test -v -i ./... + # TODO: why is this not being fetched with the command above? + -go get -u github.com/fsouza/fake-gcs-server/fakestorage + + +list-external-deps: + $(call external_deps,'.') + $(call external_deps,'./cli/...') + $(call external_deps,'./testing/...') + + $(foreach v, $(SOURCE), $(call external_deps,'./source/$(v)/...')) + $(call external_deps,'./source/testing/...') + $(call external_deps,'./source/stub/...') + + $(foreach v, $(DATABASE), $(call external_deps,'./database/$(v)/...')) + $(call external_deps,'./database/testing/...') + $(call external_deps,'./database/stub/...') + + +restore-import-paths: + find . -name '*.go' -type f -execdir sed -i '' s%\"github.com/$(REPO_OWNER)/migrate%\"github.com/mattes/migrate%g '{}' \; + + +rewrite-import-paths: + find . -name '*.go' -type f -execdir sed -i '' s%\"github.com/mattes/migrate%\"github.com/$(REPO_OWNER)/migrate%g '{}' \; + + +# example: fswatch -0 --exclude .godoc.pid --event Updated . | xargs -0 -n1 -I{} make docs +docs: + -make kill-docs + nohup godoc -play -http=127.0.0.1:6064 /dev/null 2>&1 & echo $$! > .godoc.pid + cat .godoc.pid + + +kill-docs: + @cat .godoc.pid + kill -9 $$(cat .godoc.pid) + rm .godoc.pid + + +open-docs: + open http://localhost:6064/pkg/github.com/$(REPO_OWNER)/migrate + + +# example: make release V=0.0.0 +release: + git tag v$(V) + @read -p "Press enter to confirm and push to origin ..." 
&& git push origin v$(V) + + +define external_deps + @echo '-- $(1)'; go list -f '{{join .Deps "\n"}}' $(1) | grep -v github.com/$(REPO_OWNER)/migrate | xargs go list -f '{{if not .Standard}}{{.ImportPath}}{{end}}' + +endef + + +.PHONY: build-cli clean test-short test test-with-flags deps html-coverage \ + restore-import-paths rewrite-import-paths list-external-deps release \ + docs kill-docs open-docs kill-orphaned-docker-containers + +SHELL = /bin/bash +RAND = $(shell echo $$RANDOM) + diff --git a/vendor/github.com/mattes/migrate/README.md b/vendor/github.com/mattes/migrate/README.md new file mode 100644 index 000000000..cbc19d88c --- /dev/null +++ b/vendor/github.com/mattes/migrate/README.md @@ -0,0 +1,140 @@ +[![Build Status](https://travis-ci.org/mattes/migrate.svg?branch=master)](https://travis-ci.org/mattes/migrate) +[![GoDoc](https://godoc.org/github.com/mattes/migrate?status.svg)](https://godoc.org/github.com/mattes/migrate) +[![Coverage Status](https://coveralls.io/repos/github/mattes/migrate/badge.svg?branch=v3.0-prev)](https://coveralls.io/github/mattes/migrate?branch=v3.0-prev) +[![packagecloud.io](https://img.shields.io/badge/deb-packagecloud.io-844fec.svg)](https://packagecloud.io/mattes/migrate?filter=debs) + +# migrate + +__Database migrations written in Go. Use as [CLI](#cli-usage) or import as [library](#use-in-your-go-project).__ + + * Migrate reads migrations from [sources](#migration-sources) + and applies them in correct order to a [database](#databases). + * Drivers are "dumb", migrate glues everything together and makes sure the logic is bulletproof. + (Keeps the drivers lightweight, too.) + * Database drivers don't assume things or try to correct user input. When in doubt, fail. + + +Looking for [v1](https://github.com/mattes/migrate/tree/v1)? + + +## Databases + +Database drivers run migrations. [Add a new database?](database/driver.go) + + * [PostgreSQL](database/postgres) + * [Redshift](database/redshift) + * [Ql](database/ql) + * [Cassandra](database/cassandra) + * [SQLite](database/sqlite3) + * [MySQL/ MariaDB](database/mysql) + * [Neo4j](database/neo4j) ([todo #167](https://github.com/mattes/migrate/issues/167)) + * [MongoDB](database/mongodb) ([todo #169](https://github.com/mattes/migrate/issues/169)) + * [CrateDB](database/crate) ([todo #170](https://github.com/mattes/migrate/issues/170)) + * [Shell](database/shell) ([todo #171](https://github.com/mattes/migrate/issues/171)) + * [Google Cloud Spanner](database/spanner) + * [CockroachDB](database/cockroachdb) + * [ClickHouse](database/clickhouse) + + +## Migration Sources + +Source drivers read migrations from local or remote sources. [Add a new source?](source/driver.go) + + * [Filesystem](source/file) - read from filesystem (always included) + * [Go-Bindata](source/go-bindata) - read from embedded binary data ([jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata)) + * [Github](source/github) - read from remote Github repositories + * [AWS S3](source/aws-s3) - read from Amazon Web Services S3 + * [Google Cloud Storage](source/google-cloud-storage) - read from Google Cloud Platform Storage + + + +## CLI usage + + * Simple wrapper around this library. + * Handles ctrl+c (SIGINT) gracefully. + * No config search paths, no config files, no magic ENV var injections.
+ +__[CLI Documentation](cli)__ + +([brew todo #156](https://github.com/mattes/migrate/issues/156)) + +``` +$ brew install migrate --with-postgres +$ migrate -database postgres://localhost:5432/database up 2 +``` + + +## Use in your Go project + + * API is stable and frozen for this release (v3.x). + * Package migrate has no external dependencies. + * Only import the drivers you need. + (check [dependency_tree.txt](https://github.com/mattes/migrate/releases) for each driver) + * To help prevent database corruptions, it supports graceful stops via `GracefulStop chan bool`. + * Bring your own logger. + * Uses `io.Reader` streams internally for low memory overhead. + * Thread-safe and no goroutine leaks. + +__[Go Documentation](https://godoc.org/github.com/mattes/migrate)__ + +```go +import ( + "github.com/mattes/migrate" + _ "github.com/mattes/migrate/database/postgres" + _ "github.com/mattes/migrate/source/github" +) + +func main() { + m, err := migrate.New( + "github://mattes:personal-access-token@mattes/migrate_test", + "postgres://localhost:5432/database?sslmode=disable") + m.Steps(2) +} +``` + +Want to use an existing database client? + +```go +import ( + "database/sql" + _ "github.com/lib/pq" + "github.com/mattes/migrate" + "github.com/mattes/migrate/database/postgres" + _ "github.com/mattes/migrate/source/file" +) + +func main() { + db, err := sql.Open("postgres", "postgres://localhost:5432/database?sslmode=disable") + driver, err := postgres.WithInstance(db, &postgres.Config{}) + m, err := migrate.NewWithDatabaseInstance( + "file:///migrations", + "postgres", driver) + m.Steps(2) +} +``` + +## Migration files + +Each migration has an up and down migration. [Why?](FAQ.md#why-two-separate-files-up-and-down-for-a-migration) + +``` +1481574547_create_users_table.up.sql +1481574547_create_users_table.down.sql +``` + +[Best practices: How to write migrations.](MIGRATIONS.md) + + + +## Development and Contributing + +Yes, please! [`Makefile`](Makefile) is your friend, +read the [development guide](CONTRIBUTING.md). + +Also have a look at the [FAQ](FAQ.md). + + + +--- + +Looking for alternatives? [https://awesome-go.com/#database](https://awesome-go.com/#database). diff --git a/vendor/github.com/mattes/migrate/cli/README.md b/vendor/github.com/mattes/migrate/cli/README.md new file mode 100644 index 000000000..c0886d5a7 --- /dev/null +++ b/vendor/github.com/mattes/migrate/cli/README.md @@ -0,0 +1,113 @@ +# migrate CLI + +## Installation + +#### With Go toolchain + +``` +$ go get -u -d github.com/mattes/migrate/cli github.com/lib/pq +$ go build -tags 'postgres' -o /usr/local/bin/migrate github.com/mattes/migrate/cli +``` + +Note: This example builds the cli which will only work with postgres. In order +to build the cli for use with other databases, replace the `postgres` build tag +with the appropriate database tag(s) for the databases desired. The tags +correspond to the names of the sub-packages underneath the +[`database`](../database) package.
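+ +For example, a build supporting both PostgreSQL and MySQL might look like this (a sketch, not part of the official docs — the tag names come from the sub-package names under [`database`](../database), and the extra `go get` is assumed to fetch the MySQL driver's dependency): + +``` +$ go get -u -d github.com/mattes/migrate/cli github.com/lib/pq github.com/go-sql-driver/mysql +$ go build -tags 'postgres mysql' -o /usr/local/bin/migrate github.com/mattes/migrate/cli +```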
+ +#### MacOS + +([todo #156](https://github.com/mattes/migrate/issues/156)) + +``` +$ brew install migrate --with-postgres +``` + +#### Linux (*.deb package) + +``` +$ curl -L https://packagecloud.io/mattes/migrate/gpgkey | apt-key add - +$ echo "deb https://packagecloud.io/mattes/migrate/ubuntu/ xenial main" > /etc/apt/sources.list.d/migrate.list +$ apt-get update +$ apt-get install -y migrate +``` + +#### Download pre-built binary (Windows, MacOS, or Linux) + +[Release Downloads](https://github.com/mattes/migrate/releases) + +``` +$ curl -L https://github.com/mattes/migrate/releases/download/$version/migrate.$platform-amd64.tar.gz | tar xvz +``` + + + +## Usage + +``` +$ migrate -help +Usage: migrate OPTIONS COMMAND [arg...] + migrate [ -version | -help ] + +Options: + -source Location of the migrations (driver://url) + -path Shorthand for -source=file://path + -database Run migrations against this database (driver://url) + -prefetch N Number of migrations to load in advance before executing (default 10) + -lock-timeout N Allow N seconds to acquire database lock (default 15) + -verbose Print verbose logging + -version Print version + -help Print usage + +Commands: + create [-ext E] [-dir D] NAME + Create a set of timestamped up/down migrations titled NAME, in directory D with extension E + goto V Migrate to version V + up [N] Apply all or N up migrations + down [N] Apply all or N down migrations + drop Drop everything inside database + force V Set version V but don't run migration (ignores dirty state) + version Print current migration version +``` + + +So let's say you want to run the first two migrations + +``` +$ migrate -database postgres://localhost:5432/database up 2 +``` + +If your migrations are hosted on github + +``` +$ migrate -source github://mattes:personal-access-token@mattes/migrate_test \ + -database postgres://localhost:5432/database down 2 +``` + +The CLI will gracefully stop at a safe point when SIGINT (ctrl+c) is received. +Send SIGKILL for immediate halt.
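+ +To scaffold a new migration, the `create` command writes a timestamped up/down pair (a sketch — the name `add_users_table` and directory `migrations` are illustrative, and the timestamp is taken from the current Unix time, so yours will differ): + +``` +$ migrate create -ext sql -dir migrations add_users_table +$ ls migrations +1500445949_add_users_table.down.sql +1500445949_add_users_table.up.sql +```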
+ + + +## Reading CLI arguments from somewhere else + +##### ENV variables + +``` +$ migrate -database "$MY_MIGRATE_DATABASE" +``` + +##### JSON files + +Check out https://stedolan.github.io/jq/ + +``` +$ migrate -database "$(cat config.json | jq -r '.database')" +``` + +##### YAML files + +``` +$ migrate -database "$(cat config/database.yml | ruby -ryaml -e "print YAML.load(STDIN.read)['database']")" +$ migrate -database "$(cat config/database.yml | python -c 'import yaml,sys;print yaml.safe_load(sys.stdin)["database"]')" +``` diff --git a/vendor/github.com/mattes/migrate/cli/build_aws-s3.go b/vendor/github.com/mattes/migrate/cli/build_aws-s3.go new file mode 100644 index 000000000..766fd5663 --- /dev/null +++ b/vendor/github.com/mattes/migrate/cli/build_aws-s3.go @@ -0,0 +1,7 @@ +// +build aws-s3 + +package main + +import ( + _ "github.com/mattes/migrate/source/aws-s3" +) diff --git a/vendor/github.com/mattes/migrate/cli/build_cassandra.go b/vendor/github.com/mattes/migrate/cli/build_cassandra.go new file mode 100644 index 000000000..319b52d2a --- /dev/null +++ b/vendor/github.com/mattes/migrate/cli/build_cassandra.go @@ -0,0 +1,7 @@ +// +build cassandra + +package main + +import ( + _ "github.com/mattes/migrate/database/cassandra" +) diff --git a/vendor/github.com/mattes/migrate/cli/build_clickhouse.go b/vendor/github.com/mattes/migrate/cli/build_clickhouse.go new file mode 100644 index 000000000..c9175e280 --- /dev/null +++ b/vendor/github.com/mattes/migrate/cli/build_clickhouse.go @@ -0,0 +1,8 @@ +// +build clickhouse + +package main + +import ( + _ "github.com/kshvakov/clickhouse" + _ "github.com/mattes/migrate/database/clickhouse" +) diff --git a/vendor/github.com/mattes/migrate/cli/build_cockroachdb.go b/vendor/github.com/mattes/migrate/cli/build_cockroachdb.go new file mode 100644 index 000000000..e5fdf073e --- /dev/null +++ b/vendor/github.com/mattes/migrate/cli/build_cockroachdb.go @@ -0,0 +1,7 @@ +// +build cockroachdb + +package main + +import ( + _ "github.com/mattes/migrate/database/cockroachdb" +) diff --git a/vendor/github.com/mattes/migrate/cli/build_github.go b/vendor/github.com/mattes/migrate/cli/build_github.go new file mode 100644 index 000000000..9c813b46c --- /dev/null +++ b/vendor/github.com/mattes/migrate/cli/build_github.go @@ -0,0 +1,7 @@ +// +build github + +package main + +import ( + _ "github.com/mattes/migrate/source/github" +) diff --git a/vendor/github.com/mattes/migrate/cli/build_go-bindata.go b/vendor/github.com/mattes/migrate/cli/build_go-bindata.go new file mode 100644 index 000000000..8a6a89349 --- /dev/null +++ b/vendor/github.com/mattes/migrate/cli/build_go-bindata.go @@ -0,0 +1,7 @@ +// +build go-bindata + +package main + +import ( + _ "github.com/mattes/migrate/source/go-bindata" +) diff --git a/vendor/github.com/mattes/migrate/cli/build_google-cloud-storage.go b/vendor/github.com/mattes/migrate/cli/build_google-cloud-storage.go new file mode 100644 index 000000000..04f314338 --- /dev/null +++ b/vendor/github.com/mattes/migrate/cli/build_google-cloud-storage.go @@ -0,0 +1,7 @@ +// +build google-cloud-storage + +package main + +import ( + _ "github.com/mattes/migrate/source/google-cloud-storage" +) diff --git a/vendor/github.com/mattes/migrate/cli/build_mysql.go b/vendor/github.com/mattes/migrate/cli/build_mysql.go new file mode 100644 index 000000000..177766f5e --- /dev/null +++ b/vendor/github.com/mattes/migrate/cli/build_mysql.go @@ -0,0 +1,7 @@ +// +build mysql + +package main + +import ( + _ "github.com/mattes/migrate/database/mysql" +) diff
--git a/vendor/github.com/mattes/migrate/cli/build_postgres.go b/vendor/github.com/mattes/migrate/cli/build_postgres.go new file mode 100644 index 000000000..87f6be757 --- /dev/null +++ b/vendor/github.com/mattes/migrate/cli/build_postgres.go @@ -0,0 +1,7 @@ +// +build postgres + +package main + +import ( + _ "github.com/mattes/migrate/database/postgres" +) diff --git a/vendor/github.com/mattes/migrate/cli/build_ql.go b/vendor/github.com/mattes/migrate/cli/build_ql.go new file mode 100644 index 000000000..cd56ef958 --- /dev/null +++ b/vendor/github.com/mattes/migrate/cli/build_ql.go @@ -0,0 +1,7 @@ +// +build ql + +package main + +import ( + _ "github.com/mattes/migrate/database/ql" +) diff --git a/vendor/github.com/mattes/migrate/cli/build_redshift.go b/vendor/github.com/mattes/migrate/cli/build_redshift.go new file mode 100644 index 000000000..8153d0aa3 --- /dev/null +++ b/vendor/github.com/mattes/migrate/cli/build_redshift.go @@ -0,0 +1,7 @@ +// +build redshift + +package main + +import ( + _ "github.com/mattes/migrate/database/redshift" +) diff --git a/vendor/github.com/mattes/migrate/cli/build_spanner.go b/vendor/github.com/mattes/migrate/cli/build_spanner.go new file mode 100644 index 000000000..7223d820b --- /dev/null +++ b/vendor/github.com/mattes/migrate/cli/build_spanner.go @@ -0,0 +1,7 @@ +// +build spanner + +package main + +import ( + _ "github.com/mattes/migrate/database/spanner" +) diff --git a/vendor/github.com/mattes/migrate/cli/build_sqlite3.go b/vendor/github.com/mattes/migrate/cli/build_sqlite3.go new file mode 100644 index 000000000..48ae8ebc2 --- /dev/null +++ b/vendor/github.com/mattes/migrate/cli/build_sqlite3.go @@ -0,0 +1,7 @@ +// +build sqlite3 + +package main + +import ( + _ "github.com/mattes/migrate/database/sqlite3" +) diff --git a/vendor/github.com/mattes/migrate/cli/commands.go b/vendor/github.com/mattes/migrate/cli/commands.go new file mode 100644 index 000000000..703896dc1 --- /dev/null +++ b/vendor/github.com/mattes/migrate/cli/commands.go @@ -0,0 +1,96 @@ +package main + +import ( + "github.com/mattes/migrate" + _ "github.com/mattes/migrate/database/stub" // TODO remove again + _ "github.com/mattes/migrate/source/file" + "os" + "fmt" +) + +func createCmd(dir string, timestamp int64, name string, ext string) { + base := fmt.Sprintf("%v%v_%v.", dir, timestamp, name) + os.MkdirAll(dir, os.ModePerm) + createFile(base + "up" + ext) + createFile(base + "down" + ext) +} + +func createFile(fname string) { + if _, err := os.Create(fname); err != nil { + log.fatalErr(err) + } +} + +func gotoCmd(m *migrate.Migrate, v uint) { + if err := m.Migrate(v); err != nil { + if err != migrate.ErrNoChange { + log.fatalErr(err) + } else { + log.Println(err) + } + } +} + +func upCmd(m *migrate.Migrate, limit int) { + if limit >= 0 { + if err := m.Steps(limit); err != nil { + if err != migrate.ErrNoChange { + log.fatalErr(err) + } else { + log.Println(err) + } + } + } else { + if err := m.Up(); err != nil { + if err != migrate.ErrNoChange { + log.fatalErr(err) + } else { + log.Println(err) + } + } + } +} + +func downCmd(m *migrate.Migrate, limit int) { + if limit >= 0 { + if err := m.Steps(-limit); err != nil { + if err != migrate.ErrNoChange { + log.fatalErr(err) + } else { + log.Println(err) + } + } + } else { + if err := m.Down(); err != nil { + if err != migrate.ErrNoChange { + log.fatalErr(err) + } else { + log.Println(err) + } + } + } +} + +func dropCmd(m *migrate.Migrate) { + if err := m.Drop(); err != nil { + log.fatalErr(err) + } +} + +func forceCmd(m *migrate.Migrate, 
v int) { + if err := m.Force(v); err != nil { + log.fatalErr(err) + } +} + +func versionCmd(m *migrate.Migrate) { + v, dirty, err := m.Version() + if err != nil { + log.fatalErr(err) + } + if dirty { + log.Printf("%v (dirty)\n", v) + } else { + log.Println(v) + } +} diff --git a/vendor/github.com/mattes/migrate/cli/examples/Dockerfile b/vendor/github.com/mattes/migrate/cli/examples/Dockerfile new file mode 100644 index 000000000..740f951f8 --- /dev/null +++ b/vendor/github.com/mattes/migrate/cli/examples/Dockerfile @@ -0,0 +1,12 @@ +FROM ubuntu:xenial + +RUN apt-get update && \ + apt-get install -y curl apt-transport-https + +RUN curl -L https://packagecloud.io/mattes/migrate/gpgkey | apt-key add - && \ + echo "deb https://packagecloud.io/mattes/migrate/ubuntu/ xenial main" > /etc/apt/sources.list.d/migrate.list && \ + apt-get update && \ + apt-get install -y migrate + +RUN migrate -version + diff --git a/vendor/github.com/mattes/migrate/cli/log.go b/vendor/github.com/mattes/migrate/cli/log.go new file mode 100644 index 000000000..a119d3481 --- /dev/null +++ b/vendor/github.com/mattes/migrate/cli/log.go @@ -0,0 +1,45 @@ +package main + +import ( + "fmt" + logpkg "log" + "os" +) + +type Log struct { + verbose bool +} + +func (l *Log) Printf(format string, v ...interface{}) { + if l.verbose { + logpkg.Printf(format, v...) + } else { + fmt.Fprintf(os.Stderr, format, v...) + } +} + +func (l *Log) Println(args ...interface{}) { + if l.verbose { + logpkg.Println(args...) + } else { + fmt.Fprintln(os.Stderr, args...) + } +} + +func (l *Log) Verbose() bool { + return l.verbose +} + +func (l *Log) fatalf(format string, v ...interface{}) { + l.Printf(format, v...) + os.Exit(1) +} + +func (l *Log) fatal(args ...interface{}) { + l.Println(args...) + os.Exit(1) +} + +func (l *Log) fatalErr(err error) { + l.fatal("error:", err) +} diff --git a/vendor/github.com/mattes/migrate/cli/main.go b/vendor/github.com/mattes/migrate/cli/main.go new file mode 100644 index 000000000..4c727a972 --- /dev/null +++ b/vendor/github.com/mattes/migrate/cli/main.go @@ -0,0 +1,237 @@ +package main + +import ( + "flag" + "fmt" + "os" + "os/signal" + "strconv" + "strings" + "syscall" + "time" + + "github.com/mattes/migrate" +) + +// set main log +var log = &Log{} + +func main() { + helpPtr := flag.Bool("help", false, "") + versionPtr := flag.Bool("version", false, "") + verbosePtr := flag.Bool("verbose", false, "") + prefetchPtr := flag.Uint("prefetch", 10, "") + lockTimeoutPtr := flag.Uint("lock-timeout", 15, "") + pathPtr := flag.String("path", "", "") + databasePtr := flag.String("database", "", "") + sourcePtr := flag.String("source", "", "") + + flag.Usage = func() { + fmt.Fprint(os.Stderr, + `Usage: migrate OPTIONS COMMAND [arg...] 
+ migrate [ -version | -help ] + +Options: + -source Location of the migrations (driver://url) + -path Shorthand for -source=file://path + -database Run migrations against this database (driver://url) + -prefetch N Number of migrations to load in advance before executing (default 10) + -lock-timeout N Allow N seconds to acquire database lock (default 15) + -verbose Print verbose logging + -version Print version + -help Print usage + +Commands: + create [-ext E] [-dir D] NAME + Create a set of timestamped up/down migrations titled NAME, in directory D with extension E + goto V Migrate to version V + up [N] Apply all or N up migrations + down [N] Apply all or N down migrations + drop Drop everything inside database + force V Set version V but don't run migration (ignores dirty state) + version Print current migration version +`) + } + + flag.Parse() + + // initialize logger + log.verbose = *verbosePtr + + // show cli version + if *versionPtr { + fmt.Fprintln(os.Stderr, Version) + os.Exit(0) + } + + // show help + if *helpPtr { + flag.Usage() + os.Exit(0) + } + + // translate -path into -source if given + if *sourcePtr == "" && *pathPtr != "" { + *sourcePtr = fmt.Sprintf("file://%v", *pathPtr) + } + + // initialize migrate + // don't catch migraterErr here and let each command decide + // how it wants to handle the error + migrater, migraterErr := migrate.New(*sourcePtr, *databasePtr) + defer func() { + if migraterErr == nil { + migrater.Close() + } + }() + if migraterErr == nil { + migrater.Log = log + migrater.PrefetchMigrations = *prefetchPtr + migrater.LockTimeout = time.Duration(int64(*lockTimeoutPtr)) * time.Second + + // handle Ctrl+c + signals := make(chan os.Signal, 1) + signal.Notify(signals, syscall.SIGINT) + go func() { + for range signals { + log.Println("Stopping after this running migration ...") + migrater.GracefulStop <- true + return + } + }() + } + + startTime := time.Now() + + switch flag.Arg(0) { + case "create": + args := flag.Args()[1:] + + createFlagSet := flag.NewFlagSet("create", flag.ExitOnError) + extPtr := createFlagSet.String("ext", "", "File extension") + dirPtr := createFlagSet.String("dir", "", "Directory to place file in (default: current working directory)") + createFlagSet.Parse(args) + + if createFlagSet.NArg() == 0 { + log.fatal("error: please specify name") + } + name := createFlagSet.Arg(0) + + if *extPtr != "" { + *extPtr = "."
+ strings.TrimPrefix(*extPtr, ".") + } + if *dirPtr != "" { + *dirPtr = strings.Trim(*dirPtr, "/") + "/" + } + + timestamp := startTime.Unix() + + createCmd(*dirPtr, timestamp, name, *extPtr) + + case "goto": + if migraterErr != nil { + log.fatalErr(migraterErr) + } + + if flag.Arg(1) == "" { + log.fatal("error: please specify version argument V") + } + + v, err := strconv.ParseUint(flag.Arg(1), 10, 64) + if err != nil { + log.fatal("error: can't read version argument V") + } + + gotoCmd(migrater, uint(v)) + + if log.verbose { + log.Println("Finished after", time.Now().Sub(startTime)) + } + + case "up": + if migraterErr != nil { + log.fatalErr(migraterErr) + } + + limit := -1 + if flag.Arg(1) != "" { + n, err := strconv.ParseUint(flag.Arg(1), 10, 64) + if err != nil { + log.fatal("error: can't read limit argument N") + } + limit = int(n) + } + + upCmd(migrater, limit) + + if log.verbose { + log.Println("Finished after", time.Now().Sub(startTime)) + } + + case "down": + if migraterErr != nil { + log.fatalErr(migraterErr) + } + + limit := -1 + if flag.Arg(1) != "" { + n, err := strconv.ParseUint(flag.Arg(1), 10, 64) + if err != nil { + log.fatal("error: can't read limit argument N") + } + limit = int(n) + } + + downCmd(migrater, limit) + + if log.verbose { + log.Println("Finished after", time.Now().Sub(startTime)) + } + + case "drop": + if migraterErr != nil { + log.fatalErr(migraterErr) + } + + dropCmd(migrater) + + if log.verbose { + log.Println("Finished after", time.Now().Sub(startTime)) + } + + case "force": + if migraterErr != nil { + log.fatalErr(migraterErr) + } + + if flag.Arg(1) == "" { + log.fatal("error: please specify version argument V") + } + + v, err := strconv.ParseInt(flag.Arg(1), 10, 64) + if err != nil { + log.fatal("error: can't read version argument V") + } + + if v < -1 { + log.fatal("error: argument V must be >= -1") + } + + forceCmd(migrater, int(v)) + + if log.verbose { + log.Println("Finished after", time.Now().Sub(startTime)) + } + + case "version": + if migraterErr != nil { + log.fatalErr(migraterErr) + } + + versionCmd(migrater) + + default: + flag.Usage() + os.Exit(0) + } +} diff --git a/vendor/github.com/mattes/migrate/cli/version.go b/vendor/github.com/mattes/migrate/cli/version.go new file mode 100644 index 000000000..6c3ec49fe --- /dev/null +++ b/vendor/github.com/mattes/migrate/cli/version.go @@ -0,0 +1,4 @@ +package main + +// Version is set in Makefile with build flags +var Version = "dev" diff --git a/vendor/github.com/mattes/migrate/database/cassandra/README.md b/vendor/github.com/mattes/migrate/database/cassandra/README.md new file mode 100644 index 000000000..f99b1105e --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/cassandra/README.md @@ -0,0 +1,31 @@ +# Cassandra + +* Drop command will not work on Cassandra 2.X because it relies on the +system_schema table, which comes with 3.X +* Other commands should work properly but are **not tested** + + +## Usage +`cassandra://host:port/keyspace?param1=value&param2=value2` + + +| URL Query | Default value | Description | +|------------|-------------|-----------| +| `x-migrations-table` | schema_migrations | Name of the migrations table | +| `port` | 9042 | The port to connect to | +| `consistency` | ALL | Migration consistency | +| `protocol` | | Cassandra protocol version (3 or 4) | +| `timeout` | 1 minute | Migration timeout | +| `username` | nil | Username to use when authenticating. | +| `password` | nil | Password to use when authenticating.
| + + +`timeout` is parsed using [time.ParseDuration(s string)](https://golang.org/pkg/time/#ParseDuration) + + +## Upgrading from v1 + +1. Write down the current migration version from schema_migrations +2. `DROP TABLE schema_migrations` +3. Download and install the latest migrate version. +4. Force the current migration version with `migrate force V`, using the version from step 1. diff --git a/vendor/github.com/mattes/migrate/database/cassandra/cassandra.go b/vendor/github.com/mattes/migrate/database/cassandra/cassandra.go new file mode 100644 index 000000000..42563fdbe --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/cassandra/cassandra.go @@ -0,0 +1,228 @@ +package cassandra + +import ( + "fmt" + "io" + "io/ioutil" + nurl "net/url" + "strconv" + "time" + + "github.com/gocql/gocql" + "github.com/mattes/migrate/database" +) + +func init() { + db := new(Cassandra) + database.Register("cassandra", db) +} + +var DefaultMigrationsTable = "schema_migrations" +var dbLocked = false + +var ( + ErrNilConfig = fmt.Errorf("no config") + ErrNoKeyspace = fmt.Errorf("no keyspace provided") + ErrDatabaseDirty = fmt.Errorf("database is dirty") +) + +type Config struct { + MigrationsTable string + KeyspaceName string +} + +type Cassandra struct { + session *gocql.Session + isLocked bool + + // Open and WithInstance need to guarantee that config is never nil + config *Config +} + +func (p *Cassandra) Open(url string) (database.Driver, error) { + u, err := nurl.Parse(url) + if err != nil { + return nil, err + } + + // Check for missing mandatory attributes + if len(u.Path) == 0 { + return nil, ErrNoKeyspace + } + + migrationsTable := u.Query().Get("x-migrations-table") + if len(migrationsTable) == 0 { + migrationsTable = DefaultMigrationsTable + } + + p.config = &Config{ + KeyspaceName: u.Path, + MigrationsTable: migrationsTable, + } + + cluster := gocql.NewCluster(u.Host) + cluster.Keyspace = u.Path[1:len(u.Path)] + cluster.Consistency = gocql.All + cluster.Timeout = 1 * time.Minute + + if len(u.Query().Get("username")) > 0 && len(u.Query().Get("password")) > 0 { + authenticator := gocql.PasswordAuthenticator{ + Username: u.Query().Get("username"), + Password: u.Query().Get("password"), + } + cluster.Authenticator = authenticator + } + + // Retrieve query string configuration + if len(u.Query().Get("consistency")) > 0 { + var consistency gocql.Consistency + consistency, err = parseConsistency(u.Query().Get("consistency")) + if err != nil { + return nil, err + } + + cluster.Consistency = consistency + } + if len(u.Query().Get("protocol")) > 0 { + var protoversion int + protoversion, err = strconv.Atoi(u.Query().Get("protocol")) + if err != nil { + return nil, err + } + cluster.ProtoVersion = protoversion + } + if len(u.Query().Get("timeout")) > 0 { + var timeout time.Duration + timeout, err = time.ParseDuration(u.Query().Get("timeout")) + if err != nil { + return nil, err + } + cluster.Timeout = timeout + } + + p.session, err = cluster.CreateSession() + + if err != nil { + return nil, err + } + + if err := p.ensureVersionTable(); err != nil { + return nil, err + } + + return p, nil +} + +func (p *Cassandra) Close() error { + p.session.Close() + return nil +} + +func (p *Cassandra) Lock() error { + if dbLocked { + return database.ErrLocked + } + dbLocked = true + return nil +} + +func (p *Cassandra) Unlock() error { + dbLocked = false + return nil +} + +func (p *Cassandra) Run(migration io.Reader) error { + migr, err := ioutil.ReadAll(migration) + if err != nil { + return err + } + // run migration + query := string(migr[:])
+ if err := p.session.Query(query).Exec(); err != nil { + // TODO: cast to Cassandra error and get line number + return database.Error{OrigErr: err, Err: "migration failed", Query: migr} + } + + return nil +} + +func (p *Cassandra) SetVersion(version int, dirty bool) error { + query := `TRUNCATE "` + p.config.MigrationsTable + `"` + if err := p.session.Query(query).Exec(); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + if version >= 0 { + query = `INSERT INTO "` + p.config.MigrationsTable + `" (version, dirty) VALUES (?, ?)` + if err := p.session.Query(query, version, dirty).Exec(); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + + return nil +} + +// Return current keyspace version +func (p *Cassandra) Version() (version int, dirty bool, err error) { + query := `SELECT version, dirty FROM "` + p.config.MigrationsTable + `" LIMIT 1` + err = p.session.Query(query).Scan(&version, &dirty) + switch { + case err == gocql.ErrNotFound: + return database.NilVersion, false, nil + + case err != nil: + if _, ok := err.(*gocql.Error); ok { + return database.NilVersion, false, nil + } + return 0, false, &database.Error{OrigErr: err, Query: []byte(query)} + + default: + return version, dirty, nil + } +} + +func (p *Cassandra) Drop() error { + // select all tables in current schema + query := fmt.Sprintf(`SELECT table_name from system_schema.tables WHERE keyspace_name='%s'`, p.config.KeyspaceName[1:]) // Skip '/' character + iter := p.session.Query(query).Iter() + var tableName string + for iter.Scan(&tableName) { + err := p.session.Query(fmt.Sprintf(`DROP TABLE %s`, tableName)).Exec() + if err != nil { + return err + } + } + // Re-create the version table + if err := p.ensureVersionTable(); err != nil { + return err + } + return nil +} + +// Ensure version table exists +func (p *Cassandra) ensureVersionTable() error { + err := p.session.Query(fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (version bigint, dirty boolean, PRIMARY KEY(version))", p.config.MigrationsTable)).Exec() + if err != nil { + return err + } + if _, _, err = p.Version(); err != nil { + return err + } + return nil +} + +// ParseConsistency wraps gocql.ParseConsistency +// to return an error instead of panicking.
+func parseConsistency(consistencyStr string) (consistency gocql.Consistency, err error) { + defer func() { + if r := recover(); r != nil { + var ok bool + err, ok = r.(error) + if !ok { + err = fmt.Errorf("Failed to parse consistency \"%s\": %v", consistencyStr, r) + } + } + }() + consistency = gocql.ParseConsistency(consistencyStr) + + return consistency, nil +} diff --git a/vendor/github.com/mattes/migrate/database/cassandra/cassandra_test.go b/vendor/github.com/mattes/migrate/database/cassandra/cassandra_test.go new file mode 100644 index 000000000..4ca764a04 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/cassandra/cassandra_test.go @@ -0,0 +1,53 @@ +package cassandra + +import ( + "fmt" + "testing" + dt "github.com/mattes/migrate/database/testing" + mt "github.com/mattes/migrate/testing" + "github.com/gocql/gocql" + "time" + "strconv" +) + +var versions = []mt.Version{ + {Image: "cassandra:3.0.10"}, + {Image: "cassandra:3.0"}, +} + +func isReady(i mt.Instance) bool { + // Cassandra exposes 5 ports (7000, 7001, 7199, 9042 & 9160) + // We only need the port bound to 9042, but we can only access the first one + // through 'i.Port()' (which calls DockerContainer.firstPortMapping()) + // So we need to get the port mapping to retrieve the correct port number bound to 9042 + portMap := i.NetworkSettings().Ports + port, _ := strconv.Atoi(portMap["9042/tcp"][0].HostPort) + + cluster := gocql.NewCluster(i.Host()) + cluster.Port = port + //cluster.ProtoVersion = 4 + cluster.Consistency = gocql.All + cluster.Timeout = 1 * time.Minute + p, err := cluster.CreateSession() + if err != nil { + return false + } + // Create keyspace for tests + p.Query("CREATE KEYSPACE testks WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor':1}").Exec() + return true +} + +func Test(t *testing.T) { + mt.ParallelTest(t, versions, isReady, + func(t *testing.T, i mt.Instance) { + p := &Cassandra{} + portMap := i.NetworkSettings().Ports + port, _ := strconv.Atoi(portMap["9042/tcp"][0].HostPort) + addr := fmt.Sprintf("cassandra://%v:%v/testks", i.Host(), port) + d, err := p.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + dt.Test(t, d, []byte("SELECT table_name from system_schema.tables")) + }) +} diff --git a/vendor/github.com/mattes/migrate/database/clickhouse/README.md b/vendor/github.com/mattes/migrate/database/clickhouse/README.md new file mode 100644 index 000000000..16dbbf965 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/clickhouse/README.md @@ -0,0 +1,12 @@ +# ClickHouse + +`clickhouse://host:port?username=user&password=qwerty&database=clicks` + +| URL Query | Description | +|------------|-------------| +| `x-migrations-table`| Name of the migrations table | +| `database` | The name of the database to connect to | +| `username` | The user to sign in as | +| `password` | The user's password | +| `host` | The host to connect to. | +| `port` | The port to connect to.
diff --git a/vendor/github.com/mattes/migrate/database/clickhouse/clickhouse.go b/vendor/github.com/mattes/migrate/database/clickhouse/clickhouse.go
new file mode 100644
index 000000000..fffc5585d
--- /dev/null
+++ b/vendor/github.com/mattes/migrate/database/clickhouse/clickhouse.go
@@ -0,0 +1,196 @@
+package clickhouse
+
+import (
+	"database/sql"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/url"
+	"time"
+
+	"github.com/mattes/migrate"
+	"github.com/mattes/migrate/database"
+)
+
+var DefaultMigrationsTable = "schema_migrations"
+
+var ErrNilConfig = fmt.Errorf("no config")
+
+type Config struct {
+	DatabaseName    string
+	MigrationsTable string
+}
+
+func init() {
+	database.Register("clickhouse", &ClickHouse{})
+}
+
+func WithInstance(conn *sql.DB, config *Config) (database.Driver, error) {
+	if config == nil {
+		return nil, ErrNilConfig
+	}
+
+	if err := conn.Ping(); err != nil {
+		return nil, err
+	}
+
+	ch := &ClickHouse{
+		conn:   conn,
+		config: config,
+	}
+
+	if err := ch.init(); err != nil {
+		return nil, err
+	}
+
+	return ch, nil
+}
+
+type ClickHouse struct {
+	conn   *sql.DB
+	config *Config
+}
+
+func (ch *ClickHouse) Open(dsn string) (database.Driver, error) {
+	purl, err := url.Parse(dsn)
+	if err != nil {
+		return nil, err
+	}
+	q := migrate.FilterCustomQuery(purl)
+	q.Scheme = "tcp"
+	conn, err := sql.Open("clickhouse", q.String())
+	if err != nil {
+		return nil, err
+	}
+
+	ch = &ClickHouse{
+		conn: conn,
+		config: &Config{
+			MigrationsTable: purl.Query().Get("x-migrations-table"),
+			DatabaseName:    purl.Query().Get("database"),
+		},
+	}
+
+	if err := ch.init(); err != nil {
+		return nil, err
+	}
+
+	return ch, nil
+}
+
+func (ch *ClickHouse) init() error {
+	if len(ch.config.DatabaseName) == 0 {
+		if err := ch.conn.QueryRow("SELECT currentDatabase()").Scan(&ch.config.DatabaseName); err != nil {
+			return err
+		}
+	}
+
+	if len(ch.config.MigrationsTable) == 0 {
+		ch.config.MigrationsTable = DefaultMigrationsTable
+	}
+
+	return ch.ensureVersionTable()
+}
+
+func (ch *ClickHouse) Run(r io.Reader) error {
+	migration, err := ioutil.ReadAll(r)
+	if err != nil {
+		return err
+	}
+	if _, err := ch.conn.Exec(string(migration)); err != nil {
+		return database.Error{OrigErr: err, Err: "migration failed", Query: migration}
+	}
+
+	return nil
+}
+
+func (ch *ClickHouse) Version() (int, bool, error) {
+	var (
+		version int
+		dirty   uint8
+		query   = "SELECT version, dirty FROM `" + ch.config.MigrationsTable + "` ORDER BY sequence DESC LIMIT 1"
+	)
+	if err := ch.conn.QueryRow(query).Scan(&version, &dirty); err != nil {
+		if err == sql.ErrNoRows {
+			return database.NilVersion, false, nil
+		}
+		return 0, false, &database.Error{OrigErr: err, Query: []byte(query)}
+	}
+	return version, dirty == 1, nil
+}
+
+func (ch *ClickHouse) SetVersion(version int, dirty bool) error {
+	var (
+		bool = func(v bool) uint8 {
+			if v {
+				return 1
+			}
+			return 0
+		}
+		tx, err = ch.conn.Begin()
+	)
+	if err != nil {
+		return err
+	}
+
+	query := "INSERT INTO " + ch.config.MigrationsTable + " (version, dirty, sequence) VALUES (?, ?, ?)"
+	if _, err := tx.Exec(query, version, bool(dirty), time.Now().UnixNano()); err != nil {
+		return &database.Error{OrigErr: err, Query: []byte(query)}
+	}
+
+	return tx.Commit()
+}
+
+func (ch *ClickHouse) ensureVersionTable() error {
+	var (
+		table string
+		query = "SHOW TABLES FROM " + ch.config.DatabaseName + " LIKE '" + ch.config.MigrationsTable + "'"
+	)
+	// check if migration table exists
+	if err := ch.conn.QueryRow(query).Scan(&table); err != nil {
+		if err != sql.ErrNoRows {
return &database.Error{OrigErr: err, Query: []byte(query)} + } + } else { + return nil + } + // if not, create the empty migration table + query = ` + CREATE TABLE ` + ch.config.MigrationsTable + ` ( + version UInt32, + dirty UInt8, + sequence UInt64 + ) Engine=TinyLog + ` + if _, err := ch.conn.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + return nil +} + +func (ch *ClickHouse) Drop() error { + var ( + query = "SHOW TABLES FROM " + ch.config.DatabaseName + tables, err = ch.conn.Query(query) + ) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + defer tables.Close() + for tables.Next() { + var table string + if err := tables.Scan(&table); err != nil { + return err + } + + query = "DROP TABLE IF EXISTS " + ch.config.DatabaseName + "." + table + + if _, err := ch.conn.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + return ch.ensureVersionTable() +} + +func (ch *ClickHouse) Lock() error { return nil } +func (ch *ClickHouse) Unlock() error { return nil } +func (ch *ClickHouse) Close() error { return ch.conn.Close() } diff --git a/vendor/github.com/mattes/migrate/database/clickhouse/examples/migrations/001_init.down.sql b/vendor/github.com/mattes/migrate/database/clickhouse/examples/migrations/001_init.down.sql new file mode 100644 index 000000000..51cd8bfb5 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/clickhouse/examples/migrations/001_init.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS test_1; \ No newline at end of file diff --git a/vendor/github.com/mattes/migrate/database/clickhouse/examples/migrations/001_init.up.sql b/vendor/github.com/mattes/migrate/database/clickhouse/examples/migrations/001_init.up.sql new file mode 100644 index 000000000..5436b6fdd --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/clickhouse/examples/migrations/001_init.up.sql @@ -0,0 +1,3 @@ +CREATE TABLE test_1 ( + Date Date +) Engine=Memory; \ No newline at end of file diff --git a/vendor/github.com/mattes/migrate/database/clickhouse/examples/migrations/002_create_table.down.sql b/vendor/github.com/mattes/migrate/database/clickhouse/examples/migrations/002_create_table.down.sql new file mode 100644 index 000000000..9d7712233 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/clickhouse/examples/migrations/002_create_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS test_2; \ No newline at end of file diff --git a/vendor/github.com/mattes/migrate/database/clickhouse/examples/migrations/002_create_table.up.sql b/vendor/github.com/mattes/migrate/database/clickhouse/examples/migrations/002_create_table.up.sql new file mode 100644 index 000000000..6b49ed99d --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/clickhouse/examples/migrations/002_create_table.up.sql @@ -0,0 +1,3 @@ +CREATE TABLE test_2 ( + Date Date +) Engine=Memory; \ No newline at end of file diff --git a/vendor/github.com/mattes/migrate/database/cockroachdb/README.md b/vendor/github.com/mattes/migrate/database/cockroachdb/README.md new file mode 100644 index 000000000..7931c2791 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/cockroachdb/README.md @@ -0,0 +1,19 @@ +# cockroachdb + +`cockroachdb://user:password@host:port/dbname?query` (`cockroach://`, and `crdb-postgres://` work, too) + +| URL Query | WithInstance Config | Description | +|------------|---------------------|-------------| +| `x-migrations-table` | `MigrationsTable` | Name of the migrations table | 
+| `x-lock-table` | `LockTable` | Name of the table which maintains the migration lock | +| `x-force-lock` | `ForceLock` | Force lock acquisition to fix faulty migrations which may not have released the schema lock (Boolean, default is `false`) | +| `dbname` | `DatabaseName` | The name of the database to connect to | +| `user` | | The user to sign in as | +| `password` | | The user's password | +| `host` | | The host to connect to. Values that start with / are for unix domain sockets. (default is localhost) | +| `port` | | The port to bind to. (default is 5432) | +| `connect_timeout` | | Maximum wait for connection, in seconds. Zero or not specified means wait indefinitely. | +| `sslcert` | | Cert file location. The file must contain PEM encoded data. | +| `sslkey` | | Key file location. The file must contain PEM encoded data. | +| `sslrootcert` | | The location of the root certificate file. The file must contain PEM encoded data. | +| `sslmode` | | Whether or not to use SSL (disable\|require\|verify-ca\|verify-full) | diff --git a/vendor/github.com/mattes/migrate/database/cockroachdb/cockroachdb.go b/vendor/github.com/mattes/migrate/database/cockroachdb/cockroachdb.go new file mode 100644 index 000000000..8da31d378 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/cockroachdb/cockroachdb.go @@ -0,0 +1,338 @@ +package cockroachdb + +import ( + "database/sql" + "fmt" + "io" + "io/ioutil" + nurl "net/url" + + "github.com/cockroachdb/cockroach-go/crdb" + "github.com/lib/pq" + "github.com/mattes/migrate" + "github.com/mattes/migrate/database" + "regexp" + "strconv" + "context" +) + +func init() { + db := CockroachDb{} + database.Register("cockroach", &db) + database.Register("cockroachdb", &db) + database.Register("crdb-postgres", &db) +} + +var DefaultMigrationsTable = "schema_migrations" +var DefaultLockTable = "schema_lock" + +var ( + ErrNilConfig = fmt.Errorf("no config") + ErrNoDatabaseName = fmt.Errorf("no database name") +) + +type Config struct { + MigrationsTable string + LockTable string + ForceLock bool + DatabaseName string +} + +type CockroachDb struct { + db *sql.DB + isLocked bool + + // Open and WithInstance need to guarantee that config is never nil + config *Config +} + +func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) { + if config == nil { + return nil, ErrNilConfig + } + + if err := instance.Ping(); err != nil { + return nil, err + } + + query := `SELECT current_database()` + var databaseName string + if err := instance.QueryRow(query).Scan(&databaseName); err != nil { + return nil, &database.Error{OrigErr: err, Query: []byte(query)} + } + + if len(databaseName) == 0 { + return nil, ErrNoDatabaseName + } + + config.DatabaseName = databaseName + + if len(config.MigrationsTable) == 0 { + config.MigrationsTable = DefaultMigrationsTable + } + + if len(config.LockTable) == 0 { + config.LockTable = DefaultLockTable + } + + px := &CockroachDb{ + db: instance, + config: config, + } + + if err := px.ensureVersionTable(); err != nil { + return nil, err + } + + if err := px.ensureLockTable(); err != nil { + return nil, err + } + + return px, nil +} + +func (c *CockroachDb) Open(url string) (database.Driver, error) { + purl, err := nurl.Parse(url) + if err != nil { + return nil, err + } + + // As Cockroach uses the postgres protocol, and 'postgres' is already a registered database, we need to replace the + // connect prefix, with the actual protocol, so that the library can differentiate between the implementations + re := 
regexp.MustCompile("^(cockroach(db)?|crdb-postgres)") + connectString := re.ReplaceAllString(migrate.FilterCustomQuery(purl).String(), "postgres") + + db, err := sql.Open("postgres", connectString) + if err != nil { + return nil, err + } + + migrationsTable := purl.Query().Get("x-migrations-table") + if len(migrationsTable) == 0 { + migrationsTable = DefaultMigrationsTable + } + + lockTable := purl.Query().Get("x-lock-table") + if len(lockTable) == 0 { + lockTable = DefaultLockTable + } + + forceLockQuery := purl.Query().Get("x-force-lock") + forceLock, err := strconv.ParseBool(forceLockQuery) + if err != nil { + forceLock = false + } + + px, err := WithInstance(db, &Config{ + DatabaseName: purl.Path, + MigrationsTable: migrationsTable, + LockTable: lockTable, + ForceLock: forceLock, + }) + if err != nil { + return nil, err + } + + return px, nil +} + +func (c *CockroachDb) Close() error { + return c.db.Close() +} + +// Locking is done manually with a separate lock table. Implementing advisory locks in CRDB is being discussed +// See: https://github.com/cockroachdb/cockroach/issues/13546 +func (c *CockroachDb) Lock() error { + err := crdb.ExecuteTx(context.Background(), c.db, nil, func(tx *sql.Tx) error { + aid, err := database.GenerateAdvisoryLockId(c.config.DatabaseName) + if err != nil { + return err + } + + query := "SELECT * FROM " + c.config.LockTable + " WHERE lock_id = $1" + rows, err := tx.Query(query, aid) + if err != nil { + return database.Error{OrigErr: err, Err: "failed to fetch migration lock", Query: []byte(query)} + } + defer rows.Close() + + // If row exists at all, lock is present + locked := rows.Next() + if locked && !c.config.ForceLock { + return database.Error{Err: "lock could not be acquired; already locked", Query: []byte(query)} + } + + query = "INSERT INTO " + c.config.LockTable + " (lock_id) VALUES ($1)" + if _, err := tx.Exec(query, aid) ; err != nil { + return database.Error{OrigErr: err, Err: "failed to set migration lock", Query: []byte(query)} + } + + return nil + }) + + if err != nil { + return err + } else { + c.isLocked = true + return nil + } +} + +// Locking is done manually with a separate lock table. Implementing advisory locks in CRDB is being discussed +// See: https://github.com/cockroachdb/cockroach/issues/13546 +func (c *CockroachDb) Unlock() error { + aid, err := database.GenerateAdvisoryLockId(c.config.DatabaseName) + if err != nil { + return err + } + + // In the event of an implementation (non-migration) error, it is possible for the lock to not be released. 
Until + // a better locking mechanism is added, a manual purging of the lock table may be required in such circumstances + query := "DELETE FROM " + c.config.LockTable + " WHERE lock_id = $1" + if _, err := c.db.Exec(query, aid); err != nil { + if e, ok := err.(*pq.Error); ok { + // 42P01 is "UndefinedTableError" in CockroachDB + // https://github.com/cockroachdb/cockroach/blob/master/pkg/sql/pgwire/pgerror/codes.go + if e.Code == "42P01" { + // On drops, the lock table is fully removed; This is fine, and is a valid "unlocked" state for the schema + c.isLocked = false + return nil + } + } + return database.Error{OrigErr: err, Err: "failed to release migration lock", Query: []byte(query)} + } + + c.isLocked = false + return nil +} + +func (c *CockroachDb) Run(migration io.Reader) error { + migr, err := ioutil.ReadAll(migration) + if err != nil { + return err + } + + // run migration + query := string(migr[:]) + if _, err := c.db.Exec(query); err != nil { + return database.Error{OrigErr: err, Err: "migration failed", Query: migr} + } + + return nil +} + +func (c *CockroachDb) SetVersion(version int, dirty bool) error { + return crdb.ExecuteTx(context.Background(), c.db, nil, func(tx *sql.Tx) error { + if _, err := tx.Exec( `TRUNCATE "` + c.config.MigrationsTable + `"`); err != nil { + return err + } + + if version >= 0 { + if _, err := tx.Exec(`INSERT INTO "` + c.config.MigrationsTable + `" (version, dirty) VALUES ($1, $2)`, version, dirty); err != nil { + return err + } + } + + return nil + }) +} + +func (c *CockroachDb) Version() (version int, dirty bool, err error) { + query := `SELECT version, dirty FROM "` + c.config.MigrationsTable + `" LIMIT 1` + err = c.db.QueryRow(query).Scan(&version, &dirty) + + switch { + case err == sql.ErrNoRows: + return database.NilVersion, false, nil + + case err != nil: + if e, ok := err.(*pq.Error); ok { + // 42P01 is "UndefinedTableError" in CockroachDB + // https://github.com/cockroachdb/cockroach/blob/master/pkg/sql/pgwire/pgerror/codes.go + if e.Code == "42P01" { + return database.NilVersion, false, nil + } + } + return 0, false, &database.Error{OrigErr: err, Query: []byte(query)} + + default: + return version, dirty, nil + } +} + +func (c *CockroachDb) Drop() error { + // select all tables in current schema + query := `SELECT table_name FROM information_schema.tables WHERE table_schema=(SELECT current_schema())` + tables, err := c.db.Query(query) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + defer tables.Close() + + // delete one table after another + tableNames := make([]string, 0) + for tables.Next() { + var tableName string + if err := tables.Scan(&tableName); err != nil { + return err + } + if len(tableName) > 0 { + tableNames = append(tableNames, tableName) + } + } + + if len(tableNames) > 0 { + // delete one by one ... 
+ for _, t := range tableNames { + query = `DROP TABLE IF EXISTS ` + t + ` CASCADE` + if _, err := c.db.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + if err := c.ensureVersionTable(); err != nil { + return err + } + } + + return nil +} + +func (c *CockroachDb) ensureVersionTable() error { + // check if migration table exists + var count int + query := `SELECT COUNT(1) FROM information_schema.tables WHERE table_name = $1 AND table_schema = (SELECT current_schema()) LIMIT 1` + if err := c.db.QueryRow(query, c.config.MigrationsTable).Scan(&count); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + if count == 1 { + return nil + } + + // if not, create the empty migration table + query = `CREATE TABLE "` + c.config.MigrationsTable + `" (version INT NOT NULL PRIMARY KEY, dirty BOOL NOT NULL)` + if _, err := c.db.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + return nil +} + + +func (c *CockroachDb) ensureLockTable() error { + // check if lock table exists + var count int + query := `SELECT COUNT(1) FROM information_schema.tables WHERE table_name = $1 AND table_schema = (SELECT current_schema()) LIMIT 1` + if err := c.db.QueryRow(query, c.config.LockTable).Scan(&count); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + if count == 1 { + return nil + } + + // if not, create the empty lock table + query = `CREATE TABLE "` + c.config.LockTable + `" (lock_id INT NOT NULL PRIMARY KEY)` + if _, err := c.db.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + + return nil +} diff --git a/vendor/github.com/mattes/migrate/database/cockroachdb/cockroachdb_test.go b/vendor/github.com/mattes/migrate/database/cockroachdb/cockroachdb_test.go new file mode 100644 index 000000000..e2dc1f86e --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/cockroachdb/cockroachdb_test.go @@ -0,0 +1,91 @@ +package cockroachdb + +// error codes https://github.com/lib/pq/blob/master/error.go + +import ( + //"bytes" + "database/sql" + "fmt" + "io" + "testing" + + "github.com/lib/pq" + dt "github.com/mattes/migrate/database/testing" + mt "github.com/mattes/migrate/testing" + "bytes" +) + +var versions = []mt.Version{ + {Image: "cockroachdb/cockroach:v1.0.2", Cmd: []string{"start", "--insecure"}}, +} + +func isReady(i mt.Instance) bool { + db, err := sql.Open("postgres", fmt.Sprintf("postgres://root@%v:%v?sslmode=disable", i.Host(), i.PortFor(26257))) + if err != nil { + return false + } + defer db.Close() + err = db.Ping() + if err == io.EOF { + _, err = db.Exec("CREATE DATABASE migrate") + return err == nil; + } else if e, ok := err.(*pq.Error); ok { + if e.Code.Name() == "cannot_connect_now" { + return false + } + } + + _, err = db.Exec("CREATE DATABASE migrate") + return err == nil; + + return true +} + +func Test(t *testing.T) { + mt.ParallelTest(t, versions, isReady, + func(t *testing.T, i mt.Instance) { + c := &CockroachDb{} + addr := fmt.Sprintf("cockroach://root@%v:%v/migrate?sslmode=disable", i.Host(), i.PortFor(26257)) + d, err := c.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + dt.Test(t, d, []byte("SELECT 1")) + }) +} + +func TestMultiStatement(t *testing.T) { + mt.ParallelTest(t, versions, isReady, + func(t *testing.T, i mt.Instance) { + c := &CockroachDb{} + addr := fmt.Sprintf("cockroach://root@%v:%v/migrate?sslmode=disable", i.Host(), i.Port()) + d, err := c.Open(addr) + if err != nil { + t.Fatalf("%v", 
err) + } + if err := d.Run(bytes.NewReader([]byte("CREATE TABLE foo (foo text); CREATE TABLE bar (bar text);"))); err != nil { + t.Fatalf("expected err to be nil, got %v", err) + } + + // make sure second table exists + var exists bool + if err := d.(*CockroachDb).db.QueryRow("SELECT EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = 'bar' AND table_schema = (SELECT current_schema()))").Scan(&exists); err != nil { + t.Fatal(err) + } + if !exists { + t.Fatalf("expected table bar to exist") + } + }) +} + +func TestFilterCustomQuery(t *testing.T) { + mt.ParallelTest(t, versions, isReady, + func(t *testing.T, i mt.Instance) { + c := &CockroachDb{} + addr := fmt.Sprintf("cockroach://root@%v:%v/migrate?sslmode=disable&x-custom=foobar", i.Host(), i.PortFor(26257)) + _, err := c.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + }) +} diff --git a/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1085649617_create_users_table.down.sql b/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1085649617_create_users_table.down.sql new file mode 100644 index 000000000..c99ddcdc8 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1085649617_create_users_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS users; diff --git a/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1085649617_create_users_table.up.sql b/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1085649617_create_users_table.up.sql new file mode 100644 index 000000000..fc3210181 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1085649617_create_users_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE users ( + user_id INT UNIQUE, + name STRING(40), + email STRING(40) +); diff --git a/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1185749658_add_city_to_users.down.sql b/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1185749658_add_city_to_users.down.sql new file mode 100644 index 000000000..940c60712 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1185749658_add_city_to_users.down.sql @@ -0,0 +1 @@ +ALTER TABLE users DROP COLUMN IF EXISTS city; diff --git a/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1185749658_add_city_to_users.up.sql b/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1185749658_add_city_to_users.up.sql new file mode 100644 index 000000000..46204b0f8 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1185749658_add_city_to_users.up.sql @@ -0,0 +1 @@ +ALTER TABLE users ADD COLUMN city TEXT; diff --git a/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1285849751_add_index_on_user_emails.down.sql b/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1285849751_add_index_on_user_emails.down.sql new file mode 100644 index 000000000..3e87dd229 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1285849751_add_index_on_user_emails.down.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS users_email_index; diff --git a/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1285849751_add_index_on_user_emails.up.sql b/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1285849751_add_index_on_user_emails.up.sql new file mode 
100644 index 000000000..61f8ba0b9 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1285849751_add_index_on_user_emails.up.sql @@ -0,0 +1,3 @@ +CREATE UNIQUE INDEX IF NOT EXISTS users_email_index ON users (email); + +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1385949617_create_books_table.down.sql b/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1385949617_create_books_table.down.sql new file mode 100644 index 000000000..1a0b1a214 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1385949617_create_books_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS books; diff --git a/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1385949617_create_books_table.up.sql b/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1385949617_create_books_table.up.sql new file mode 100644 index 000000000..0d3b99928 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1385949617_create_books_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE books ( + user_id INT, + name STRING(40), + author STRING(40) +); diff --git a/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1485949617_create_movies_table.down.sql b/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1485949617_create_movies_table.down.sql new file mode 100644 index 000000000..3a5187689 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1485949617_create_movies_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS movies; diff --git a/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1485949617_create_movies_table.up.sql b/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1485949617_create_movies_table.up.sql new file mode 100644 index 000000000..d533be900 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1485949617_create_movies_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE movies ( + user_id INT, + name STRING(40), + director STRING(40) +); diff --git a/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1585849751_just_a_comment.up.sql b/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1585849751_just_a_comment.up.sql new file mode 100644 index 000000000..9b6b57a61 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1585849751_just_a_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1685849751_another_comment.up.sql b/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1685849751_another_comment.up.sql new file mode 100644 index 000000000..9b6b57a61 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1685849751_another_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. 
Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1785849751_another_comment.up.sql b/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1785849751_another_comment.up.sql new file mode 100644 index 000000000..9b6b57a61 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1785849751_another_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1885849751_another_comment.up.sql b/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1885849751_another_comment.up.sql new file mode 100644 index 000000000..9b6b57a61 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1885849751_another_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/mattes/migrate/database/crate/README.md b/vendor/github.com/mattes/migrate/database/crate/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/mattes/migrate/database/driver.go b/vendor/github.com/mattes/migrate/database/driver.go new file mode 100644 index 000000000..016eedcba --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/driver.go @@ -0,0 +1,112 @@ +// Package database provides the Database interface. +// All database drivers must implement this interface, register themselves, +// optionally provide a `WithInstance` function and pass the tests +// in package database/testing. +package database + +import ( + "fmt" + "io" + nurl "net/url" + "sync" +) + +var ( + ErrLocked = fmt.Errorf("can't acquire lock") +) + +const NilVersion int = -1 + +var driversMu sync.RWMutex +var drivers = make(map[string]Driver) + +// Driver is the interface every database driver must implement. +// +// How to implement a database driver? +// 1. Implement this interface. +// 2. Optionally, add a function named `WithInstance`. +// This function should accept an existing DB instance and a Config{} struct +// and return a driver instance. +// 3. Add a test that calls database/testing.go:Test() +// 4. Add own tests for Open(), WithInstance() (when provided) and Close(). +// All other functions are tested by tests in database/testing. +// Saves you some time and makes sure all database drivers behave the same way. +// 5. Call Register in init(). +// 6. Create a migrate/cli/build_.go file +// 7. Add driver name in 'DATABASE' variable in Makefile +// +// Guidelines: +// * Don't try to correct user input. Don't assume things. +// When in doubt, return an error and explain the situation to the user. +// * All configuration input must come from the URL string in func Open() +// or the Config{} struct in WithInstance. Don't os.Getenv(). +type Driver interface { + // Open returns a new driver instance configured with parameters + // coming from the URL string. Migrate will call this function + // only once per instance. 
+	Open(url string) (Driver, error)
+
+	// Close closes the underlying database instance managed by the driver.
+	// Migrate will call this function only once per instance.
+	Close() error
+
+	// Lock should acquire a database lock so that only one migration process
+	// can run at a time. Migrate will call this function before Run is called.
+	// If the implementation can't provide this functionality, return nil.
+	// Return database.ErrLocked if the database is already locked.
+	Lock() error
+
+	// Unlock should release the lock. Migrate will call this function after
+	// all migrations have been run.
+	Unlock() error
+
+	// Run applies a migration to the database. migration is guaranteed to be non-nil.
+	Run(migration io.Reader) error
+
+	// SetVersion saves version and dirty state.
+	// Migrate will call this function before and after each call to Run.
+	// version must be >= -1. -1 means NilVersion.
+	SetVersion(version int, dirty bool) error
+
+	// Version returns the currently active version and whether the database is dirty.
+	// When no migration has been applied, it must return version -1.
+	// Dirty means a previous migration failed and user interaction is required.
+	Version() (version int, dirty bool, err error)
+
+	// Drop deletes everything in the database.
+	Drop() error
+}
+
+// Open returns a new driver instance.
+func Open(url string) (Driver, error) {
+	u, err := nurl.Parse(url)
+	if err != nil {
+		return nil, err
+	}
+
+	if u.Scheme == "" {
+		return nil, fmt.Errorf("database driver: invalid URL scheme")
+	}
+
+	driversMu.RLock()
+	d, ok := drivers[u.Scheme]
+	driversMu.RUnlock()
+	if !ok {
+		return nil, fmt.Errorf("database driver: unknown driver %v (forgotten import?)", u.Scheme)
+	}
+
+	return d.Open(url)
+}
+
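The `ExampleDriver` test below points at `database/stub` for a reference implementation. In the same spirit, a minimal in-memory driver satisfying the interface could look like the following — a sketch, not the actual stub package; all names are illustrative:

```go
package stub

import (
	"io"
	"io/ioutil"

	"github.com/mattes/migrate/database"
)

func init() {
	database.Register("stub", &Stub{})
}

// Stub is an in-memory driver that satisfies database.Driver.
type Stub struct {
	version int
	dirty   bool
	locked  bool
}

func (s *Stub) Open(url string) (database.Driver, error) {
	return &Stub{version: database.NilVersion}, nil
}

func (s *Stub) Close() error { return nil }

func (s *Stub) Lock() error {
	if s.locked {
		return database.ErrLocked
	}
	s.locked = true
	return nil
}

func (s *Stub) Unlock() error { s.locked = false; return nil }

func (s *Stub) Run(migration io.Reader) error {
	_, err := ioutil.ReadAll(migration) // a real driver would execute this
	return err
}

func (s *Stub) SetVersion(version int, dirty bool) error {
	s.version, s.dirty = version, dirty
	return nil
}

func (s *Stub) Version() (int, bool, error) { return s.version, s.dirty, nil }

func (s *Stub) Drop() error { return nil }
```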
+// Register globally registers a driver.
+func Register(name string, driver Driver) {
+	driversMu.Lock()
+	defer driversMu.Unlock()
+	if driver == nil {
+		panic("Register driver is nil")
+	}
+	if _, dup := drivers[name]; dup {
+		panic("Register called twice for driver " + name)
+	}
+	drivers[name] = driver
+}
diff --git a/vendor/github.com/mattes/migrate/database/driver_test.go b/vendor/github.com/mattes/migrate/database/driver_test.go
new file mode 100644
index 000000000..c0a29304f
--- /dev/null
+++ b/vendor/github.com/mattes/migrate/database/driver_test.go
@@ -0,0 +1,8 @@
+package database
+
+func ExampleDriver() {
+	// see database/stub for an example
+
+	// database/stub/stub.go has the driver implementation
+	// database/stub/stub_test.go runs database/testing/test.go:Test
+}
diff --git a/vendor/github.com/mattes/migrate/database/error.go b/vendor/github.com/mattes/migrate/database/error.go
new file mode 100644
index 000000000..eb802c753
--- /dev/null
+++ b/vendor/github.com/mattes/migrate/database/error.go
@@ -0,0 +1,27 @@
+package database
+
+import (
+	"fmt"
+)
+
+// Error should be used for errors involving queries run against the database
+type Error struct {
+	// Optional: the line number
+	Line uint
+
+	// Query is a query excerpt
+	Query []byte
+
+	// Err is a helpful error message for humans
+	Err string
+
+	// OrigErr is the underlying error
+	OrigErr error
+}
+
+func (e Error) Error() string {
+	if len(e.Err) == 0 {
+		return fmt.Sprintf("%v in line %v: %s", e.OrigErr, e.Line, e.Query)
+	}
+	return fmt.Sprintf("%v in line %v: %s (details: %v)", e.Err, e.Line, e.Query, e.OrigErr)
+}
diff --git a/vendor/github.com/mattes/migrate/database/mongodb/README.md b/vendor/github.com/mattes/migrate/database/mongodb/README.md
new file mode 100644
index 000000000..e69de29bb
diff --git a/vendor/github.com/mattes/migrate/database/mysql/README.md b/vendor/github.com/mattes/migrate/database/mysql/README.md
new file mode 100644
index 000000000..490e90b21
--- /dev/null
+++ b/vendor/github.com/mattes/migrate/database/mysql/README.md
@@ -0,0 +1,53 @@
+# MySQL
+
+`mysql://user:password@tcp(host:port)/dbname?query`
+
+| URL Query | WithInstance Config | Description |
+|------------|---------------------|-------------|
+| `x-migrations-table` | `MigrationsTable` | Name of the migrations table |
+| `dbname` | `DatabaseName` | The name of the database to connect to |
+| `user` | | The user to sign in as |
+| `password` | | The user's password |
+| `host` | | The host to connect to. |
+| `port` | | The port to bind to. |
+| `x-tls-ca` | | The location of the root certificate file. |
+| `x-tls-cert` | | Cert file location. |
+| `x-tls-key` | | Key file location. |
+| `x-tls-insecure-skip-verify` | | Whether to skip certificate verification (true\|false) |
+
+## Use with existing client
+
+If you use the MySQL driver with an existing database client, you must create the client with the parameter `multiStatements=true`:
+
+```go
+package main
+
+import (
+	"database/sql"
+
+	_ "github.com/go-sql-driver/mysql"
+	"github.com/mattes/migrate"
+	"github.com/mattes/migrate/database/mysql"
+	_ "github.com/mattes/migrate/source/file"
+)
+
+func main() {
+	db, _ := sql.Open("mysql", "user:password@tcp(host:port)/dbname?multiStatements=true")
+	driver, _ := mysql.WithInstance(db, &mysql.Config{})
+	m, _ := migrate.NewWithDatabaseInstance(
+		"file:///migrations",
+		"mysql",
+		driver,
+	)
+
+	m.Steps(2)
+}
+```
+
+## Upgrading from v1
+
+1. Write down the current migration version from schema_migrations
+1. `DROP TABLE schema_migrations`
+2. 
Wrap your existing migrations in transactions ([BEGIN/COMMIT](https://dev.mysql.com/doc/refman/5.7/en/commit.html)) if you use multiple statements within one migration. +3. Download and install the latest migrate version. +4. Force the current migration version with `migrate force `. diff --git a/vendor/github.com/mattes/migrate/database/mysql/mysql.go b/vendor/github.com/mattes/migrate/database/mysql/mysql.go new file mode 100644 index 000000000..f00f886e4 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/mysql/mysql.go @@ -0,0 +1,329 @@ +package mysql + +import ( + "crypto/tls" + "crypto/x509" + "database/sql" + "fmt" + "io" + "io/ioutil" + nurl "net/url" + "strconv" + "strings" + + "github.com/go-sql-driver/mysql" + "github.com/mattes/migrate" + "github.com/mattes/migrate/database" +) + +func init() { + database.Register("mysql", &Mysql{}) +} + +var DefaultMigrationsTable = "schema_migrations" + +var ( + ErrDatabaseDirty = fmt.Errorf("database is dirty") + ErrNilConfig = fmt.Errorf("no config") + ErrNoDatabaseName = fmt.Errorf("no database name") + ErrAppendPEM = fmt.Errorf("failed to append PEM") +) + +type Config struct { + MigrationsTable string + DatabaseName string +} + +type Mysql struct { + db *sql.DB + isLocked bool + + config *Config +} + +// instance must have `multiStatements` set to true +func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) { + if config == nil { + return nil, ErrNilConfig + } + + if err := instance.Ping(); err != nil { + return nil, err + } + + query := `SELECT DATABASE()` + var databaseName sql.NullString + if err := instance.QueryRow(query).Scan(&databaseName); err != nil { + return nil, &database.Error{OrigErr: err, Query: []byte(query)} + } + + if len(databaseName.String) == 0 { + return nil, ErrNoDatabaseName + } + + config.DatabaseName = databaseName.String + + if len(config.MigrationsTable) == 0 { + config.MigrationsTable = DefaultMigrationsTable + } + + mx := &Mysql{ + db: instance, + config: config, + } + + if err := mx.ensureVersionTable(); err != nil { + return nil, err + } + + return mx, nil +} + +func (m *Mysql) Open(url string) (database.Driver, error) { + purl, err := nurl.Parse(url) + if err != nil { + return nil, err + } + + q := purl.Query() + q.Set("multiStatements", "true") + purl.RawQuery = q.Encode() + + db, err := sql.Open("mysql", strings.Replace( + migrate.FilterCustomQuery(purl).String(), "mysql://", "", 1)) + if err != nil { + return nil, err + } + + migrationsTable := purl.Query().Get("x-migrations-table") + if len(migrationsTable) == 0 { + migrationsTable = DefaultMigrationsTable + } + + // use custom TLS? 
+ ctls := purl.Query().Get("tls") + if len(ctls) > 0 { + if _, isBool := readBool(ctls); !isBool && strings.ToLower(ctls) != "skip-verify" { + rootCertPool := x509.NewCertPool() + pem, err := ioutil.ReadFile(purl.Query().Get("x-tls-ca")) + if err != nil { + return nil, err + } + + if ok := rootCertPool.AppendCertsFromPEM(pem); !ok { + return nil, ErrAppendPEM + } + + certs, err := tls.LoadX509KeyPair(purl.Query().Get("x-tls-cert"), purl.Query().Get("x-tls-key")) + if err != nil { + return nil, err + } + + insecureSkipVerify := false + if len(purl.Query().Get("x-tls-insecure-skip-verify")) > 0 { + x, err := strconv.ParseBool(purl.Query().Get("x-tls-insecure-skip-verify")) + if err != nil { + return nil, err + } + insecureSkipVerify = x + } + + mysql.RegisterTLSConfig(ctls, &tls.Config{ + RootCAs: rootCertPool, + Certificates: []tls.Certificate{certs}, + InsecureSkipVerify: insecureSkipVerify, + }) + } + } + + mx, err := WithInstance(db, &Config{ + DatabaseName: purl.Path, + MigrationsTable: migrationsTable, + }) + if err != nil { + return nil, err + } + + return mx, nil +} + +func (m *Mysql) Close() error { + return m.db.Close() +} + +func (m *Mysql) Lock() error { + if m.isLocked { + return database.ErrLocked + } + + aid, err := database.GenerateAdvisoryLockId( + fmt.Sprintf("%s:%s", m.config.DatabaseName, m.config.MigrationsTable)) + if err != nil { + return err + } + + query := "SELECT GET_LOCK(?, 1)" + var success bool + if err := m.db.QueryRow(query, aid).Scan(&success); err != nil { + return &database.Error{OrigErr: err, Err: "try lock failed", Query: []byte(query)} + } + + if success { + m.isLocked = true + return nil + } + + return database.ErrLocked +} + +func (m *Mysql) Unlock() error { + if !m.isLocked { + return nil + } + + aid, err := database.GenerateAdvisoryLockId( + fmt.Sprintf("%s:%s", m.config.DatabaseName, m.config.MigrationsTable)) + if err != nil { + return err + } + + query := `SELECT RELEASE_LOCK(?)` + if _, err := m.db.Exec(query, aid); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + + m.isLocked = false + return nil +} + +func (m *Mysql) Run(migration io.Reader) error { + migr, err := ioutil.ReadAll(migration) + if err != nil { + return err + } + + query := string(migr[:]) + if _, err := m.db.Exec(query); err != nil { + return database.Error{OrigErr: err, Err: "migration failed", Query: migr} + } + + return nil +} + +func (m *Mysql) SetVersion(version int, dirty bool) error { + tx, err := m.db.Begin() + if err != nil { + return &database.Error{OrigErr: err, Err: "transaction start failed"} + } + + query := "TRUNCATE `" + m.config.MigrationsTable + "`" + if _, err := m.db.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + + if version >= 0 { + query := "INSERT INTO `" + m.config.MigrationsTable + "` (version, dirty) VALUES (?, ?)" + if _, err := m.db.Exec(query, version, dirty); err != nil { + tx.Rollback() + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + + if err := tx.Commit(); err != nil { + return &database.Error{OrigErr: err, Err: "transaction commit failed"} + } + + return nil +} + +func (m *Mysql) Version() (version int, dirty bool, err error) { + query := "SELECT version, dirty FROM `" + m.config.MigrationsTable + "` LIMIT 1" + err = m.db.QueryRow(query).Scan(&version, &dirty) + switch { + case err == sql.ErrNoRows: + return database.NilVersion, false, nil + + case err != nil: + if e, ok := err.(*mysql.MySQLError); ok { + if e.Number == 0 { + return 
database.NilVersion, false, nil + } + } + return 0, false, &database.Error{OrigErr: err, Query: []byte(query)} + + default: + return version, dirty, nil + } +} + +func (m *Mysql) Drop() error { + // select all tables + query := `SHOW TABLES LIKE '%'` + tables, err := m.db.Query(query) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + defer tables.Close() + + // delete one table after another + tableNames := make([]string, 0) + for tables.Next() { + var tableName string + if err := tables.Scan(&tableName); err != nil { + return err + } + if len(tableName) > 0 { + tableNames = append(tableNames, tableName) + } + } + + if len(tableNames) > 0 { + // delete one by one ... + for _, t := range tableNames { + query = "DROP TABLE IF EXISTS `" + t + "` CASCADE" + if _, err := m.db.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + if err := m.ensureVersionTable(); err != nil { + return err + } + } + + return nil +} + +func (m *Mysql) ensureVersionTable() error { + // check if migration table exists + var result string + query := `SHOW TABLES LIKE "` + m.config.MigrationsTable + `"` + if err := m.db.QueryRow(query).Scan(&result); err != nil { + if err != sql.ErrNoRows { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } else { + return nil + } + + // if not, create the empty migration table + query = "CREATE TABLE `" + m.config.MigrationsTable + "` (version bigint not null primary key, dirty boolean not null)" + if _, err := m.db.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + return nil +} + +// Returns the bool value of the input. +// The 2nd return value indicates if the input was a valid bool value +// See https://github.com/go-sql-driver/mysql/blob/a059889267dc7170331388008528b3b44479bffb/utils.go#L71 +func readBool(input string) (value bool, valid bool) { + switch input { + case "1", "true", "TRUE", "True": + return true, true + case "0", "false", "FALSE", "False": + return false, true + } + + // Not a valid bool value + return +} diff --git a/vendor/github.com/mattes/migrate/database/mysql/mysql_test.go b/vendor/github.com/mattes/migrate/database/mysql/mysql_test.go new file mode 100644 index 000000000..f2b12e8ed --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/mysql/mysql_test.go @@ -0,0 +1,60 @@ +package mysql + +import ( + "database/sql" + sqldriver "database/sql/driver" + "fmt" + // "io/ioutil" + // "log" + "testing" + + // "github.com/go-sql-driver/mysql" + dt "github.com/mattes/migrate/database/testing" + mt "github.com/mattes/migrate/testing" +) + +var versions = []mt.Version{ + {Image: "mysql:8", ENV: []string{"MYSQL_ROOT_PASSWORD=root", "MYSQL_DATABASE=public"}}, + {Image: "mysql:5.7", ENV: []string{"MYSQL_ROOT_PASSWORD=root", "MYSQL_DATABASE=public"}}, + {Image: "mysql:5.6", ENV: []string{"MYSQL_ROOT_PASSWORD=root", "MYSQL_DATABASE=public"}}, + {Image: "mysql:5.5", ENV: []string{"MYSQL_ROOT_PASSWORD=root", "MYSQL_DATABASE=public"}}, +} + +func isReady(i mt.Instance) bool { + db, err := sql.Open("mysql", fmt.Sprintf("root:root@tcp(%v:%v)/public", i.Host(), i.Port())) + if err != nil { + return false + } + defer db.Close() + err = db.Ping() + + if err == sqldriver.ErrBadConn { + return false + } + + return true +} + +func Test(t *testing.T) { + // mysql.SetLogger(mysql.Logger(log.New(ioutil.Discard, "", log.Ltime))) + + mt.ParallelTest(t, versions, isReady, + func(t *testing.T, i mt.Instance) { + p := &Mysql{} + addr := 
fmt.Sprintf("mysql://root:root@tcp(%v:%v)/public", i.Host(), i.Port()) + d, err := p.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + dt.Test(t, d, []byte("SELECT 1")) + + // check ensureVersionTable + if err := d.(*Mysql).ensureVersionTable(); err != nil { + t.Fatal(err) + } + // check again + if err := d.(*Mysql).ensureVersionTable(); err != nil { + t.Fatal(err) + } + }) +} diff --git a/vendor/github.com/mattes/migrate/database/neo4j/README.md b/vendor/github.com/mattes/migrate/database/neo4j/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/mattes/migrate/database/postgres/README.md b/vendor/github.com/mattes/migrate/database/postgres/README.md new file mode 100644 index 000000000..f6312392b --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/postgres/README.md @@ -0,0 +1,28 @@ +# postgres + +`postgres://user:password@host:port/dbname?query` (`postgresql://` works, too) + +| URL Query | WithInstance Config | Description | +|------------|---------------------|-------------| +| `x-migrations-table` | `MigrationsTable` | Name of the migrations table | +| `dbname` | `DatabaseName` | The name of the database to connect to | +| `search_path` | | This variable specifies the order in which schemas are searched when an object is referenced by a simple name with no schema specified. | +| `user` | | The user to sign in as | +| `password` | | The user's password | +| `host` | | The host to connect to. Values that start with / are for unix domain sockets. (default is localhost) | +| `port` | | The port to bind to. (default is 5432) | +| `fallback_application_name` | | An application_name to fall back to if one isn't provided. | +| `connect_timeout` | | Maximum wait for connection, in seconds. Zero or not specified means wait indefinitely. | +| `sslcert` | | Cert file location. The file must contain PEM encoded data. | +| `sslkey` | | Key file location. The file must contain PEM encoded data. | +| `sslrootcert` | | The location of the root certificate file. The file must contain PEM encoded data. | +| `sslmode` | | Whether or not to use SSL (disable\|require\|verify-ca\|verify-full) | + + +## Upgrading from v1 + +1. Write down the current migration version from schema_migrations +1. `DROP TABLE schema_migrations` +2. Wrap your existing migrations in transactions ([BEGIN/COMMIT](https://www.postgresql.org/docs/current/static/transaction-iso.html)) if you use multiple statements within one migration. +3. Download and install the latest migrate version. +4. Force the current migration version with `migrate force `. 
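As with MySQL above, the postgres driver can be driven from an existing `*sql.DB` via `WithInstance`. A minimal sketch mirroring the MySQL example — the connection string and migrations path are placeholders:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq"
	"github.com/mattes/migrate"
	"github.com/mattes/migrate/database/postgres"
	_ "github.com/mattes/migrate/source/file"
)

func main() {
	db, err := sql.Open("postgres", "postgres://user:password@localhost:5432/dbname?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	driver, err := postgres.WithInstance(db, &postgres.Config{})
	if err != nil {
		log.Fatal(err)
	}
	m, err := migrate.NewWithDatabaseInstance("file:///migrations", "postgres", driver)
	if err != nil {
		log.Fatal(err)
	}
	if err := m.Steps(2); err != nil {
		log.Fatal(err)
	}
}
```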
diff --git a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1085649617_create_users_table.down.sql b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1085649617_create_users_table.down.sql new file mode 100644 index 000000000..c99ddcdc8 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1085649617_create_users_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS users; diff --git a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1085649617_create_users_table.up.sql b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1085649617_create_users_table.up.sql new file mode 100644 index 000000000..92897dcab --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1085649617_create_users_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE users ( + user_id integer unique, + name varchar(40), + email varchar(40) +); diff --git a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.down.sql b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.down.sql new file mode 100644 index 000000000..940c60712 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.down.sql @@ -0,0 +1 @@ +ALTER TABLE users DROP COLUMN IF EXISTS city; diff --git a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.up.sql b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.up.sql new file mode 100644 index 000000000..67823edc9 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.up.sql @@ -0,0 +1,3 @@ +ALTER TABLE users ADD COLUMN city varchar(100); + + diff --git a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.down.sql b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.down.sql new file mode 100644 index 000000000..3e87dd229 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.down.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS users_email_index; diff --git a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.up.sql b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.up.sql new file mode 100644 index 000000000..fbeb4ab4e --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.up.sql @@ -0,0 +1,3 @@ +CREATE UNIQUE INDEX CONCURRENTLY users_email_index ON users (email); + +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. 
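The example migrations in this directory follow the `<version>_<name>.up.sql` / `<version>_<name>.down.sql` convention consumed by the file source. A short sketch of applying them and rolling one step back — the URL and path are placeholders:

```go
package main

import (
	"log"

	"github.com/mattes/migrate"
	_ "github.com/mattes/migrate/database/postgres"
	_ "github.com/mattes/migrate/source/file"
)

func main() {
	m, err := migrate.New(
		"file://examples/migrations",
		"postgres://localhost:5432/database?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	// Apply everything that is pending, then roll back one step.
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		log.Fatal(err)
	}
	if err := m.Steps(-1); err != nil {
		log.Fatal(err)
	}
}
```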
diff --git a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1385949617_create_books_table.down.sql b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1385949617_create_books_table.down.sql new file mode 100644 index 000000000..1a0b1a214 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1385949617_create_books_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS books; diff --git a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1385949617_create_books_table.up.sql b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1385949617_create_books_table.up.sql new file mode 100644 index 000000000..f1503b518 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1385949617_create_books_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE books ( + user_id integer, + name varchar(40), + author varchar(40) +); diff --git a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.down.sql b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.down.sql new file mode 100644 index 000000000..3a5187689 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS movies; diff --git a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.up.sql b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.up.sql new file mode 100644 index 000000000..f0ef5943b --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE movies ( + user_id integer, + name varchar(40), + director varchar(40) +); diff --git a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1585849751_just_a_comment.up.sql b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1585849751_just_a_comment.up.sql new file mode 100644 index 000000000..9b6b57a61 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1585849751_just_a_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1685849751_another_comment.up.sql b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1685849751_another_comment.up.sql new file mode 100644 index 000000000..9b6b57a61 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1685849751_another_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. 
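Both upgrade guides above finish with `migrate force`; the equivalent recovery is available programmatically when a failed migration leaves the version table dirty. A sketch, assuming `m` is a `*migrate.Migrate` built as in the earlier examples and `42` stands in for the version you recorded:

```go
// recoverDirty clears the dirty flag after a failed migration by forcing
// the last known-good version (42 here is a placeholder).
func recoverDirty(m *migrate.Migrate) error {
	_, dirty, err := m.Version()
	if err != nil {
		return err
	}
	if !dirty {
		return nil
	}
	// Force does not run any migrations; it only rewrites the version record.
	return m.Force(42)
}
```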
diff --git a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1785849751_another_comment.up.sql b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1785849751_another_comment.up.sql
new file mode 100644
index 000000000..9b6b57a61
--- /dev/null
+++ b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1785849751_another_comment.up.sql
@@ -0,0 +1 @@
+-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere.
diff --git a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1885849751_another_comment.up.sql b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1885849751_another_comment.up.sql
new file mode 100644
index 000000000..9b6b57a61
--- /dev/null
+++ b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1885849751_another_comment.up.sql
@@ -0,0 +1 @@
+-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere.
diff --git a/vendor/github.com/mattes/migrate/database/postgres/postgres.go b/vendor/github.com/mattes/migrate/database/postgres/postgres.go
new file mode 100644
index 000000000..fb2d61c28
--- /dev/null
+++ b/vendor/github.com/mattes/migrate/database/postgres/postgres.go
@@ -0,0 +1,273 @@
+package postgres
+
+import (
+	"database/sql"
+	"fmt"
+	"io"
+	"io/ioutil"
+	nurl "net/url"
+
+	"github.com/lib/pq"
+	"github.com/mattes/migrate"
+	"github.com/mattes/migrate/database"
+)
+
+func init() {
+	db := Postgres{}
+	database.Register("postgres", &db)
+	database.Register("postgresql", &db)
+}
+
+var DefaultMigrationsTable = "schema_migrations"
+
+var (
+	ErrNilConfig      = fmt.Errorf("no config")
+	ErrNoDatabaseName = fmt.Errorf("no database name")
+	ErrNoSchema       = fmt.Errorf("no schema")
+	ErrDatabaseDirty  = fmt.Errorf("database is dirty")
+)
+
+type Config struct {
+	MigrationsTable string
+	DatabaseName    string
+}
+
+type Postgres struct {
+	db       *sql.DB
+	isLocked bool
+
+	// Open and WithInstance need to guarantee that config is never nil
+	config *Config
+}
+
+func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) {
+	if config == nil {
+		return nil, ErrNilConfig
+	}
+
+	if err := instance.Ping(); err != nil {
+		return nil, err
+	}
+
+	query := `SELECT CURRENT_DATABASE()`
+	var databaseName string
+	if err := instance.QueryRow(query).Scan(&databaseName); err != nil {
+		return nil, &database.Error{OrigErr: err, Query: []byte(query)}
+	}
+
+	if len(databaseName) == 0 {
+		return nil, ErrNoDatabaseName
+	}
+
+	config.DatabaseName = databaseName
+
+	if len(config.MigrationsTable) == 0 {
+		config.MigrationsTable = DefaultMigrationsTable
+	}
+
+	px := &Postgres{
+		db:     instance,
+		config: config,
+	}
+
+	if err := px.ensureVersionTable(); err != nil {
+		return nil, err
+	}
+
+	return px, nil
+}
+
+func (p *Postgres) Open(url string) (database.Driver, error) {
+	purl, err := nurl.Parse(url)
+	if err != nil {
+		return nil, err
+	}
+
+	db, err := sql.Open("postgres", migrate.FilterCustomQuery(purl).String())
+	if err != nil {
+		return nil, err
+	}
+
+	migrationsTable := purl.Query().Get("x-migrations-table")
+	if len(migrationsTable) == 0 {
+		migrationsTable = DefaultMigrationsTable
+	}
+
+	px, err := WithInstance(db, &Config{
+		DatabaseName: purl.Path,
MigrationsTable: migrationsTable, + }) + if err != nil { + return nil, err + } + + return px, nil +} + +func (p *Postgres) Close() error { + return p.db.Close() +} + +// https://www.postgresql.org/docs/9.6/static/explicit-locking.html#ADVISORY-LOCKS +func (p *Postgres) Lock() error { + if p.isLocked { + return database.ErrLocked + } + + aid, err := database.GenerateAdvisoryLockId(p.config.DatabaseName) + if err != nil { + return err + } + + // This will either obtain the lock immediately and return true, + // or return false if the lock cannot be acquired immediately. + query := `SELECT pg_try_advisory_lock($1)` + var success bool + if err := p.db.QueryRow(query, aid).Scan(&success); err != nil { + return &database.Error{OrigErr: err, Err: "try lock failed", Query: []byte(query)} + } + + if success { + p.isLocked = true + return nil + } + + return database.ErrLocked +} + +func (p *Postgres) Unlock() error { + if !p.isLocked { + return nil + } + + aid, err := database.GenerateAdvisoryLockId(p.config.DatabaseName) + if err != nil { + return err + } + + query := `SELECT pg_advisory_unlock($1)` + if _, err := p.db.Exec(query, aid); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + p.isLocked = false + return nil +} + +func (p *Postgres) Run(migration io.Reader) error { + migr, err := ioutil.ReadAll(migration) + if err != nil { + return err + } + + // run migration + query := string(migr[:]) + if _, err := p.db.Exec(query); err != nil { + // TODO: cast to postgress error and get line number + return database.Error{OrigErr: err, Err: "migration failed", Query: migr} + } + + return nil +} + +func (p *Postgres) SetVersion(version int, dirty bool) error { + tx, err := p.db.Begin() + if err != nil { + return &database.Error{OrigErr: err, Err: "transaction start failed"} + } + + query := `TRUNCATE "` + p.config.MigrationsTable + `"` + if _, err := tx.Exec(query); err != nil { + tx.Rollback() + return &database.Error{OrigErr: err, Query: []byte(query)} + } + + if version >= 0 { + query = `INSERT INTO "` + p.config.MigrationsTable + `" (version, dirty) VALUES ($1, $2)` + if _, err := tx.Exec(query, version, dirty); err != nil { + tx.Rollback() + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + + if err := tx.Commit(); err != nil { + return &database.Error{OrigErr: err, Err: "transaction commit failed"} + } + + return nil +} + +func (p *Postgres) Version() (version int, dirty bool, err error) { + query := `SELECT version, dirty FROM "` + p.config.MigrationsTable + `" LIMIT 1` + err = p.db.QueryRow(query).Scan(&version, &dirty) + switch { + case err == sql.ErrNoRows: + return database.NilVersion, false, nil + + case err != nil: + if e, ok := err.(*pq.Error); ok { + if e.Code.Name() == "undefined_table" { + return database.NilVersion, false, nil + } + } + return 0, false, &database.Error{OrigErr: err, Query: []byte(query)} + + default: + return version, dirty, nil + } +} + +func (p *Postgres) Drop() error { + // select all tables in current schema + query := `SELECT table_name FROM information_schema.tables WHERE table_schema=(SELECT current_schema())` + tables, err := p.db.Query(query) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + defer tables.Close() + + // delete one table after another + tableNames := make([]string, 0) + for tables.Next() { + var tableName string + if err := tables.Scan(&tableName); err != nil { + return err + } + if len(tableName) > 0 { + tableNames = append(tableNames, tableName) + } + } + + if 
len(tableNames) > 0 { + // delete one by one ... + for _, t := range tableNames { + query = `DROP TABLE IF EXISTS ` + t + ` CASCADE` + if _, err := p.db.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + if err := p.ensureVersionTable(); err != nil { + return err + } + } + + return nil +} + +func (p *Postgres) ensureVersionTable() error { + // check if migration table exists + var count int + query := `SELECT COUNT(1) FROM information_schema.tables WHERE table_name = $1 AND table_schema = (SELECT current_schema()) LIMIT 1` + if err := p.db.QueryRow(query, p.config.MigrationsTable).Scan(&count); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + if count == 1 { + return nil + } + + // if not, create the empty migration table + query = `CREATE TABLE "` + p.config.MigrationsTable + `" (version bigint not null primary key, dirty boolean not null)` + if _, err := p.db.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + return nil +} diff --git a/vendor/github.com/mattes/migrate/database/postgres/postgres_test.go b/vendor/github.com/mattes/migrate/database/postgres/postgres_test.go new file mode 100644 index 000000000..9a367a059 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/postgres/postgres_test.go @@ -0,0 +1,150 @@ +package postgres + +// error codes https://github.com/lib/pq/blob/master/error.go + +import ( + "bytes" + "database/sql" + "fmt" + "io" + "testing" + + "github.com/lib/pq" + dt "github.com/mattes/migrate/database/testing" + mt "github.com/mattes/migrate/testing" +) + +var versions = []mt.Version{ + {Image: "postgres:9.6"}, + {Image: "postgres:9.5"}, + {Image: "postgres:9.4"}, + {Image: "postgres:9.3"}, + {Image: "postgres:9.2"}, +} + +func isReady(i mt.Instance) bool { + db, err := sql.Open("postgres", fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable", i.Host(), i.Port())) + if err != nil { + return false + } + defer db.Close() + err = db.Ping() + if err == io.EOF { + return false + + } else if e, ok := err.(*pq.Error); ok { + if e.Code.Name() == "cannot_connect_now" { + return false + } + } + + return true +} + +func Test(t *testing.T) { + mt.ParallelTest(t, versions, isReady, + func(t *testing.T, i mt.Instance) { + p := &Postgres{} + addr := fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable", i.Host(), i.Port()) + d, err := p.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + dt.Test(t, d, []byte("SELECT 1")) + }) +} + +func TestMultiStatement(t *testing.T) { + mt.ParallelTest(t, versions, isReady, + func(t *testing.T, i mt.Instance) { + p := &Postgres{} + addr := fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable", i.Host(), i.Port()) + d, err := p.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + if err := d.Run(bytes.NewReader([]byte("CREATE TABLE foo (foo text); CREATE TABLE bar (bar text);"))); err != nil { + t.Fatalf("expected err to be nil, got %v", err) + } + + // make sure second table exists + var exists bool + if err := d.(*Postgres).db.QueryRow("SELECT EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = 'bar' AND table_schema = (SELECT current_schema()))").Scan(&exists); err != nil { + t.Fatal(err) + } + if !exists { + t.Fatalf("expected table bar to exist") + } + }) +} + +func TestFilterCustomQuery(t *testing.T) { + mt.ParallelTest(t, versions, isReady, + func(t *testing.T, i mt.Instance) { + p := &Postgres{} + addr := 
fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable&x-custom=foobar", i.Host(), i.Port()) + _, err := p.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + }) +} + +func TestWithSchema(t *testing.T) { + mt.ParallelTest(t, versions, isReady, + func(t *testing.T, i mt.Instance) { + p := &Postgres{} + addr := fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable", i.Host(), i.Port()) + d, err := p.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + + // create foobar schema + if err := d.Run(bytes.NewReader([]byte("CREATE SCHEMA foobar AUTHORIZATION postgres"))); err != nil { + t.Fatal(err) + } + if err := d.SetVersion(1, false); err != nil { + t.Fatal(err) + } + + // re-connect using that schema + d2, err := p.Open(fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable&search_path=foobar", i.Host(), i.Port())) + if err != nil { + t.Fatalf("%v", err) + } + + version, _, err := d2.Version() + if err != nil { + t.Fatal(err) + } + if version != -1 { + t.Fatal("expected NilVersion") + } + + // now update version and compare + if err := d2.SetVersion(2, false); err != nil { + t.Fatal(err) + } + version, _, err = d2.Version() + if err != nil { + t.Fatal(err) + } + if version != 2 { + t.Fatal("expected version 2") + } + + // meanwhile, the public schema still has the other version + version, _, err = d.Version() + if err != nil { + t.Fatal(err) + } + if version != 1 { + t.Fatal("expected version 2") + } + }) +} + +func TestWithInstance(t *testing.T) { + +} diff --git a/vendor/github.com/mattes/migrate/database/ql/README.md b/vendor/github.com/mattes/migrate/database/ql/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/mattes/migrate/database/ql/migration/33_create_table.down.sql b/vendor/github.com/mattes/migrate/database/ql/migration/33_create_table.down.sql new file mode 100644 index 000000000..72d18c554 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/ql/migration/33_create_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS pets; \ No newline at end of file diff --git a/vendor/github.com/mattes/migrate/database/ql/migration/33_create_table.up.sql b/vendor/github.com/mattes/migrate/database/ql/migration/33_create_table.up.sql new file mode 100644 index 000000000..5ad3404d1 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/ql/migration/33_create_table.up.sql @@ -0,0 +1,3 @@ +CREATE TABLE pets ( + name string +); \ No newline at end of file diff --git a/vendor/github.com/mattes/migrate/database/ql/migration/44_alter_table.down.sql b/vendor/github.com/mattes/migrate/database/ql/migration/44_alter_table.down.sql new file mode 100644 index 000000000..72d18c554 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/ql/migration/44_alter_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS pets; \ No newline at end of file diff --git a/vendor/github.com/mattes/migrate/database/ql/migration/44_alter_table.up.sql b/vendor/github.com/mattes/migrate/database/ql/migration/44_alter_table.up.sql new file mode 100644 index 000000000..3993698de --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/ql/migration/44_alter_table.up.sql @@ -0,0 +1 @@ +ALTER TABLE pets ADD predator bool;; \ No newline at end of file diff --git a/vendor/github.com/mattes/migrate/database/ql/ql.go b/vendor/github.com/mattes/migrate/database/ql/ql.go new file mode 100644 index 000000000..46722a9c2 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/ql/ql.go @@ -0,0 +1,212 @@ +package ql + +import ( + 
"database/sql" + "fmt" + "io" + "io/ioutil" + "strings" + + nurl "net/url" + + _ "github.com/cznic/ql/driver" + "github.com/mattes/migrate" + "github.com/mattes/migrate/database" +) + +func init() { + database.Register("ql", &Ql{}) +} + +var DefaultMigrationsTable = "schema_migrations" +var ( + ErrDatabaseDirty = fmt.Errorf("database is dirty") + ErrNilConfig = fmt.Errorf("no config") + ErrNoDatabaseName = fmt.Errorf("no database name") + ErrAppendPEM = fmt.Errorf("failed to append PEM") +) + +type Config struct { + MigrationsTable string + DatabaseName string +} + +type Ql struct { + db *sql.DB + isLocked bool + + config *Config +} + +func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) { + if config == nil { + return nil, ErrNilConfig + } + + if err := instance.Ping(); err != nil { + return nil, err + } + if len(config.MigrationsTable) == 0 { + config.MigrationsTable = DefaultMigrationsTable + } + + mx := &Ql{ + db: instance, + config: config, + } + if err := mx.ensureVersionTable(); err != nil { + return nil, err + } + return mx, nil +} +func (m *Ql) ensureVersionTable() error { + tx, err := m.db.Begin() + if err != nil { + return err + } + if _, err := tx.Exec(fmt.Sprintf(` + CREATE TABLE IF NOT EXISTS %s (version uint64,dirty bool); + CREATE UNIQUE INDEX IF NOT EXISTS version_unique ON %s (version); +`, m.config.MigrationsTable, m.config.MigrationsTable)); err != nil { + if err := tx.Rollback(); err != nil { + return err + } + return err + } + if err := tx.Commit(); err != nil { + return err + } + return nil +} + +func (m *Ql) Open(url string) (database.Driver, error) { + purl, err := nurl.Parse(url) + if err != nil { + return nil, err + } + dbfile := strings.Replace(migrate.FilterCustomQuery(purl).String(), "ql://", "", 1) + db, err := sql.Open("ql", dbfile) + if err != nil { + return nil, err + } + migrationsTable := purl.Query().Get("x-migrations-table") + if len(migrationsTable) == 0 { + migrationsTable = DefaultMigrationsTable + } + mx, err := WithInstance(db, &Config{ + DatabaseName: purl.Path, + MigrationsTable: migrationsTable, + }) + if err != nil { + return nil, err + } + return mx, nil +} +func (m *Ql) Close() error { + return m.db.Close() +} +func (m *Ql) Drop() error { + query := `SELECT Name FROM __Table` + tables, err := m.db.Query(query) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + defer tables.Close() + tableNames := make([]string, 0) + for tables.Next() { + var tableName string + if err := tables.Scan(&tableName); err != nil { + return err + } + if len(tableName) > 0 { + if strings.HasPrefix(tableName, "__") == false { + tableNames = append(tableNames, tableName) + } + } + } + if len(tableNames) > 0 { + for _, t := range tableNames { + query := "DROP TABLE " + t + err = m.executeQuery(query) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + if err := m.ensureVersionTable(); err != nil { + return err + } + } + + return nil +} +func (m *Ql) Lock() error { + if m.isLocked { + return database.ErrLocked + } + m.isLocked = true + return nil +} +func (m *Ql) Unlock() error { + if !m.isLocked { + return nil + } + m.isLocked = false + return nil +} +func (m *Ql) Run(migration io.Reader) error { + migr, err := ioutil.ReadAll(migration) + if err != nil { + return err + } + query := string(migr[:]) + + return m.executeQuery(query) +} +func (m *Ql) executeQuery(query string) error { + tx, err := m.db.Begin() + if err != nil { + return &database.Error{OrigErr: err, Err: 
"transaction start failed"} + } + if _, err := tx.Exec(query); err != nil { + tx.Rollback() + return &database.Error{OrigErr: err, Query: []byte(query)} + } + if err := tx.Commit(); err != nil { + return &database.Error{OrigErr: err, Err: "transaction commit failed"} + } + return nil +} +func (m *Ql) SetVersion(version int, dirty bool) error { + tx, err := m.db.Begin() + if err != nil { + return &database.Error{OrigErr: err, Err: "transaction start failed"} + } + + query := "TRUNCATE TABLE " + m.config.MigrationsTable + if _, err := tx.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + + if version >= 0 { + query := fmt.Sprintf(`INSERT INTO %s (version, dirty) VALUES (%d, %t)`, m.config.MigrationsTable, version, dirty) + if _, err := tx.Exec(query); err != nil { + tx.Rollback() + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + + if err := tx.Commit(); err != nil { + return &database.Error{OrigErr: err, Err: "transaction commit failed"} + } + + return nil +} + +func (m *Ql) Version() (version int, dirty bool, err error) { + query := "SELECT version, dirty FROM " + m.config.MigrationsTable + " LIMIT 1" + err = m.db.QueryRow(query).Scan(&version, &dirty) + if err != nil { + return database.NilVersion, false, nil + } + return version, dirty, nil +} diff --git a/vendor/github.com/mattes/migrate/database/ql/ql_test.go b/vendor/github.com/mattes/migrate/database/ql/ql_test.go new file mode 100644 index 000000000..f04383fa2 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/ql/ql_test.go @@ -0,0 +1,62 @@ +package ql + +import ( + "database/sql" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + + _ "github.com/cznic/ql/driver" + "github.com/mattes/migrate" + dt "github.com/mattes/migrate/database/testing" + _ "github.com/mattes/migrate/source/file" +) + +func Test(t *testing.T) { + dir, err := ioutil.TempDir("", "ql-driver-test") + if err != nil { + return + } + defer func() { + os.RemoveAll(dir) + }() + fmt.Printf("DB path : %s\n", filepath.Join(dir, "ql.db")) + p := &Ql{} + addr := fmt.Sprintf("ql://%s", filepath.Join(dir, "ql.db")) + d, err := p.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + + db, err := sql.Open("ql", filepath.Join(dir, "ql.db")) + if err != nil { + return + } + defer func() { + if err := db.Close(); err != nil { + return + } + }() + dt.Test(t, d, []byte("CREATE TABLE t (Qty int, Name string);")) + driver, err := WithInstance(db, &Config{}) + if err != nil { + t.Fatalf("%v", err) + } + if err := d.Drop(); err != nil { + t.Fatal(err) + } + + m, err := migrate.NewWithDatabaseInstance( + "file://./migration", + "ql", driver) + if err != nil { + t.Fatalf("%v", err) + } + fmt.Println("UP") + err = m.Up() + if err != nil { + t.Fatalf("%v", err) + } +} diff --git a/vendor/github.com/mattes/migrate/database/redshift/README.md b/vendor/github.com/mattes/migrate/database/redshift/README.md new file mode 100644 index 000000000..a03d109ae --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/redshift/README.md @@ -0,0 +1,6 @@ +Redshift +=== + +This provides a Redshift driver for migrations. It is used whenever the URL of the database starts with `redshift://`. + +Redshift is PostgreSQL compatible but has some specific features (or lack thereof) that require slightly different behavior. 
diff --git a/vendor/github.com/mattes/migrate/database/redshift/redshift.go b/vendor/github.com/mattes/migrate/database/redshift/redshift.go new file mode 100644 index 000000000..99cdde725 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/redshift/redshift.go @@ -0,0 +1,46 @@ +package redshift + +import ( + "net/url" + + "github.com/mattes/migrate/database" + "github.com/mattes/migrate/database/postgres" +) + +// init registers the driver under the name 'redshift' +func init() { + db := new(Redshift) + db.Driver = new(postgres.Postgres) + + database.Register("redshift", db) +} + +// Redshift is a wrapper around the PostgreSQL driver which implements Redshift-specific behavior. +// +// Currently, the only different behaviour is the lack of locking in Redshift. The (Un)Lock() method(s) have been overridden from the PostgreSQL adapter to simply return nil. +type Redshift struct { + // The wrapped PostgreSQL driver. + database.Driver +} + +// Open implements the database.Driver interface by parsing the URL, switching the scheme from "redshift" to "postgres", and delegating to the underlying PostgreSQL driver. +func (driver *Redshift) Open(dsn string) (database.Driver, error) { + parsed, err := url.Parse(dsn) + if err != nil { + return nil, err + } + + parsed.Scheme = "postgres" + psql, err := driver.Driver.Open(parsed.String()) + if err != nil { + return nil, err + } + + return &Redshift{Driver: psql}, nil +} + +// Lock implements the database.Driver interface by not locking and returning nil. +func (driver *Redshift) Lock() error { return nil } + +// Unlock implements the database.Driver interface by not unlocking and returning nil. +func (driver *Redshift) Unlock() error { return nil } diff --git a/vendor/github.com/mattes/migrate/database/shell/README.md b/vendor/github.com/mattes/migrate/database/shell/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/mattes/migrate/database/spanner/README.md b/vendor/github.com/mattes/migrate/database/spanner/README.md new file mode 100644 index 000000000..0de867a8d --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/spanner/README.md @@ -0,0 +1,35 @@ +# Google Cloud Spanner + +## Usage + +The DSN must be given in the following format. + +`spanner://projects/{projectId}/instances/{instanceId}/databases/{databaseName}` + +See [Google Spanner Documentation](https://cloud.google.com/spanner/docs) for details. + + +| Param | WithInstance Config | Description | +| ----- | ------------------- | ----------- | +| `x-migrations-table` | `MigrationsTable` | Name of the migrations table | +| `url` | `DatabaseName` | The full path to the Spanner database resource. If provided as part of `Config` it must not contain a scheme or query string to match the format `projects/{projectId}/instances/{instanceId}/databases/{databaseName}`| +| `projectId` || The Google Cloud Platform project id +| `instanceId` || The id of the instance running Spanner +| `databaseName` || The name of the Spanner database + + +> **Note:** Google Cloud Spanner migrations can take a considerable amount of +> time. The migrations provided as part of the example take about 6 minutes to +> run on a small instance. +> +> ```log +> 1481574547/u create_users_table (21.354507597s) +> 1496539702/u add_city_to_users (41.647359754s) +> 1496601752/u add_index_on_user_emails (2m12.155787369s) +> 1496602638/u create_books_table (2m30.77299181s) + +## Testing + +To unit test the `spanner` driver, `SPANNER_DATABASE` needs to be set. 
You'll +need to sign-up to Google Cloud Platform (GCP) and have a running Spanner +instance since it is not possible to run Google Spanner outside GCP. \ No newline at end of file diff --git a/vendor/github.com/mattes/migrate/database/spanner/examples/migrations/1481574547_create_users_table.down.sql b/vendor/github.com/mattes/migrate/database/spanner/examples/migrations/1481574547_create_users_table.down.sql new file mode 100644 index 000000000..7bd522c12 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/spanner/examples/migrations/1481574547_create_users_table.down.sql @@ -0,0 +1 @@ +DROP TABLE Users diff --git a/vendor/github.com/mattes/migrate/database/spanner/examples/migrations/1481574547_create_users_table.up.sql b/vendor/github.com/mattes/migrate/database/spanner/examples/migrations/1481574547_create_users_table.up.sql new file mode 100644 index 000000000..97b8bdb74 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/spanner/examples/migrations/1481574547_create_users_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE Users ( + UserId INT64, + Name STRING(40), + Email STRING(83) +) PRIMARY KEY(UserId) \ No newline at end of file diff --git a/vendor/github.com/mattes/migrate/database/spanner/examples/migrations/1496539702_add_city_to_users.down.sql b/vendor/github.com/mattes/migrate/database/spanner/examples/migrations/1496539702_add_city_to_users.down.sql new file mode 100644 index 000000000..f0fcd0854 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/spanner/examples/migrations/1496539702_add_city_to_users.down.sql @@ -0,0 +1 @@ +ALTER TABLE Users DROP COLUMN city \ No newline at end of file diff --git a/vendor/github.com/mattes/migrate/database/spanner/examples/migrations/1496539702_add_city_to_users.up.sql b/vendor/github.com/mattes/migrate/database/spanner/examples/migrations/1496539702_add_city_to_users.up.sql new file mode 100644 index 000000000..b2d6c02bf --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/spanner/examples/migrations/1496539702_add_city_to_users.up.sql @@ -0,0 +1 @@ +ALTER TABLE Users ADD COLUMN city STRING(100) \ No newline at end of file diff --git a/vendor/github.com/mattes/migrate/database/spanner/examples/migrations/1496601752_add_index_on_user_emails.down.sql b/vendor/github.com/mattes/migrate/database/spanner/examples/migrations/1496601752_add_index_on_user_emails.down.sql new file mode 100644 index 000000000..29f92559d --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/spanner/examples/migrations/1496601752_add_index_on_user_emails.down.sql @@ -0,0 +1 @@ +DROP INDEX UsersEmailIndex diff --git a/vendor/github.com/mattes/migrate/database/spanner/examples/migrations/1496601752_add_index_on_user_emails.up.sql b/vendor/github.com/mattes/migrate/database/spanner/examples/migrations/1496601752_add_index_on_user_emails.up.sql new file mode 100644 index 000000000..e77b7f2db --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/spanner/examples/migrations/1496601752_add_index_on_user_emails.up.sql @@ -0,0 +1 @@ +CREATE UNIQUE INDEX UsersEmailIndex ON Users (Email) diff --git a/vendor/github.com/mattes/migrate/database/spanner/examples/migrations/1496602638_create_books_table.down.sql b/vendor/github.com/mattes/migrate/database/spanner/examples/migrations/1496602638_create_books_table.down.sql new file mode 100644 index 000000000..bd2ce054c --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/spanner/examples/migrations/1496602638_create_books_table.down.sql @@ -0,0 +1 @@ +DROP TABLE Books \ No 
newline at end of file diff --git a/vendor/github.com/mattes/migrate/database/spanner/examples/migrations/1496602638_create_books_table.up.sql b/vendor/github.com/mattes/migrate/database/spanner/examples/migrations/1496602638_create_books_table.up.sql new file mode 100644 index 000000000..0bfa0d484 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/spanner/examples/migrations/1496602638_create_books_table.up.sql @@ -0,0 +1,6 @@ +CREATE TABLE Books ( + UserId INT64, + Name STRING(40), + Author STRING(40) +) PRIMARY KEY(UserId, Name), +INTERLEAVE IN PARENT Users ON DELETE CASCADE diff --git a/vendor/github.com/mattes/migrate/database/spanner/spanner.go b/vendor/github.com/mattes/migrate/database/spanner/spanner.go new file mode 100644 index 000000000..6c65bab3f --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/spanner/spanner.go @@ -0,0 +1,294 @@ +package spanner + +import ( + "fmt" + "io" + "io/ioutil" + "log" + nurl "net/url" + "regexp" + "strings" + + "golang.org/x/net/context" + + "cloud.google.com/go/spanner" + sdb "cloud.google.com/go/spanner/admin/database/apiv1" + + "github.com/mattes/migrate" + "github.com/mattes/migrate/database" + + "google.golang.org/api/iterator" + adminpb "google.golang.org/genproto/googleapis/spanner/admin/database/v1" +) + +func init() { + db := Spanner{} + database.Register("spanner", &db) +} + +// DefaultMigrationsTable is used if no custom table is specified +const DefaultMigrationsTable = "SchemaMigrations" + +// Driver errors +var ( + ErrNilConfig = fmt.Errorf("no config") + ErrNoDatabaseName = fmt.Errorf("no database name") + ErrNoSchema = fmt.Errorf("no schema") + ErrDatabaseDirty = fmt.Errorf("database is dirty") +) + +// Config used for a Spanner instance +type Config struct { + MigrationsTable string + DatabaseName string +} + +// Spanner implements database.Driver for Google Cloud Spanner +type Spanner struct { + db *DB + + config *Config +} + +type DB struct { + admin *sdb.DatabaseAdminClient + data *spanner.Client +} + +// WithInstance implements database.Driver +func WithInstance(instance *DB, config *Config) (database.Driver, error) { + if config == nil { + return nil, ErrNilConfig + } + + if len(config.DatabaseName) == 0 { + return nil, ErrNoDatabaseName + } + + if len(config.MigrationsTable) == 0 { + config.MigrationsTable = DefaultMigrationsTable + } + + sx := &Spanner{ + db: instance, + config: config, + } + + if err := sx.ensureVersionTable(); err != nil { + return nil, err + } + + return sx, nil +} + +// Open implements database.Driver +func (s *Spanner) Open(url string) (database.Driver, error) { + purl, err := nurl.Parse(url) + if err != nil { + return nil, err + } + + ctx := context.Background() + + adminClient, err := sdb.NewDatabaseAdminClient(ctx) + if err != nil { + return nil, err + } + dbname := strings.Replace(migrate.FilterCustomQuery(purl).String(), "spanner://", "", 1) + dataClient, err := spanner.NewClient(ctx, dbname) + if err != nil { + log.Fatal(err) + } + + migrationsTable := purl.Query().Get("x-migrations-table") + if len(migrationsTable) == 0 { + migrationsTable = DefaultMigrationsTable + } + + db := &DB{admin: adminClient, data: dataClient} + return WithInstance(db, &Config{ + DatabaseName: dbname, + MigrationsTable: migrationsTable, + }) +} + +// Close implements database.Driver +func (s *Spanner) Close() error { + s.db.data.Close() + return s.db.admin.Close() +} + +// Lock implements database.Driver but doesn't do anything because Spanner only +// enqueues the UpdateDatabaseDdlRequest. 
+func (s *Spanner) Lock() error { + return nil +} + +// Unlock implements database.Driver but no action required, see Lock. +func (s *Spanner) Unlock() error { + return nil +} + +// Run implements database.Driver +func (s *Spanner) Run(migration io.Reader) error { + migr, err := ioutil.ReadAll(migration) + if err != nil { + return err + } + + // run migration + stmts := migrationStatements(migr) + ctx := context.Background() + + op, err := s.db.admin.UpdateDatabaseDdl(ctx, &adminpb.UpdateDatabaseDdlRequest{ + Database: s.config.DatabaseName, + Statements: stmts, + }) + + if err != nil { + return &database.Error{OrigErr: err, Err: "migration failed", Query: migr} + } + + if err := op.Wait(ctx); err != nil { + return &database.Error{OrigErr: err, Err: "migration failed", Query: migr} + } + + return nil +} + +// SetVersion implements database.Driver +func (s *Spanner) SetVersion(version int, dirty bool) error { + ctx := context.Background() + + _, err := s.db.data.ReadWriteTransaction(ctx, + func(ctx context.Context, txn *spanner.ReadWriteTransaction) error { + m := []*spanner.Mutation{ + spanner.Delete(s.config.MigrationsTable, spanner.AllKeys()), + spanner.Insert(s.config.MigrationsTable, + []string{"Version", "Dirty"}, + []interface{}{version, dirty}, + )} + return txn.BufferWrite(m) + }) + if err != nil { + return &database.Error{OrigErr: err} + } + + return nil +} + +// Version implements database.Driver +func (s *Spanner) Version() (version int, dirty bool, err error) { + ctx := context.Background() + + stmt := spanner.Statement{ + SQL: `SELECT Version, Dirty FROM ` + s.config.MigrationsTable + ` LIMIT 1`, + } + iter := s.db.data.Single().Query(ctx, stmt) + defer iter.Stop() + + row, err := iter.Next() + switch err { + case iterator.Done: + return database.NilVersion, false, nil + case nil: + var v int64 + if err = row.Columns(&v, &dirty); err != nil { + return 0, false, &database.Error{OrigErr: err, Query: []byte(stmt.SQL)} + } + version = int(v) + default: + return 0, false, &database.Error{OrigErr: err, Query: []byte(stmt.SQL)} + } + + return version, dirty, nil +} + +// Drop implements database.Driver. Retrieves the database schema first and +// creates statements to drop the indexes and tables accordingly. +// Note: The drop statements are created in reverse order to how they're +// provided in the schema. Assuming the schema describes how the database can +// be "build up", it seems logical to "unbuild" the database simply by going the +// opposite direction. 
More testing +func (s *Spanner) Drop() error { + ctx := context.Background() + res, err := s.db.admin.GetDatabaseDdl(ctx, &adminpb.GetDatabaseDdlRequest{ + Database: s.config.DatabaseName, + }) + if err != nil { + return &database.Error{OrigErr: err, Err: "drop failed"} + } + if len(res.Statements) == 0 { + return nil + } + + r := regexp.MustCompile(`(CREATE TABLE\s(\S+)\s)|(CREATE.+INDEX\s(\S+)\s)`) + stmts := make([]string, 0) + for i := len(res.Statements) - 1; i >= 0; i-- { + s := res.Statements[i] + m := r.FindSubmatch([]byte(s)) + + if len(m) == 0 { + continue + } else if tbl := m[2]; len(tbl) > 0 { + stmts = append(stmts, fmt.Sprintf(`DROP TABLE %s`, tbl)) + } else if idx := m[4]; len(idx) > 0 { + stmts = append(stmts, fmt.Sprintf(`DROP INDEX %s`, idx)) + } + } + + op, err := s.db.admin.UpdateDatabaseDdl(ctx, &adminpb.UpdateDatabaseDdlRequest{ + Database: s.config.DatabaseName, + Statements: stmts, + }) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(strings.Join(stmts, "; "))} + } + if err := op.Wait(ctx); err != nil { + return &database.Error{OrigErr: err, Query: []byte(strings.Join(stmts, "; "))} + } + + if err := s.ensureVersionTable(); err != nil { + return err + } + + return nil +} + +func (s *Spanner) ensureVersionTable() error { + ctx := context.Background() + tbl := s.config.MigrationsTable + iter := s.db.data.Single().Read(ctx, tbl, spanner.AllKeys(), []string{"Version"}) + if err := iter.Do(func(r *spanner.Row) error { return nil }); err == nil { + return nil + } + + stmt := fmt.Sprintf(`CREATE TABLE %s ( + Version INT64 NOT NULL, + Dirty BOOL NOT NULL + ) PRIMARY KEY(Version)`, tbl) + + op, err := s.db.admin.UpdateDatabaseDdl(ctx, &adminpb.UpdateDatabaseDdlRequest{ + Database: s.config.DatabaseName, + Statements: []string{stmt}, + }) + + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(stmt)} + } + if err := op.Wait(ctx); err != nil { + return &database.Error{OrigErr: err, Query: []byte(stmt)} + } + + return nil +} + +func migrationStatements(migration []byte) []string { + regex := regexp.MustCompile(";$") + migrationString := string(migration[:]) + migrationString = strings.TrimSpace(migrationString) + migrationString = regex.ReplaceAllString(migrationString, "") + + statements := strings.Split(migrationString, ";") + return statements +} diff --git a/vendor/github.com/mattes/migrate/database/spanner/spanner_test.go b/vendor/github.com/mattes/migrate/database/spanner/spanner_test.go new file mode 100644 index 000000000..43d475ca4 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/spanner/spanner_test.go @@ -0,0 +1,28 @@ +package spanner + +import ( + "fmt" + "os" + "testing" + + dt "github.com/mattes/migrate/database/testing" +) + +func Test(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + db, ok := os.LookupEnv("SPANNER_DATABASE") + if !ok { + t.Skip("SPANNER_DATABASE not set, skipping test.") + } + + s := &Spanner{} + addr := fmt.Sprintf("spanner://%v", db) + d, err := s.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + dt.Test(t, d, []byte("SELECT 1")) +} diff --git a/vendor/github.com/mattes/migrate/database/sqlite3/README.md b/vendor/github.com/mattes/migrate/database/sqlite3/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/mattes/migrate/database/sqlite3/migration/33_create_table.down.sql b/vendor/github.com/mattes/migrate/database/sqlite3/migration/33_create_table.down.sql new file mode 100644 index 000000000..72d18c554 --- 
/dev/null +++ b/vendor/github.com/mattes/migrate/database/sqlite3/migration/33_create_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS pets; \ No newline at end of file diff --git a/vendor/github.com/mattes/migrate/database/sqlite3/migration/33_create_table.up.sql b/vendor/github.com/mattes/migrate/database/sqlite3/migration/33_create_table.up.sql new file mode 100644 index 000000000..5ad3404d1 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/sqlite3/migration/33_create_table.up.sql @@ -0,0 +1,3 @@ +CREATE TABLE pets ( + name string +); \ No newline at end of file diff --git a/vendor/github.com/mattes/migrate/database/sqlite3/migration/44_alter_table.down.sql b/vendor/github.com/mattes/migrate/database/sqlite3/migration/44_alter_table.down.sql new file mode 100644 index 000000000..72d18c554 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/sqlite3/migration/44_alter_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS pets; \ No newline at end of file diff --git a/vendor/github.com/mattes/migrate/database/sqlite3/migration/44_alter_table.up.sql b/vendor/github.com/mattes/migrate/database/sqlite3/migration/44_alter_table.up.sql new file mode 100644 index 000000000..f0682fcca --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/sqlite3/migration/44_alter_table.up.sql @@ -0,0 +1 @@ +ALTER TABLE pets ADD predator bool; diff --git a/vendor/github.com/mattes/migrate/database/sqlite3/sqlite3.go b/vendor/github.com/mattes/migrate/database/sqlite3/sqlite3.go new file mode 100644 index 000000000..bfd1a5b81 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/sqlite3/sqlite3.go @@ -0,0 +1,214 @@ +package sqlite3 + +import ( + "database/sql" + "fmt" + "github.com/mattes/migrate" + "github.com/mattes/migrate/database" + _ "github.com/mattn/go-sqlite3" + "io" + "io/ioutil" + nurl "net/url" + "strings" +) + +func init() { + database.Register("sqlite3", &Sqlite{}) +} + +var DefaultMigrationsTable = "schema_migrations" +var ( + ErrDatabaseDirty = fmt.Errorf("database is dirty") + ErrNilConfig = fmt.Errorf("no config") + ErrNoDatabaseName = fmt.Errorf("no database name") +) + +type Config struct { + MigrationsTable string + DatabaseName string +} + +type Sqlite struct { + db *sql.DB + isLocked bool + + config *Config +} + +func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) { + if config == nil { + return nil, ErrNilConfig + } + + if err := instance.Ping(); err != nil { + return nil, err + } + if len(config.MigrationsTable) == 0 { + config.MigrationsTable = DefaultMigrationsTable + } + + mx := &Sqlite{ + db: instance, + config: config, + } + if err := mx.ensureVersionTable(); err != nil { + return nil, err + } + return mx, nil +} + +func (m *Sqlite) ensureVersionTable() error { + + query := fmt.Sprintf(` + CREATE TABLE IF NOT EXISTS %s (version uint64,dirty bool); + CREATE UNIQUE INDEX IF NOT EXISTS version_unique ON %s (version); + `, DefaultMigrationsTable, DefaultMigrationsTable) + + if _, err := m.db.Exec(query); err != nil { + return err + } + return nil +} + +func (m *Sqlite) Open(url string) (database.Driver, error) { + purl, err := nurl.Parse(url) + if err != nil { + return nil, err + } + dbfile := strings.Replace(migrate.FilterCustomQuery(purl).String(), "sqlite3://", "", 1) + db, err := sql.Open("sqlite3", dbfile) + if err != nil { + return nil, err + } + + migrationsTable := purl.Query().Get("x-migrations-table") + if len(migrationsTable) == 0 { + migrationsTable = DefaultMigrationsTable + } + mx, err := WithInstance(db, &Config{ 
+ DatabaseName: purl.Path, + MigrationsTable: migrationsTable, + }) + if err != nil { + return nil, err + } + return mx, nil +} + +func (m *Sqlite) Close() error { + return m.db.Close() +} + +func (m *Sqlite) Drop() error { + query := `SELECT name FROM sqlite_master WHERE type = 'table';` + tables, err := m.db.Query(query) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + defer tables.Close() + tableNames := make([]string, 0) + for tables.Next() { + var tableName string + if err := tables.Scan(&tableName); err != nil { + return err + } + if len(tableName) > 0 { + tableNames = append(tableNames, tableName) + } + } + if len(tableNames) > 0 { + for _, t := range tableNames { + query := "DROP TABLE " + t + err = m.executeQuery(query) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + if err := m.ensureVersionTable(); err != nil { + return err + } + query := "VACUUM" + _, err = m.db.Query(query) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + + return nil +} + +func (m *Sqlite) Lock() error { + if m.isLocked { + return database.ErrLocked + } + m.isLocked = true + return nil +} + +func (m *Sqlite) Unlock() error { + if !m.isLocked { + return nil + } + m.isLocked = false + return nil +} + +func (m *Sqlite) Run(migration io.Reader) error { + migr, err := ioutil.ReadAll(migration) + if err != nil { + return err + } + query := string(migr[:]) + + return m.executeQuery(query) +} + +func (m *Sqlite) executeQuery(query string) error { + tx, err := m.db.Begin() + if err != nil { + return &database.Error{OrigErr: err, Err: "transaction start failed"} + } + if _, err := tx.Exec(query); err != nil { + tx.Rollback() + return &database.Error{OrigErr: err, Query: []byte(query)} + } + if err := tx.Commit(); err != nil { + return &database.Error{OrigErr: err, Err: "transaction commit failed"} + } + return nil +} + +func (m *Sqlite) SetVersion(version int, dirty bool) error { + tx, err := m.db.Begin() + if err != nil { + return &database.Error{OrigErr: err, Err: "transaction start failed"} + } + + query := "DELETE FROM " + m.config.MigrationsTable + if _, err := tx.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + + if version >= 0 { + query := fmt.Sprintf(`INSERT INTO %s (version, dirty) VALUES (%d, '%t')`, m.config.MigrationsTable, version, dirty) + if _, err := tx.Exec(query); err != nil { + tx.Rollback() + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + + if err := tx.Commit(); err != nil { + return &database.Error{OrigErr: err, Err: "transaction commit failed"} + } + + return nil +} + +func (m *Sqlite) Version() (version int, dirty bool, err error) { + query := "SELECT version, dirty FROM " + m.config.MigrationsTable + " LIMIT 1" + err = m.db.QueryRow(query).Scan(&version, &dirty) + if err != nil { + return database.NilVersion, false, nil + } + return version, dirty, nil +} diff --git a/vendor/github.com/mattes/migrate/database/sqlite3/sqlite3_test.go b/vendor/github.com/mattes/migrate/database/sqlite3/sqlite3_test.go new file mode 100644 index 000000000..6a5c5c864 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/sqlite3/sqlite3_test.go @@ -0,0 +1,61 @@ +package sqlite3 + +import ( + "database/sql" + "fmt" + "github.com/mattes/migrate" + dt "github.com/mattes/migrate/database/testing" + _ "github.com/mattes/migrate/source/file" + _ "github.com/mattn/go-sqlite3" + "io/ioutil" + "os" + "path/filepath" + "testing" +) + 
+func Test(t *testing.T) { + dir, err := ioutil.TempDir("", "sqlite3-driver-test") + if err != nil { + return + } + defer func() { + os.RemoveAll(dir) + }() + fmt.Printf("DB path : %s\n", filepath.Join(dir, "sqlite3.db")) + p := &Sqlite{} + addr := fmt.Sprintf("sqlite3://%s", filepath.Join(dir, "sqlite3.db")) + d, err := p.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + + db, err := sql.Open("sqlite3", filepath.Join(dir, "sqlite3.db")) + if err != nil { + return + } + defer func() { + if err := db.Close(); err != nil { + return + } + }() + dt.Test(t, d, []byte("CREATE TABLE t (Qty int, Name string);")) + driver, err := WithInstance(db, &Config{}) + if err != nil { + t.Fatalf("%v", err) + } + if err := d.Drop(); err != nil { + t.Fatal(err) + } + + m, err := migrate.NewWithDatabaseInstance( + "file://./migration", + "sqlite3", driver) + if err != nil { + t.Fatalf("%v", err) + } + fmt.Println("UP") + err = m.Up() + if err != nil { + t.Fatalf("%v", err) + } +} diff --git a/vendor/github.com/mattes/migrate/database/stub/stub.go b/vendor/github.com/mattes/migrate/database/stub/stub.go new file mode 100644 index 000000000..172bcd37b --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/stub/stub.go @@ -0,0 +1,95 @@ +package stub + +import ( + "io" + "io/ioutil" + "reflect" + + "github.com/mattes/migrate/database" +) + +func init() { + database.Register("stub", &Stub{}) +} + +type Stub struct { + Url string + Instance interface{} + CurrentVersion int + MigrationSequence []string + LastRunMigration []byte // todo: make []string + IsDirty bool + IsLocked bool + + Config *Config +} + +func (s *Stub) Open(url string) (database.Driver, error) { + return &Stub{ + Url: url, + CurrentVersion: -1, + MigrationSequence: make([]string, 0), + Config: &Config{}, + }, nil +} + +type Config struct{} + +func WithInstance(instance interface{}, config *Config) (database.Driver, error) { + return &Stub{ + Instance: instance, + CurrentVersion: -1, + MigrationSequence: make([]string, 0), + Config: config, + }, nil +} + +func (s *Stub) Close() error { + return nil +} + +func (s *Stub) Lock() error { + if s.IsLocked { + return database.ErrLocked + } + s.IsLocked = true + return nil +} + +func (s *Stub) Unlock() error { + s.IsLocked = false + return nil +} + +func (s *Stub) Run(migration io.Reader) error { + m, err := ioutil.ReadAll(migration) + if err != nil { + return err + } + s.LastRunMigration = m + s.MigrationSequence = append(s.MigrationSequence, string(m[:])) + return nil +} + +func (s *Stub) SetVersion(version int, state bool) error { + s.CurrentVersion = version + s.IsDirty = state + return nil +} + +func (s *Stub) Version() (version int, dirty bool, err error) { + return s.CurrentVersion, s.IsDirty, nil +} + +const DROP = "DROP" + +func (s *Stub) Drop() error { + s.CurrentVersion = -1 + s.LastRunMigration = nil + s.MigrationSequence = append(s.MigrationSequence, DROP) + return nil +} + +func (s *Stub) EqualSequence(seq []string) bool { + return reflect.DeepEqual(seq, s.MigrationSequence) +} diff --git a/vendor/github.com/mattes/migrate/database/stub/stub_test.go b/vendor/github.com/mattes/migrate/database/stub/stub_test.go new file mode 100644 index 000000000..3d8b8926c --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/stub/stub_test.go @@ -0,0 +1,16 @@ +package stub + +import ( + "testing" + + dt "github.com/mattes/migrate/database/testing" +) + +func Test(t *testing.T) { + s := &Stub{} + d, err := s.Open("") + if err != nil { + t.Fatal(err) + } + dt.Test(t, d, []byte("/* foobar
migration */")) +} diff --git a/vendor/github.com/mattes/migrate/database/testing/testing.go b/vendor/github.com/mattes/migrate/database/testing/testing.go new file mode 100644 index 000000000..4ab090d1a --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/testing/testing.go @@ -0,0 +1,138 @@ +// Package testing has the database tests. +// All database drivers must pass the Test function. +// This lives in it's own package so it stays a test dependency. +package testing + +import ( + "bytes" + "fmt" + "io" + "testing" + "time" + + "github.com/mattes/migrate/database" +) + +// Test runs tests against database implementations. +func Test(t *testing.T, d database.Driver, migration []byte) { + if migration == nil { + panic("test must provide migration reader") + } + + TestNilVersion(t, d) // test first + TestLockAndUnlock(t, d) + TestRun(t, d, bytes.NewReader(migration)) + TestDrop(t, d) + TestSetVersion(t, d) // also tests Version() +} + +func TestNilVersion(t *testing.T, d database.Driver) { + v, _, err := d.Version() + if err != nil { + t.Fatal(err) + } + if v != database.NilVersion { + t.Fatalf("Version: expected version to be NilVersion (-1), got %v", v) + } +} + +func TestLockAndUnlock(t *testing.T, d database.Driver) { + // add a timeout, in case there is a deadlock + done := make(chan bool, 1) + go func() { + timeout := time.After(15 * time.Second) + for { + select { + case <-done: + return + case <-timeout: + panic(fmt.Sprintf("Timeout after 15 seconds. Looks like a deadlock in Lock/UnLock.\n%#v", d)) + } + } + }() + defer func() { + done <- true + }() + + // run the locking test ... + + if err := d.Lock(); err != nil { + t.Fatal(err) + } + + // try to acquire lock again + if err := d.Lock(); err == nil { + t.Fatal("Lock: expected err not to be nil") + } + + // unlock + if err := d.Unlock(); err != nil { + t.Fatal(err) + } + + // try to lock + if err := d.Lock(); err != nil { + t.Fatal(err) + } + if err := d.Unlock(); err != nil { + t.Fatal(err) + } +} + +func TestRun(t *testing.T, d database.Driver, migration io.Reader) { + if migration == nil { + panic("migration can't be nil") + } + + if err := d.Run(migration); err != nil { + t.Fatal(err) + } +} + +func TestDrop(t *testing.T, d database.Driver) { + if err := d.Drop(); err != nil { + t.Fatal(err) + } +} + +func TestSetVersion(t *testing.T, d database.Driver) { + if err := d.SetVersion(1, true); err != nil { + t.Fatal(err) + } + + // call again + if err := d.SetVersion(1, true); err != nil { + t.Fatal(err) + } + + v, dirty, err := d.Version() + if err != nil { + t.Fatal(err) + } + if !dirty { + t.Fatal("expected dirty") + } + if v != 1 { + t.Fatal("expected version to be 1") + } + + if err := d.SetVersion(2, false); err != nil { + t.Fatal(err) + } + + // call again + if err := d.SetVersion(2, false); err != nil { + t.Fatal(err) + } + + v, dirty, err = d.Version() + if err != nil { + t.Fatal(err) + } + if dirty { + t.Fatal("expected not dirty") + } + if v != 2 { + t.Fatal("expected version to be 2") + } +} diff --git a/vendor/github.com/mattes/migrate/database/util.go b/vendor/github.com/mattes/migrate/database/util.go new file mode 100644 index 000000000..c636a7abe --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/util.go @@ -0,0 +1,15 @@ +package database + +import ( + "fmt" + "hash/crc32" +) + +const advisoryLockIdSalt uint = 1486364155 + +// inspired by rails migrations, see https://goo.gl/8o9bCT +func GenerateAdvisoryLockId(databaseName string) (string, error) { + sum := 
crc32.ChecksumIEEE([]byte(databaseName)) + sum = sum * uint32(advisoryLockIdSalt) + return fmt.Sprintf("%v", sum), nil +} diff --git a/vendor/github.com/mattes/migrate/database/util_test.go b/vendor/github.com/mattes/migrate/database/util_test.go new file mode 100644 index 000000000..905c840b9 --- /dev/null +++ b/vendor/github.com/mattes/migrate/database/util_test.go @@ -0,0 +1,14 @@ +package database + +import "testing" + +func TestGenerateAdvisoryLockId(t *testing.T) { + id, err := GenerateAdvisoryLockId("database_name") + if err != nil { + t.Errorf("expected err to be nil, got %v", err) + } + if len(id) == 0 { + t.Errorf("expected generated id not to be empty") + } + t.Logf("generated id: %v", id) +} diff --git a/vendor/github.com/mattes/migrate/log.go b/vendor/github.com/mattes/migrate/log.go new file mode 100644 index 000000000..cb00b7798 --- /dev/null +++ b/vendor/github.com/mattes/migrate/log.go @@ -0,0 +1,12 @@ +package migrate + +// Logger is an interface so you can pass in your own +// logging implementation. +type Logger interface { + + // Printf is like fmt.Printf + Printf(format string, v ...interface{}) + + // Verbose should return true when verbose logging output is wanted + Verbose() bool +} diff --git a/vendor/github.com/mattes/migrate/migrate.go b/vendor/github.com/mattes/migrate/migrate.go new file mode 100644 index 000000000..58414e8fc --- /dev/null +++ b/vendor/github.com/mattes/migrate/migrate.go @@ -0,0 +1,920 @@ +// Package migrate reads migrations from sources and runs them against databases. +// Sources are defined by the `source.Driver` and databases by the `database.Driver` +// interface. The driver interfaces are kept "dumb", all migration logic is kept +// in this package. +package migrate + +import ( + "fmt" + "os" + "sync" + "time" + + "github.com/mattes/migrate/database" + "github.com/mattes/migrate/source" +) + +// DefaultPrefetchMigrations sets the number of migrations to pre-read +// from the source. This is helpful if the source is remote, but has little +// effect for a local source (i.e. file system). +// Please note that this setting has a major impact on the memory usage, +// since each pre-read migration is buffered in memory. See DefaultBufferSize. +var DefaultPrefetchMigrations = uint(10) + +// DefaultLockTimeout sets the max time a database driver has to acquire a lock. +var DefaultLockTimeout = 15 * time.Second + +var ( + ErrNoChange = fmt.Errorf("no change") + ErrNilVersion = fmt.Errorf("no migration") + ErrLocked = fmt.Errorf("database locked") + ErrLockTimeout = fmt.Errorf("timeout: can't acquire database lock") +) + +// ErrShortLimit is an error returned when not enough migrations +// can be returned by a source for a given limit. +type ErrShortLimit struct { + Short uint +} + +// Error implements the error interface. +func (e ErrShortLimit) Error() string { + return fmt.Sprintf("limit %v short", e.Short) +} + +type ErrDirty struct { + Version int +} + +func (e ErrDirty) Error() string { + return fmt.Sprintf("Dirty database version %v. Fix and force version.", e.Version) +} + +type Migrate struct { + sourceName string + sourceDrv source.Driver + databaseName string + databaseDrv database.Driver + + // Log accepts a Logger interface + Log Logger + + // GracefulStop accepts `true` and will stop executing migrations + // as soon as possible at a safe break point, so that the database + // is not corrupted.
+ GracefulStop chan bool + isGracefulStop bool + + isLockedMu *sync.Mutex + isLocked bool + + // PrefetchMigrations defaults to DefaultPrefetchMigrations, + // but can be set per Migrate instance. + PrefetchMigrations uint + + // LockTimeout defaults to DefaultLockTimeout, + // but can be set per Migrate instance. + LockTimeout time.Duration +} + +// New returns a new Migrate instance from a source URL and a database URL. +// The URL scheme is defined by each driver. +func New(sourceUrl, databaseUrl string) (*Migrate, error) { + m := newCommon() + + sourceName, err := schemeFromUrl(sourceUrl) + if err != nil { + return nil, err + } + m.sourceName = sourceName + + databaseName, err := schemeFromUrl(databaseUrl) + if err != nil { + return nil, err + } + m.databaseName = databaseName + + sourceDrv, err := source.Open(sourceUrl) + if err != nil { + return nil, err + } + m.sourceDrv = sourceDrv + + databaseDrv, err := database.Open(databaseUrl) + if err != nil { + return nil, err + } + m.databaseDrv = databaseDrv + + return m, nil +} + +// NewWithDatabaseInstance returns a new Migrate instance from a source URL +// and an existing database instance. The source URL scheme is defined by each driver. +// Use any string that can serve as an identifier during logging as databaseName. +// You are responsible for closing the underlying database client if necessary. +func NewWithDatabaseInstance(sourceUrl string, databaseName string, databaseInstance database.Driver) (*Migrate, error) { + m := newCommon() + + sourceName, err := schemeFromUrl(sourceUrl) + if err != nil { + return nil, err + } + m.sourceName = sourceName + + m.databaseName = databaseName + + sourceDrv, err := source.Open(sourceUrl) + if err != nil { + return nil, err + } + m.sourceDrv = sourceDrv + + m.databaseDrv = databaseInstance + + return m, nil +} + +// NewWithSourceInstance returns a new Migrate instance from an existing source instance +// and a database URL. The database URL scheme is defined by each driver. +// Use any string that can serve as an identifier during logging as sourceName. +// You are responsible for closing the underlying source client if necessary. +func NewWithSourceInstance(sourceName string, sourceInstance source.Driver, databaseUrl string) (*Migrate, error) { + m := newCommon() + + databaseName, err := schemeFromUrl(databaseUrl) + if err != nil { + return nil, err + } + m.databaseName = databaseName + + m.sourceName = sourceName + + databaseDrv, err := database.Open(databaseUrl) + if err != nil { + return nil, err + } + m.databaseDrv = databaseDrv + + m.sourceDrv = sourceInstance + + return m, nil +} + +// NewWithInstance returns a new Migrate instance from an existing source and +// database instance. Use any string that can serve as an identifier during logging +// as sourceName and databaseName. You are responsible for closing down +// the underlying source and database client if necessary. +func NewWithInstance(sourceName string, sourceInstance source.Driver, databaseName string, databaseInstance database.Driver) (*Migrate, error) { + m := newCommon() + + m.sourceName = sourceName + m.databaseName = databaseName + + m.sourceDrv = sourceInstance + m.databaseDrv = databaseInstance + + return m, nil +} + +func newCommon() *Migrate { + return &Migrate{ + GracefulStop: make(chan bool, 1), + PrefetchMigrations: DefaultPrefetchMigrations, + LockTimeout: DefaultLockTimeout, + isLockedMu: &sync.Mutex{}, + } +} + +// Close closes the the source and the database. 
+func (m *Migrate) Close() (source error, database error) { + databaseSrvClose := make(chan error) + sourceSrvClose := make(chan error) + + m.logVerbosePrintf("Closing source and database\n") + + go func() { + databaseSrvClose <- m.databaseDrv.Close() + }() + + go func() { + sourceSrvClose <- m.sourceDrv.Close() + }() + + return <-sourceSrvClose, <-databaseSrvClose +} + +// Migrate looks at the currently active migration version, +// then migrates either up or down to the specified version. +func (m *Migrate) Migrate(version uint) error { + if err := m.lock(); err != nil { + return err + } + + curVersion, dirty, err := m.databaseDrv.Version() + if err != nil { + return m.unlockErr(err) + } + + if dirty { + return m.unlockErr(ErrDirty{curVersion}) + } + + ret := make(chan interface{}, m.PrefetchMigrations) + go m.read(curVersion, int(version), ret) + + return m.unlockErr(m.runMigrations(ret)) +} + +// Steps looks at the currently active migration version. +// It will migrate up if n > 0, and down if n < 0. +func (m *Migrate) Steps(n int) error { + if n == 0 { + return ErrNoChange + } + + if err := m.lock(); err != nil { + return err + } + + curVersion, dirty, err := m.databaseDrv.Version() + if err != nil { + return m.unlockErr(err) + } + + if dirty { + return m.unlockErr(ErrDirty{curVersion}) + } + + ret := make(chan interface{}, m.PrefetchMigrations) + + if n > 0 { + go m.readUp(curVersion, n, ret) + } else { + go m.readDown(curVersion, -n, ret) + } + + return m.unlockErr(m.runMigrations(ret)) +} + +// Up looks at the currently active migration version +// and will migrate all the way up (applying all up migrations). +func (m *Migrate) Up() error { + if err := m.lock(); err != nil { + return err + } + + curVersion, dirty, err := m.databaseDrv.Version() + if err != nil { + return m.unlockErr(err) + } + + if dirty { + return m.unlockErr(ErrDirty{curVersion}) + } + + ret := make(chan interface{}, m.PrefetchMigrations) + + go m.readUp(curVersion, -1, ret) + return m.unlockErr(m.runMigrations(ret)) +} + +// Down looks at the currently active migration version +// and will migrate all the way down (applying all down migrations). +func (m *Migrate) Down() error { + if err := m.lock(); err != nil { + return err + } + + curVersion, dirty, err := m.databaseDrv.Version() + if err != nil { + return m.unlockErr(err) + } + + if dirty { + return m.unlockErr(ErrDirty{curVersion}) + } + + ret := make(chan interface{}, m.PrefetchMigrations) + go m.readDown(curVersion, -1, ret) + return m.unlockErr(m.runMigrations(ret)) +} + +// Drop deletes everything in the database. +func (m *Migrate) Drop() error { + if err := m.lock(); err != nil { + return err + } + if err := m.databaseDrv.Drop(); err != nil { + return m.unlockErr(err) + } + return m.unlock() +} + +// Run runs any migration provided by you against the database. +// It does not check any currently active version in database. +// Usually you don't need this function at all. Use Migrate, +// Steps, Up or Down instead. 
+func (m *Migrate) Run(migration ...*Migration) error {
+	if len(migration) == 0 {
+		return ErrNoChange
+	}
+
+	if err := m.lock(); err != nil {
+		return err
+	}
+
+	curVersion, dirty, err := m.databaseDrv.Version()
+	if err != nil {
+		return m.unlockErr(err)
+	}
+
+	if dirty {
+		return m.unlockErr(ErrDirty{curVersion})
+	}
+
+	ret := make(chan interface{}, m.PrefetchMigrations)
+
+	go func() {
+		defer close(ret)
+		for _, migr := range migration {
+			if m.PrefetchMigrations > 0 && migr.Body != nil {
+				m.logVerbosePrintf("Start buffering %v\n", migr.LogString())
+			} else {
+				m.logVerbosePrintf("Scheduled %v\n", migr.LogString())
+			}
+
+			ret <- migr
+			go migr.Buffer()
+		}
+	}()
+
+	return m.unlockErr(m.runMigrations(ret))
+}
+
+// Force sets a migration version.
+// It does not check any currently active version in the database.
+// It resets the dirty state to false.
+func (m *Migrate) Force(version int) error {
+	if version < -1 {
+		panic("version must be >= -1")
+	}
+
+	if err := m.lock(); err != nil {
+		return err
+	}
+
+	if err := m.databaseDrv.SetVersion(version, false); err != nil {
+		return m.unlockErr(err)
+	}
+
+	return m.unlock()
+}
+
+// Version returns the currently active migration version.
+// If no migration has been applied yet, it will return ErrNilVersion.
+func (m *Migrate) Version() (version uint, dirty bool, err error) {
+	v, d, err := m.databaseDrv.Version()
+	if err != nil {
+		return 0, false, err
+	}
+
+	if v == database.NilVersion {
+		return 0, false, ErrNilVersion
+	}
+
+	return suint(v), d, nil
+}
+
+// read reads either up or down migrations from source `from` to `to`.
+// Each migration is then written to the ret channel.
+// If an error occurs during reading, that error is written to the ret channel, too.
+// Once read is done reading, it will close the ret channel.
+func (m *Migrate) read(from int, to int, ret chan<- interface{}) {
+	defer close(ret)
+
+	// check if from version exists
+	if from >= 0 {
+		if m.versionExists(suint(from)) != nil {
+			ret <- os.ErrNotExist
+			return
+		}
+	}
+
+	// check if to version exists
+	if to >= 0 {
+		if m.versionExists(suint(to)) != nil {
+			ret <- os.ErrNotExist
+			return
+		}
+	}
+
+	// no change?
+	if from == to {
+		ret <- ErrNoChange
+		return
+	}
+
+	if from < to {
+		// it's going up
+		// apply first migration if from is nil version
+		if from == -1 {
+			firstVersion, err := m.sourceDrv.First()
+			if err != nil {
+				ret <- err
+				return
+			}
+
+			migr, err := m.newMigration(firstVersion, int(firstVersion))
+			if err != nil {
+				ret <- err
+				return
+			}
+
+			ret <- migr
+			go migr.Buffer()
+			from = int(firstVersion)
+		}
+
+		// run until we reach target ...
+		for from < to {
+			if m.stop() {
+				return
+			}
+
+			next, err := m.sourceDrv.Next(suint(from))
+			if err != nil {
+				ret <- err
+				return
+			}
+
+			migr, err := m.newMigration(next, int(next))
+			if err != nil {
+				ret <- err
+				return
+			}
+
+			ret <- migr
+			go migr.Buffer()
+			from = int(next)
+		}
+
+	} else {
+		// it's going down
+		// run until we reach target ...
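+		// walk down via Prev until the target version (or the nil version) is reached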
+		for from > to && from >= 0 {
+			if m.stop() {
+				return
+			}
+
+			prev, err := m.sourceDrv.Prev(suint(from))
+			if os.IsNotExist(err) && to == -1 {
+				// apply nil migration
+				migr, err := m.newMigration(suint(from), -1)
+				if err != nil {
+					ret <- err
+					return
+				}
+				ret <- migr
+				go migr.Buffer()
+				return
+
+			} else if err != nil {
+				ret <- err
+				return
+			}
+
+			migr, err := m.newMigration(suint(from), int(prev))
+			if err != nil {
+				ret <- err
+				return
+			}
+
+			ret <- migr
+			go migr.Buffer()
+			from = int(prev)
+		}
+	}
+}
+
+// readUp reads up migrations from `from` limited by `limit`.
+// limit can be -1, implying no limit and reading until there are no more migrations.
+// Each migration is then written to the ret channel.
+// If an error occurs during reading, that error is written to the ret channel, too.
+// Once readUp is done reading, it will close the ret channel.
+func (m *Migrate) readUp(from int, limit int, ret chan<- interface{}) {
+	defer close(ret)
+
+	// check if from version exists
+	if from >= 0 {
+		if m.versionExists(suint(from)) != nil {
+			ret <- os.ErrNotExist
+			return
+		}
+	}
+
+	if limit == 0 {
+		ret <- ErrNoChange
+		return
+	}
+
+	count := 0
+	for count < limit || limit == -1 {
+		if m.stop() {
+			return
+		}
+
+		// apply first migration if from is nil version
+		if from == -1 {
+			firstVersion, err := m.sourceDrv.First()
+			if err != nil {
+				ret <- err
+				return
+			}
+
+			migr, err := m.newMigration(firstVersion, int(firstVersion))
+			if err != nil {
+				ret <- err
+				return
+			}
+
+			ret <- migr
+			go migr.Buffer()
+			from = int(firstVersion)
+			count++
+			continue
+		}
+
+		// apply next migration
+		next, err := m.sourceDrv.Next(suint(from))
+		if os.IsNotExist(err) {
+			// no limit, but no migrations applied?
+			if limit == -1 && count == 0 {
+				ret <- ErrNoChange
+				return
+			}
+
+			// no limit, reached end
+			if limit == -1 {
+				return
+			}
+
+			// reached end, and didn't apply any migrations
+			if limit > 0 && count == 0 {
+				ret <- os.ErrNotExist
+				return
+			}
+
+			// applied fewer migrations than the limit?
+			if count < limit {
+				ret <- ErrShortLimit{suint(limit - count)}
+				return
+			}
+		}
+		if err != nil {
+			ret <- err
+			return
+		}
+
+		migr, err := m.newMigration(next, int(next))
+		if err != nil {
+			ret <- err
+			return
+		}
+
+		ret <- migr
+		go migr.Buffer()
+		from = int(next)
+		count++
+	}
+}
+
+// readDown reads down migrations from `from` limited by `limit`.
+// limit can be -1, implying no limit and reading until there are no more migrations.
+// Each migration is then written to the ret channel.
+// If an error occurs during reading, that error is written to the ret channel, too.
+// Once readDown is done reading, it will close the ret channel.
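+// A limit of 0 makes readDown report ErrNoChange.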
+func (m *Migrate) readDown(from int, limit int, ret chan<- interface{}) {
+	defer close(ret)
+
+	// check if from version exists
+	if from >= 0 {
+		if m.versionExists(suint(from)) != nil {
+			ret <- os.ErrNotExist
+			return
+		}
+	}
+
+	if limit == 0 {
+		ret <- ErrNoChange
+		return
+	}
+
+	// no change if already at nil version
+	if from == -1 && limit == -1 {
+		ret <- ErrNoChange
+		return
+	}
+
+	// can't go over limit if already at nil version
+	if from == -1 && limit > 0 {
+		ret <- os.ErrNotExist
+		return
+	}
+
+	count := 0
+	for count < limit || limit == -1 {
+		if m.stop() {
+			return
+		}
+
+		prev, err := m.sourceDrv.Prev(suint(from))
+		if os.IsNotExist(err) {
+			// no limit or haven't reached limit, apply "first" migration
+			if limit == -1 || limit-count > 0 {
+				firstVersion, err := m.sourceDrv.First()
+				if err != nil {
+					ret <- err
+					return
+				}
+
+				migr, err := m.newMigration(firstVersion, -1)
+				if err != nil {
+					ret <- err
+					return
+				}
+				ret <- migr
+				go migr.Buffer()
+				count++
+			}
+
+			if count < limit {
+				ret <- ErrShortLimit{suint(limit - count)}
+			}
+			return
+		}
+		if err != nil {
+			ret <- err
+			return
+		}
+
+		migr, err := m.newMigration(suint(from), int(prev))
+		if err != nil {
+			ret <- err
+			return
+		}
+
+		ret <- migr
+		go migr.Buffer()
+		from = int(prev)
+		count++
+	}
+}
+
+// runMigrations reads *Migration and error from a channel. Any other type
+// sent on this channel will result in a panic. Each migration is then
+// proxied to the database driver and run against the database.
+// Before running a newly received migration it will check if it's supposed
+// to stop execution because it might have received a stop signal on the
+// GracefulStop channel.
+func (m *Migrate) runMigrations(ret <-chan interface{}) error {
+	for r := range ret {
+
+		if m.stop() {
+			return nil
+		}
+
+		switch r.(type) {
+		case error:
+			return r.(error)
+
+		case *Migration:
+			migr := r.(*Migration)
+
+			// set version with dirty state
+			if err := m.databaseDrv.SetVersion(migr.TargetVersion, true); err != nil {
+				return err
+			}
+
+			if migr.Body != nil {
+				m.logVerbosePrintf("Read and execute %v\n", migr.LogString())
+				if err := m.databaseDrv.Run(migr.BufferedBody); err != nil {
+					return err
+				}
+			}
+
+			// set clean state
+			if err := m.databaseDrv.SetVersion(migr.TargetVersion, false); err != nil {
+				return err
+			}
+
+			endTime := time.Now()
+			readTime := migr.FinishedReading.Sub(migr.StartedBuffering)
+			runTime := endTime.Sub(migr.FinishedReading)
+
+			// log either verbose or normal
+			if m.Log != nil {
+				if m.Log.Verbose() {
+					m.logPrintf("Finished %v (read %v, ran %v)\n", migr.LogString(), readTime, runTime)
+				} else {
+					m.logPrintf("%v (%v)\n", migr.LogString(), readTime+runTime)
+				}
+			}
+
+		default:
+			panic("unknown type")
+		}
+	}
+	return nil
+}
+
+// versionExists checks whether the source contains an up or a down migration
+// for the specified migration version.
+func (m *Migrate) versionExists(version uint) error {
+	// try up migration first
+	up, _, err := m.sourceDrv.ReadUp(version)
+	if err == nil {
+		defer up.Close()
+	}
+	if os.IsExist(err) {
+		return nil
+	} else if !os.IsNotExist(err) {
+		return err
+	}
+
+	// then try down migration
+	down, _, err := m.sourceDrv.ReadDown(version)
+	if err == nil {
+		defer down.Close()
+	}
+	if os.IsExist(err) {
+		return nil
+	} else if !os.IsNotExist(err) {
+		return err
+	}
+
+	return os.ErrNotExist
+}
+
+// stop returns true if no more migrations should be run against the database
+// because a stop signal was received on the GracefulStop channel.
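+// Once a stop signal has been seen, the result is cached in isGracefulStop.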
+// Calls are cheap and this function is not blocking.
+func (m *Migrate) stop() bool {
+	if m.isGracefulStop {
+		return true
+	}
+
+	select {
+	case <-m.GracefulStop:
+		m.isGracefulStop = true
+		return true
+
+	default:
+		return false
+	}
+}
+
+// newMigration is a helper func that returns a *Migration for the
+// specified version and targetVersion.
+func (m *Migrate) newMigration(version uint, targetVersion int) (*Migration, error) {
+	var migr *Migration
+
+	if targetVersion >= int(version) {
+		r, identifier, err := m.sourceDrv.ReadUp(version)
+		if os.IsNotExist(err) {
+			// create "empty" migration
+			migr, err = NewMigration(nil, "", version, targetVersion)
+			if err != nil {
+				return nil, err
+			}
+
+		} else if err != nil {
+			return nil, err
+
+		} else {
+			// create migration from up source
+			migr, err = NewMigration(r, identifier, version, targetVersion)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+	} else {
+		r, identifier, err := m.sourceDrv.ReadDown(version)
+		if os.IsNotExist(err) {
+			// create "empty" migration
+			migr, err = NewMigration(nil, "", version, targetVersion)
+			if err != nil {
+				return nil, err
+			}
+
+		} else if err != nil {
+			return nil, err
+
+		} else {
+			// create migration from down source
+			migr, err = NewMigration(r, identifier, version, targetVersion)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	if m.PrefetchMigrations > 0 && migr.Body != nil {
+		m.logVerbosePrintf("Start buffering %v\n", migr.LogString())
+	} else {
+		m.logVerbosePrintf("Scheduled %v\n", migr.LogString())
+	}
+
+	return migr, nil
+}
+
+// lock is a thread safe helper function to lock the database.
+// It should be called as late as possible when running migrations.
+func (m *Migrate) lock() error {
+	m.isLockedMu.Lock()
+	defer m.isLockedMu.Unlock()
+
+	if m.isLocked {
+		return ErrLocked
+	}
+
+	// create done channel, used in the timeout goroutine
+	done := make(chan bool, 1)
+	defer func() {
+		done <- true
+	}()
+
+	// use errchan to signal error back to this context
+	errchan := make(chan error, 2)
+
+	// start timeout goroutine
+	timeout := time.After(m.LockTimeout)
+	go func() {
+		for {
+			select {
+			case <-done:
+				return
+			case <-timeout:
+				errchan <- ErrLockTimeout
+				return
+			}
+		}
+	}()
+
+	// now try to acquire the lock
+	go func() {
+		if err := m.databaseDrv.Lock(); err != nil {
+			errchan <- err
+		} else {
+			errchan <- nil
+		}
+		return
+	}()
+
+	// wait until we either receive ErrLockTimeout or an error from the Lock operation
+	err := <-errchan
+	if err == nil {
+		m.isLocked = true
+	}
+	return err
+}
+
+// unlock is a thread safe helper function to unlock the database.
+// It should be called as early as possible when no more migrations are
+// expected to be executed.
+func (m *Migrate) unlock() error {
+	m.isLockedMu.Lock()
+	defer m.isLockedMu.Unlock()
+
+	if err := m.databaseDrv.Unlock(); err != nil {
+		// BUG: Can potentially create a deadlock. Add a timeout.
+		return err
+	}
+
+	m.isLocked = false
+	return nil
+}
+
+// unlockErr calls unlock and returns a combined error
+// if a prevErr is not nil.
+func (m *Migrate) unlockErr(prevErr error) error {
+	if err := m.unlock(); err != nil {
+		return NewMultiError(prevErr, err)
+	}
+	return prevErr
+}
+
+// logPrintf writes to m.Log if not nil
+func (m *Migrate) logPrintf(format string, v ...interface{}) {
+	if m.Log != nil {
+		m.Log.Printf(format, v...)
+	}
+}
+
+// logVerbosePrintf writes to m.Log if not nil. Use for verbose logging output.
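+// It is a no-op unless the logger reports Verbose().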
+func (m *Migrate) logVerbosePrintf(format string, v ...interface{}) {
+	if m.Log != nil && m.Log.Verbose() {
+		m.Log.Printf(format, v...)
+	}
+}
diff --git a/vendor/github.com/mattes/migrate/migrate_test.go b/vendor/github.com/mattes/migrate/migrate_test.go
new file mode 100644
index 000000000..0ec4bce21
--- /dev/null
+++ b/vendor/github.com/mattes/migrate/migrate_test.go
@@ -0,0 +1,941 @@
+package migrate
+
+import (
+	"bytes"
+	"database/sql"
+	"io/ioutil"
+	"log"
+	"os"
+	"testing"
+
+	dStub "github.com/mattes/migrate/database/stub"
+	"github.com/mattes/migrate/source"
+	sStub "github.com/mattes/migrate/source/stub"
+)
+
+// sourceStubMigrations holds the following migrations:
+// u = up migration, d = down migration, n = version
+// |  1  | - | 3 |  4  | 5 | - |  7  |
+// | u d | - | u | u d | d | - | u d |
+var sourceStubMigrations *source.Migrations
+
+func init() {
+	sourceStubMigrations = source.NewMigrations()
+	sourceStubMigrations.Append(&source.Migration{Version: 1, Direction: source.Up})
+	sourceStubMigrations.Append(&source.Migration{Version: 1, Direction: source.Down})
+	sourceStubMigrations.Append(&source.Migration{Version: 3, Direction: source.Up})
+	sourceStubMigrations.Append(&source.Migration{Version: 4, Direction: source.Up})
+	sourceStubMigrations.Append(&source.Migration{Version: 4, Direction: source.Down})
+	sourceStubMigrations.Append(&source.Migration{Version: 5, Direction: source.Down})
+	sourceStubMigrations.Append(&source.Migration{Version: 7, Direction: source.Up})
+	sourceStubMigrations.Append(&source.Migration{Version: 7, Direction: source.Down})
+}
+
+type DummyInstance struct{ Name string }
+
+func TestNew(t *testing.T) {
+	m, err := New("stub://", "stub://")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if m.sourceName != "stub" {
+		t.Errorf("expected stub, got %v", m.sourceName)
+	}
+	if m.sourceDrv == nil {
+		t.Error("expected sourceDrv not to be nil")
+	}
+
+	if m.databaseName != "stub" {
+		t.Errorf("expected stub, got %v", m.databaseName)
+	}
+	if m.databaseDrv == nil {
+		t.Error("expected databaseDrv not to be nil")
+	}
+}
+
+func ExampleNew() {
+	// Read migrations from /home/mattes/migrations and connect to a local postgres database.
+	m, err := New("file:///home/mattes/migrations", "postgres://mattes:secret@localhost:5432/database?sslmode=disable")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Migrate all the way up ...
+	if err := m.Up(); err != nil {
+		log.Fatal(err)
+	}
+}
+
+func TestNewWithDatabaseInstance(t *testing.T) {
+	dummyDb := &DummyInstance{"database"}
+	dbInst, err := dStub.WithInstance(dummyDb, &dStub.Config{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	m, err := NewWithDatabaseInstance("stub://", "stub", dbInst)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if m.sourceName != "stub" {
+		t.Errorf("expected stub, got %v", m.sourceName)
+	}
+	if m.sourceDrv == nil {
+		t.Error("expected sourceDrv not to be nil")
+	}
+
+	if m.databaseName != "stub" {
+		t.Errorf("expected stub, got %v", m.databaseName)
+	}
+	if m.databaseDrv == nil {
+		t.Error("expected databaseDrv not to be nil")
+	}
+}
+
+func ExampleNewWithDatabaseInstance() {
+	// Create and use an existing database instance.
+	db, err := sql.Open("postgres", "postgres://mattes:secret@localhost:5432/database?sslmode=disable")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+
+	// Create driver instance from db.
+	// Check each driver if it supports the WithInstance function.
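+	// (The stub driver stands in for a real database driver in this example.)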
+	// `import "github.com/mattes/migrate/database/postgres"`
+	instance, err := dStub.WithInstance(db, &dStub.Config{})
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Read migrations from /home/mattes/migrations and connect to a local postgres database.
+	m, err := NewWithDatabaseInstance("file:///home/mattes/migrations", "postgres", instance)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Migrate all the way up ...
+	if err := m.Up(); err != nil {
+		log.Fatal(err)
+	}
+}
+
+func TestNewWithSourceInstance(t *testing.T) {
+	dummySource := &DummyInstance{"source"}
+	sInst, err := sStub.WithInstance(dummySource, &sStub.Config{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	m, err := NewWithSourceInstance("stub", sInst, "stub://")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if m.sourceName != "stub" {
+		t.Errorf("expected stub, got %v", m.sourceName)
+	}
+	if m.sourceDrv == nil {
+		t.Error("expected sourceDrv not to be nil")
+	}
+
+	if m.databaseName != "stub" {
+		t.Errorf("expected stub, got %v", m.databaseName)
+	}
+	if m.databaseDrv == nil {
+		t.Error("expected databaseDrv not to be nil")
+	}
+}
+
+func ExampleNewWithSourceInstance() {
+	di := &DummyInstance{"think any client required for a source here"}
+
+	// Create driver instance from DummyInstance di.
+	// Check each driver if it supports the WithInstance function.
+	// `import "github.com/mattes/migrate/source/stub"`
+	instance, err := sStub.WithInstance(di, &sStub.Config{})
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Read migrations from Stub and connect to a local postgres database.
+	m, err := NewWithSourceInstance("stub", instance, "postgres://mattes:secret@localhost:5432/database?sslmode=disable")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Migrate all the way up ...
+	if err := m.Up(); err != nil {
+		log.Fatal(err)
+	}
+}
+
+func TestNewWithInstance(t *testing.T) {
+	dummyDb := &DummyInstance{"database"}
+	dbInst, err := dStub.WithInstance(dummyDb, &dStub.Config{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	dummySource := &DummyInstance{"source"}
+	sInst, err := sStub.WithInstance(dummySource, &sStub.Config{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	m, err := NewWithInstance("stub", sInst, "stub", dbInst)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if m.sourceName != "stub" {
+		t.Errorf("expected stub, got %v", m.sourceName)
+	}
+	if m.sourceDrv == nil {
+		t.Error("expected sourceDrv not to be nil")
+	}
+
+	if m.databaseName != "stub" {
+		t.Errorf("expected stub, got %v", m.databaseName)
+	}
+	if m.databaseDrv == nil {
+		t.Error("expected databaseDrv not to be nil")
+	}
+}
+
+func ExampleNewWithInstance() {
+	// See NewWithDatabaseInstance and NewWithSourceInstance for an example.
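+	// Combining both forms gives full control over the source and database instances.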
+} + +func TestClose(t *testing.T) { + m, _ := New("stub://", "stub://") + sourceErr, databaseErr := m.Close() + if sourceErr != nil { + t.Error(sourceErr) + } + if databaseErr != nil { + t.Error(databaseErr) + } +} + +func TestMigrate(t *testing.T) { + m, _ := New("stub://", "stub://") + m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations + dbDrv := m.databaseDrv.(*dStub.Stub) + seq := newMigSeq() + + tt := []struct { + version uint + expectErr error + expectVersion uint + expectSeq migrationSequence + }{ + // migrate all the way Up in single steps + {version: 0, expectErr: os.ErrNotExist}, + {version: 1, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(1))}, + {version: 2, expectErr: os.ErrNotExist}, + {version: 3, expectErr: nil, expectVersion: 3, expectSeq: seq.add(M(3))}, + {version: 4, expectErr: nil, expectVersion: 4, expectSeq: seq.add(M(4))}, + {version: 5, expectErr: nil, expectVersion: 5, expectSeq: seq.add()}, // 5 has no up migration + {version: 6, expectErr: os.ErrNotExist}, + {version: 7, expectErr: nil, expectVersion: 7, expectSeq: seq.add(M(7))}, + {version: 8, expectErr: os.ErrNotExist}, + + // migrate all the way Down in single steps + {version: 6, expectErr: os.ErrNotExist}, + {version: 5, expectErr: nil, expectVersion: 5, expectSeq: seq.add(M(7, 5))}, + {version: 4, expectErr: nil, expectVersion: 4, expectSeq: seq.add(M(5, 4))}, + {version: 3, expectErr: nil, expectVersion: 3, expectSeq: seq.add(M(4, 3))}, + {version: 2, expectErr: os.ErrNotExist}, + {version: 1, expectErr: nil, expectVersion: 1, expectSeq: seq.add()}, // 3 has no down migration + {version: 0, expectErr: os.ErrNotExist}, + + // migrate all the way Up in one step + {version: 7, expectErr: nil, expectVersion: 7, expectSeq: seq.add(M(3), M(4), M(7))}, + + // migrate all the way Down in one step + {version: 1, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(7, 5), M(5, 4), M(4, 3), M(3, 1))}, + + // can't migrate the same version twice + {version: 1, expectErr: ErrNoChange}, + } + + for i, v := range tt { + err := m.Migrate(v.version) + if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || + (v.expectErr != os.ErrNotExist && err != v.expectErr) { + t.Errorf("expected err %v, got %v, in %v", v.expectErr, err, i) + + } else if err == nil { + version, _, err := m.Version() + if err != nil { + t.Error(err) + } + if version != v.expectVersion { + t.Errorf("expected version %v, got %v, in %v", v.expectVersion, version, i) + } + equalDbSeq(t, i, v.expectSeq, dbDrv) + } + } +} + +func TestMigrateDirty(t *testing.T) { + m, _ := New("stub://", "stub://") + dbDrv := m.databaseDrv.(*dStub.Stub) + if err := dbDrv.SetVersion(0, true); err != nil { + t.Fatal(err) + } + + err := m.Migrate(1) + if _, ok := err.(ErrDirty); !ok { + t.Fatalf("expected ErrDirty, got %v", err) + } +} + +func TestSteps(t *testing.T) { + m, _ := New("stub://", "stub://") + m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations + dbDrv := m.databaseDrv.(*dStub.Stub) + seq := newMigSeq() + + tt := []struct { + n int + expectErr error + expectVersion int + expectSeq migrationSequence + }{ + // step must be != 0 + {n: 0, expectErr: ErrNoChange}, + + // can't go Down if ErrNilVersion + {n: -1, expectErr: os.ErrNotExist}, + + // migrate all the way Up + {n: 1, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(1))}, + {n: 1, expectErr: nil, expectVersion: 3, expectSeq: seq.add(M(3))}, + {n: 1, expectErr: nil, expectVersion: 4, expectSeq: seq.add(M(4))}, + {n: 1, expectErr: nil, expectVersion: 5, expectSeq: 
seq.add()}, + {n: 1, expectErr: nil, expectVersion: 7, expectSeq: seq.add(M(7))}, + {n: 1, expectErr: os.ErrNotExist}, + + // migrate all the way Down + {n: -1, expectErr: nil, expectVersion: 5, expectSeq: seq.add(M(7, 5))}, + {n: -1, expectErr: nil, expectVersion: 4, expectSeq: seq.add(M(5, 4))}, + {n: -1, expectErr: nil, expectVersion: 3, expectSeq: seq.add(M(4, 3))}, + {n: -1, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(3, 1))}, + {n: -1, expectErr: nil, expectVersion: -1, expectSeq: seq.add(M(1, -1))}, + + // migrate Up in bigger step + {n: 4, expectErr: nil, expectVersion: 5, expectSeq: seq.add(M(1), M(3), M(4), M(5))}, + + // apply one migration, then reaches out of boundary + {n: 2, expectErr: ErrShortLimit{1}, expectVersion: 7, expectSeq: seq.add(M(7))}, + + // migrate Down in bigger step + {n: -4, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(7, 5), M(5, 4), M(4, 3), M(3, 1))}, + + // apply one migration, then reaches out of boundary + {n: -2, expectErr: ErrShortLimit{1}, expectVersion: -1, expectSeq: seq.add(M(1, -1))}, + } + + for i, v := range tt { + err := m.Steps(v.n) + if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || + (v.expectErr != os.ErrNotExist && err != v.expectErr) { + t.Errorf("expected err %v, got %v, in %v", v.expectErr, err, i) + + } else if err == nil { + version, _, err := m.Version() + if err != ErrNilVersion && err != nil { + t.Error(err) + } + if v.expectVersion == -1 && err != ErrNilVersion { + t.Errorf("expected ErrNilVersion, got %v, in %v", version, i) + + } else if v.expectVersion >= 0 && version != uint(v.expectVersion) { + t.Errorf("expected version %v, got %v, in %v", v.expectVersion, version, i) + } + equalDbSeq(t, i, v.expectSeq, dbDrv) + } + } +} + +func TestStepsDirty(t *testing.T) { + m, _ := New("stub://", "stub://") + dbDrv := m.databaseDrv.(*dStub.Stub) + if err := dbDrv.SetVersion(0, true); err != nil { + t.Fatal(err) + } + + err := m.Steps(1) + if _, ok := err.(ErrDirty); !ok { + t.Fatalf("expected ErrDirty, got %v", err) + } +} + +func TestUpAndDown(t *testing.T) { + m, _ := New("stub://", "stub://") + m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations + dbDrv := m.databaseDrv.(*dStub.Stub) + seq := newMigSeq() + + // go Up first + if err := m.Up(); err != nil { + t.Fatal(err) + } + equalDbSeq(t, 0, seq.add(M(1), M(3), M(4), M(5), M(7)), dbDrv) + + // go Down + if err := m.Down(); err != nil { + t.Fatal(err) + } + equalDbSeq(t, 1, seq.add(M(7, 5), M(5, 4), M(4, 3), M(3, 1), M(1, -1)), dbDrv) + + // go 1 Up and then all the way Up + if err := m.Steps(1); err != nil { + t.Fatal(err) + } + if err := m.Up(); err != nil { + t.Fatal(err) + } + equalDbSeq(t, 2, seq.add(M(1), M(3), M(4), M(5), M(7)), dbDrv) + + // go 1 Down and then all the way Down + if err := m.Steps(-1); err != nil { + t.Fatal(err) + } + if err := m.Down(); err != nil { + t.Fatal(err) + } + equalDbSeq(t, 0, seq.add(M(7, 5), M(5, 4), M(4, 3), M(3, 1), M(1, -1)), dbDrv) +} + +func TestUpDirty(t *testing.T) { + m, _ := New("stub://", "stub://") + dbDrv := m.databaseDrv.(*dStub.Stub) + if err := dbDrv.SetVersion(0, true); err != nil { + t.Fatal(err) + } + + err := m.Up() + if _, ok := err.(ErrDirty); !ok { + t.Fatalf("expected ErrDirty, got %v", err) + } +} + +func TestDownDirty(t *testing.T) { + m, _ := New("stub://", "stub://") + dbDrv := m.databaseDrv.(*dStub.Stub) + if err := dbDrv.SetVersion(0, true); err != nil { + t.Fatal(err) + } + + err := m.Down() + if _, ok := err.(ErrDirty); !ok { + t.Fatalf("expected ErrDirty, got %v", err) + } 
+} + +func TestDrop(t *testing.T) { + m, _ := New("stub://", "stub://") + m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations + dbDrv := m.databaseDrv.(*dStub.Stub) + + if err := m.Drop(); err != nil { + t.Fatal(err) + } + + if dbDrv.MigrationSequence[len(dbDrv.MigrationSequence)-1] != dStub.DROP { + t.Fatalf("expected database to DROP, got sequence %v", dbDrv.MigrationSequence) + } +} + +func TestVersion(t *testing.T) { + m, _ := New("stub://", "stub://") + dbDrv := m.databaseDrv.(*dStub.Stub) + + _, _, err := m.Version() + if err != ErrNilVersion { + t.Fatalf("expected ErrNilVersion, got %v", err) + } + + if err := dbDrv.Run(bytes.NewBufferString("1_up")); err != nil { + t.Fatal(err) + } + + if err := dbDrv.SetVersion(1, false); err != nil { + t.Fatal(err) + } + + v, _, err := m.Version() + if err != nil { + t.Fatal(err) + } + + if v != 1 { + t.Fatalf("expected version 1, got %v", v) + } +} + +func TestRun(t *testing.T) { + m, _ := New("stub://", "stub://") + + mx, err := NewMigration(nil, "", 1, 2) + if err != nil { + t.Fatal(err) + } + + if err := m.Run(mx); err != nil { + t.Fatal(err) + } + + v, _, err := m.Version() + if err != nil { + t.Fatal(err) + } + + if v != 2 { + t.Errorf("expected version 2, got %v", v) + } +} + +func TestRunDirty(t *testing.T) { + m, _ := New("stub://", "stub://") + dbDrv := m.databaseDrv.(*dStub.Stub) + if err := dbDrv.SetVersion(0, true); err != nil { + t.Fatal(err) + } + + migr, err := NewMigration(nil, "", 1, 2) + if err != nil { + t.Fatal(err) + } + + err = m.Run(migr) + if _, ok := err.(ErrDirty); !ok { + t.Fatalf("expected ErrDirty, got %v", err) + } +} + +func TestForce(t *testing.T) { + m, _ := New("stub://", "stub://") + m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations + + if err := m.Force(7); err != nil { + t.Fatal(err) + } + + v, dirty, err := m.Version() + if err != nil { + t.Fatal(err) + } + if dirty { + t.Errorf("expected dirty to be false") + } + if v != 7 { + t.Errorf("expected version to be 7") + } +} + +func TestForceDirty(t *testing.T) { + m, _ := New("stub://", "stub://") + dbDrv := m.databaseDrv.(*dStub.Stub) + if err := dbDrv.SetVersion(0, true); err != nil { + t.Fatal(err) + } + + if err := m.Force(1); err != nil { + t.Fatal(err) + } +} + +func TestRead(t *testing.T) { + m, _ := New("stub://", "stub://") + m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations + + tt := []struct { + from int + to int + expectErr error + expectMigrations migrationSequence + }{ + {from: -1, to: -1, expectErr: ErrNoChange}, + {from: -1, to: 0, expectErr: os.ErrNotExist}, + {from: -1, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(1))}, + {from: -1, to: 2, expectErr: os.ErrNotExist}, + {from: -1, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3))}, + {from: -1, to: 4, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3), M(4))}, + {from: -1, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3), M(4), M(5))}, + {from: -1, to: 6, expectErr: os.ErrNotExist}, + {from: -1, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3), M(4), M(5), M(7))}, + {from: -1, to: 8, expectErr: os.ErrNotExist}, + + {from: 0, to: -1, expectErr: os.ErrNotExist}, + {from: 0, to: 0, expectErr: os.ErrNotExist}, + {from: 0, to: 1, expectErr: os.ErrNotExist}, + {from: 0, to: 2, expectErr: os.ErrNotExist}, + {from: 0, to: 3, expectErr: os.ErrNotExist}, + {from: 0, to: 4, expectErr: os.ErrNotExist}, + {from: 0, to: 5, expectErr: os.ErrNotExist}, + {from: 0, to: 6, expectErr: os.ErrNotExist}, + {from: 0, to: 7, expectErr: 
os.ErrNotExist}, + {from: 0, to: 8, expectErr: os.ErrNotExist}, + + {from: 1, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(1, -1))}, + {from: 1, to: 0, expectErr: os.ErrNotExist}, + {from: 1, to: 1, expectErr: ErrNoChange}, + {from: 1, to: 2, expectErr: os.ErrNotExist}, + {from: 1, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(3))}, + {from: 1, to: 4, expectErr: nil, expectMigrations: newMigSeq(M(3), M(4))}, + {from: 1, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(3), M(4), M(5))}, + {from: 1, to: 6, expectErr: os.ErrNotExist}, + {from: 1, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(3), M(4), M(5), M(7))}, + {from: 1, to: 8, expectErr: os.ErrNotExist}, + + {from: 2, to: -1, expectErr: os.ErrNotExist}, + {from: 2, to: 0, expectErr: os.ErrNotExist}, + {from: 2, to: 1, expectErr: os.ErrNotExist}, + {from: 2, to: 2, expectErr: os.ErrNotExist}, + {from: 2, to: 3, expectErr: os.ErrNotExist}, + {from: 2, to: 4, expectErr: os.ErrNotExist}, + {from: 2, to: 5, expectErr: os.ErrNotExist}, + {from: 2, to: 6, expectErr: os.ErrNotExist}, + {from: 2, to: 7, expectErr: os.ErrNotExist}, + {from: 2, to: 8, expectErr: os.ErrNotExist}, + + {from: 3, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(3, 1), M(1, -1))}, + {from: 3, to: 0, expectErr: os.ErrNotExist}, + {from: 3, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(3, 1))}, + {from: 3, to: 2, expectErr: os.ErrNotExist}, + {from: 3, to: 3, expectErr: ErrNoChange}, + {from: 3, to: 4, expectErr: nil, expectMigrations: newMigSeq(M(4))}, + {from: 3, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(4), M(5))}, + {from: 3, to: 6, expectErr: os.ErrNotExist}, + {from: 3, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(4), M(5), M(7))}, + {from: 3, to: 8, expectErr: os.ErrNotExist}, + + {from: 4, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(4, 3), M(3, 1), M(1, -1))}, + {from: 4, to: 0, expectErr: os.ErrNotExist}, + {from: 4, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(4, 3), M(3, 1))}, + {from: 4, to: 2, expectErr: os.ErrNotExist}, + {from: 4, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(4, 3))}, + {from: 4, to: 4, expectErr: ErrNoChange}, + {from: 4, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(5))}, + {from: 4, to: 6, expectErr: os.ErrNotExist}, + {from: 4, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(5), M(7))}, + {from: 4, to: 8, expectErr: os.ErrNotExist}, + + {from: 5, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3), M(3, 1), M(1, -1))}, + {from: 5, to: 0, expectErr: os.ErrNotExist}, + {from: 5, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3), M(3, 1))}, + {from: 5, to: 2, expectErr: os.ErrNotExist}, + {from: 5, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3))}, + {from: 5, to: 4, expectErr: nil, expectMigrations: newMigSeq(M(5, 4))}, + {from: 5, to: 5, expectErr: ErrNoChange}, + {from: 5, to: 6, expectErr: os.ErrNotExist}, + {from: 5, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(7))}, + {from: 5, to: 8, expectErr: os.ErrNotExist}, + + {from: 6, to: -1, expectErr: os.ErrNotExist}, + {from: 6, to: 0, expectErr: os.ErrNotExist}, + {from: 6, to: 1, expectErr: os.ErrNotExist}, + {from: 6, to: 2, expectErr: os.ErrNotExist}, + {from: 6, to: 3, expectErr: os.ErrNotExist}, + {from: 6, to: 4, expectErr: os.ErrNotExist}, + {from: 6, to: 5, expectErr: os.ErrNotExist}, + {from: 6, to: 6, expectErr: os.ErrNotExist}, + {from: 6, to: 7, expectErr: os.ErrNotExist}, + {from: 6, to: 8, expectErr: os.ErrNotExist}, 
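+		// version 6 does not exist in the stub source, so every pair involving it fails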
+ + {from: 7, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4), M(4, 3), M(3, 1), M(1, -1))}, + {from: 7, to: 0, expectErr: os.ErrNotExist}, + {from: 7, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4), M(4, 3), M(3, 1))}, + {from: 7, to: 2, expectErr: os.ErrNotExist}, + {from: 7, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4), M(4, 3))}, + {from: 7, to: 4, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4))}, + {from: 7, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(7, 5))}, + {from: 7, to: 6, expectErr: os.ErrNotExist}, + {from: 7, to: 7, expectErr: ErrNoChange}, + {from: 7, to: 8, expectErr: os.ErrNotExist}, + + {from: 8, to: -1, expectErr: os.ErrNotExist}, + {from: 8, to: 0, expectErr: os.ErrNotExist}, + {from: 8, to: 1, expectErr: os.ErrNotExist}, + {from: 8, to: 2, expectErr: os.ErrNotExist}, + {from: 8, to: 3, expectErr: os.ErrNotExist}, + {from: 8, to: 4, expectErr: os.ErrNotExist}, + {from: 8, to: 5, expectErr: os.ErrNotExist}, + {from: 8, to: 6, expectErr: os.ErrNotExist}, + {from: 8, to: 7, expectErr: os.ErrNotExist}, + {from: 8, to: 8, expectErr: os.ErrNotExist}, + } + + for i, v := range tt { + ret := make(chan interface{}) + go m.read(v.from, v.to, ret) + migrations, err := migrationsFromChannel(ret) + + if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || + (v.expectErr != os.ErrNotExist && v.expectErr != err) { + t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i) + t.Logf("%v, in %v", migrations, i) + } + if len(v.expectMigrations) > 0 { + equalMigSeq(t, i, v.expectMigrations, migrations) + } + } +} + +func TestReadUp(t *testing.T) { + m, _ := New("stub://", "stub://") + m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations + + tt := []struct { + from int + limit int // -1 means no limit + expectErr error + expectMigrations migrationSequence + }{ + {from: -1, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3), M(4), M(5), M(7))}, + {from: -1, limit: 0, expectErr: ErrNoChange}, + {from: -1, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(1))}, + {from: -1, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3))}, + + {from: 0, limit: -1, expectErr: os.ErrNotExist}, + {from: 0, limit: 0, expectErr: os.ErrNotExist}, + {from: 0, limit: 1, expectErr: os.ErrNotExist}, + {from: 0, limit: 2, expectErr: os.ErrNotExist}, + + {from: 1, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(3), M(4), M(5), M(7))}, + {from: 1, limit: 0, expectErr: ErrNoChange}, + {from: 1, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(3))}, + {from: 1, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(3), M(4))}, + + {from: 2, limit: -1, expectErr: os.ErrNotExist}, + {from: 2, limit: 0, expectErr: os.ErrNotExist}, + {from: 2, limit: 1, expectErr: os.ErrNotExist}, + {from: 2, limit: 2, expectErr: os.ErrNotExist}, + + {from: 3, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(4), M(5), M(7))}, + {from: 3, limit: 0, expectErr: ErrNoChange}, + {from: 3, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(4))}, + {from: 3, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(4), M(5))}, + + {from: 4, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(5), M(7))}, + {from: 4, limit: 0, expectErr: ErrNoChange}, + {from: 4, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(5))}, + {from: 4, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(5), M(7))}, + + {from: 5, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(7))}, + 
{from: 5, limit: 0, expectErr: ErrNoChange}, + {from: 5, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(7))}, + {from: 5, limit: 2, expectErr: ErrShortLimit{1}, expectMigrations: newMigSeq(M(7))}, + + {from: 6, limit: -1, expectErr: os.ErrNotExist}, + {from: 6, limit: 0, expectErr: os.ErrNotExist}, + {from: 6, limit: 1, expectErr: os.ErrNotExist}, + {from: 6, limit: 2, expectErr: os.ErrNotExist}, + + {from: 7, limit: -1, expectErr: ErrNoChange}, + {from: 7, limit: 0, expectErr: ErrNoChange}, + {from: 7, limit: 1, expectErr: os.ErrNotExist}, + {from: 7, limit: 2, expectErr: os.ErrNotExist}, + + {from: 8, limit: -1, expectErr: os.ErrNotExist}, + {from: 8, limit: 0, expectErr: os.ErrNotExist}, + {from: 8, limit: 1, expectErr: os.ErrNotExist}, + {from: 8, limit: 2, expectErr: os.ErrNotExist}, + } + + for i, v := range tt { + ret := make(chan interface{}) + go m.readUp(v.from, v.limit, ret) + migrations, err := migrationsFromChannel(ret) + + if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || + (v.expectErr != os.ErrNotExist && v.expectErr != err) { + t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i) + t.Logf("%v, in %v", migrations, i) + } + if len(v.expectMigrations) > 0 { + equalMigSeq(t, i, v.expectMigrations, migrations) + } + } +} + +func TestReadDown(t *testing.T) { + m, _ := New("stub://", "stub://") + m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations + + tt := []struct { + from int + limit int // -1 means no limit + expectErr error + expectMigrations migrationSequence + }{ + {from: -1, limit: -1, expectErr: ErrNoChange}, + {from: -1, limit: 0, expectErr: ErrNoChange}, + {from: -1, limit: 1, expectErr: os.ErrNotExist}, + {from: -1, limit: 2, expectErr: os.ErrNotExist}, + + {from: 0, limit: -1, expectErr: os.ErrNotExist}, + {from: 0, limit: 0, expectErr: os.ErrNotExist}, + {from: 0, limit: 1, expectErr: os.ErrNotExist}, + {from: 0, limit: 2, expectErr: os.ErrNotExist}, + + {from: 1, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(1, -1))}, + {from: 1, limit: 0, expectErr: ErrNoChange}, + {from: 1, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(1, -1))}, + {from: 1, limit: 2, expectErr: ErrShortLimit{1}, expectMigrations: newMigSeq(M(1, -1))}, + + {from: 2, limit: -1, expectErr: os.ErrNotExist}, + {from: 2, limit: 0, expectErr: os.ErrNotExist}, + {from: 2, limit: 1, expectErr: os.ErrNotExist}, + {from: 2, limit: 2, expectErr: os.ErrNotExist}, + + {from: 3, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(3, 1), M(1, -1))}, + {from: 3, limit: 0, expectErr: ErrNoChange}, + {from: 3, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(3, 1))}, + {from: 3, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(3, 1), M(1, -1))}, + + {from: 4, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(4, 3), M(3, 1), M(1, -1))}, + {from: 4, limit: 0, expectErr: ErrNoChange}, + {from: 4, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(4, 3))}, + {from: 4, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(4, 3), M(3, 1))}, + + {from: 5, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3), M(3, 1), M(1, -1))}, + {from: 5, limit: 0, expectErr: ErrNoChange}, + {from: 5, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(5, 4))}, + {from: 5, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3))}, + + {from: 6, limit: -1, expectErr: os.ErrNotExist}, + {from: 6, limit: 0, expectErr: os.ErrNotExist}, + {from: 6, limit: 1, expectErr: os.ErrNotExist}, + {from: 6, limit: 2, 
expectErr: os.ErrNotExist}, + + {from: 7, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4), M(4, 3), M(3, 1), M(1, -1))}, + {from: 7, limit: 0, expectErr: ErrNoChange}, + {from: 7, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(7, 5))}, + {from: 7, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4))}, + + {from: 8, limit: -1, expectErr: os.ErrNotExist}, + {from: 8, limit: 0, expectErr: os.ErrNotExist}, + {from: 8, limit: 1, expectErr: os.ErrNotExist}, + {from: 8, limit: 2, expectErr: os.ErrNotExist}, + } + + for i, v := range tt { + ret := make(chan interface{}) + go m.readDown(v.from, v.limit, ret) + migrations, err := migrationsFromChannel(ret) + + if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || + (v.expectErr != os.ErrNotExist && v.expectErr != err) { + t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i) + t.Logf("%v, in %v", migrations, i) + } + if len(v.expectMigrations) > 0 { + equalMigSeq(t, i, v.expectMigrations, migrations) + } + } +} + +func TestLock(t *testing.T) { + m, _ := New("stub://", "stub://") + if err := m.lock(); err != nil { + t.Fatal(err) + } + + if err := m.lock(); err == nil { + t.Fatal("should be locked already") + } +} + +func migrationsFromChannel(ret chan interface{}) ([]*Migration, error) { + slice := make([]*Migration, 0) + for r := range ret { + switch r.(type) { + case error: + return slice, r.(error) + + case *Migration: + slice = append(slice, r.(*Migration)) + } + } + return slice, nil +} + +type migrationSequence []*Migration + +func newMigSeq(migr ...*Migration) migrationSequence { + return migr +} + +func (m *migrationSequence) add(migr ...*Migration) migrationSequence { + *m = append(*m, migr...) + return *m +} + +func (m *migrationSequence) bodySequence() []string { + r := make([]string, 0) + for _, v := range *m { + if v.Body != nil { + body, err := ioutil.ReadAll(v.Body) + if err != nil { + panic(err) // that should never happen + } + + // reset body reader + // TODO: is there a better/nicer way? 
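+			// restore v.Body: ReadAll above consumed it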
+			v.Body = ioutil.NopCloser(bytes.NewReader(body))
+
+			r = append(r, string(body[:]))
+		}
+	}
+	return r
+}
+
+// M is a convenience func to create a new *Migration
+func M(version uint, targetVersion ...int) *Migration {
+	if len(targetVersion) > 1 {
+		panic("only one targetVersion allowed")
+	}
+	ts := int(version)
+	if len(targetVersion) == 1 {
+		ts = targetVersion[0]
+	}
+
+	m, _ := New("stub://", "stub://")
+	m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations
+	migr, err := m.newMigration(version, ts)
+	if err != nil {
+		panic(err)
+	}
+	return migr
+}
+
+func equalMigSeq(t *testing.T, i int, expected, got migrationSequence) {
+	if len(expected) != len(got) {
+		t.Errorf("expected migrations %v, got %v, in %v", expected, got, i)
+
+	} else {
+		for ii := 0; ii < len(expected); ii++ {
+			if expected[ii].Version != got[ii].Version {
+				t.Errorf("expected version %v, got %v, in %v", expected[ii].Version, got[ii].Version, i)
+			}
+
+			if expected[ii].TargetVersion != got[ii].TargetVersion {
+				t.Errorf("expected targetVersion %v, got %v, in %v", expected[ii].TargetVersion, got[ii].TargetVersion, i)
+			}
+		}
+	}
+}
+
+func equalDbSeq(t *testing.T, i int, expected migrationSequence, got *dStub.Stub) {
+	bs := expected.bodySequence()
+	if !got.EqualSequence(bs) {
+		t.Fatalf("\nexpected sequence %v,\ngot %v, in %v", bs, got.MigrationSequence, i)
+	}
+}
diff --git a/vendor/github.com/mattes/migrate/migration.go b/vendor/github.com/mattes/migrate/migration.go
new file mode 100644
index 000000000..069e7f038
--- /dev/null
+++ b/vendor/github.com/mattes/migrate/migration.go
@@ -0,0 +1,154 @@
+package migrate
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"time"
+)
+
+// DefaultBufferSize sets the in memory buffer size (in Bytes) for every
+// pre-read migration (see DefaultPrefetchMigrations).
+var DefaultBufferSize = uint(100000)
+
+// Migration holds information about a migration.
+// It is initially created from data coming from the source and then
+// used when run against the database.
+type Migration struct {
+	// Identifier can be any string to help identifying
+	// the migration in the source.
+	Identifier string
+
+	// Version is the version of this migration.
+	Version uint
+
+	// TargetVersion is the migration version after this migration
+	// has been applied to the database.
+	// Can be -1, implying that this is a NilVersion.
+	TargetVersion int
+
+	// Body holds an io.ReadCloser to the source.
+	Body io.ReadCloser
+
+	// BufferedBody holds a buffered io.Reader to the underlying Body.
+	BufferedBody io.Reader
+
+	// BufferSize defaults to DefaultBufferSize
+	BufferSize uint
+
+	// bufferWriter holds an io.WriteCloser and pipes to BufferedBody.
+	// It is kept as a WriteCloser for flow control.
+	bufferWriter io.WriteCloser
+
+	// Scheduled is the time when the migration was scheduled/queued.
+	Scheduled time.Time
+
+	// StartedBuffering is the time when buffering of the migration source started.
+	StartedBuffering time.Time
+
+	// FinishedBuffering is the time when buffering of the migration source finished.
+	FinishedBuffering time.Time
+
+	// FinishedReading is the time when the migration source is fully read.
+	FinishedReading time.Time
+
+	// BytesRead holds the number of Bytes read from the migration source.
+	BytesRead int64
+}
+
+// NewMigration returns a new Migration and sets the body, identifier,
+// version and targetVersion. Body can be nil, which turns this migration
+// into a "NilMigration". If no identifier is provided, it will default to "".
+// targetVersion can be -1, implying it is a NilVersion.
+//
+// What is a NilMigration?
+// Usually each migration version coming from source is expected to have an
+// Up and Down migration. This is not a hard requirement though, leading to
+// a situation where only the Up or Down migration is present. So if, say,
+// the user wants to migrate up to a version that doesn't have an actual Up
+// migration, we still want to apply the version, but with an empty body.
+// We call that a NilMigration: a migration with an empty body.
+//
+// What is a NilVersion?
+// NilVersion is a const (-1). When running down migrations and we are at the
+// last down migration, there is no next down migration, so the targetVersion
+// should be nil. Nil in this case is represented by -1 (because the type is int).
+func NewMigration(body io.ReadCloser, identifier string,
+	version uint, targetVersion int) (*Migration, error) {
+	tnow := time.Now()
+	m := &Migration{
+		Identifier:    identifier,
+		Version:       version,
+		TargetVersion: targetVersion,
+		Scheduled:     tnow,
+	}
+
+	if body == nil {
+		if len(identifier) == 0 {
+			m.Identifier = ""
+		}
+
+		m.StartedBuffering = tnow
+		m.FinishedBuffering = tnow
+		m.FinishedReading = tnow
+		return m, nil
+	}
+
+	br, bw := io.Pipe()
+	m.Body = body // want to simulate low latency? newSlowReader(body)
+	m.BufferSize = DefaultBufferSize
+	m.BufferedBody = br
+	m.bufferWriter = bw
+	return m, nil
+}
+
+// String implements fmt.Stringer and is used in tests.
+func (m *Migration) String() string {
+	return fmt.Sprintf("%v [%v=>%v]", m.Identifier, m.Version, m.TargetVersion)
+}
+
+// LogString returns a string describing this migration to humans.
+func (m *Migration) LogString() string {
+	directionStr := "u"
+	if m.TargetVersion < int(m.Version) {
+		directionStr = "d"
+	}
+	return fmt.Sprintf("%v/%v %v", m.Version, directionStr, m.Identifier)
+}
+
+// Buffer buffers Body up to BufferSize.
+// Calling this function blocks. Call with goroutine.
+func (m *Migration) Buffer() error {
+	if m.Body == nil {
+		return nil
+	}
+
+	m.StartedBuffering = time.Now()
+
+	b := bufio.NewReaderSize(m.Body, int(m.BufferSize))
+
+	// start reading from body; Peek won't move the read pointer though
+	// poor man's solution?
+	b.Peek(int(m.BufferSize))
+
+	m.FinishedBuffering = time.Now()
+
+	// write to bufferWriter, this will block until
+	// something starts reading from m.BufferedBody
+	n, err := b.WriteTo(m.bufferWriter)
+	if err != nil {
+		return err
+	}
+
+	m.FinishedReading = time.Now()
+	m.BytesRead = n
+
+	// close bufferWriter so readers of BufferedBody know
+	// that there is no more data coming
+	m.bufferWriter.Close()
+
+	// it's safe to close the Body too
+	m.Body.Close()
+
+	return nil
+}
diff --git a/vendor/github.com/mattes/migrate/migration_test.go b/vendor/github.com/mattes/migrate/migration_test.go
new file mode 100644
index 000000000..b6589f938
--- /dev/null
+++ b/vendor/github.com/mattes/migrate/migration_test.go
@@ -0,0 +1,56 @@
+package migrate
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+	"strings"
+)
+
+func ExampleNewMigration() {
+	// Create a dummy migration body; usually this comes from the source.
+	body := ioutil.NopCloser(strings.NewReader("dummy migration that creates users table"))
+
+	// Create a new Migration that represents version 1486686016.
+	// Once this migration has been applied to the database, the new
+	// migration version will be 1486689359.
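+	// NewMigration wires the body into an internal pipe so it can be buffered.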
+	migr, err := NewMigration(body, "create_users_table", 1486686016, 1486689359)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	fmt.Print(migr.LogString())
+	// Output:
+	// 1486686016/u create_users_table
+}
+
+func ExampleNewMigration_nilMigration() {
+	// Create a new Migration that represents a NilMigration.
+	// Once this migration has been applied to the database, the new
+	// migration version will be 1486689359.
+	migr, err := NewMigration(nil, "", 1486686016, 1486689359)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	fmt.Print(migr.LogString())
+	// Output:
+	// 1486686016/u
+}
+
+func ExampleNewMigration_nilVersion() {
+	// Create a dummy migration body; usually this comes from the source.
+	body := ioutil.NopCloser(strings.NewReader("dummy migration that deletes users table"))
+
+	// Create a new Migration that represents version 1486686016.
+	// This is the last available down migration, so the migration version
+	// will be -1, meaning NilVersion, once this migration has run.
+	migr, err := NewMigration(body, "drop_users_table", 1486686016, -1)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	fmt.Print(migr.LogString())
+	// Output:
+	// 1486686016/d drop_users_table
+}
diff --git a/vendor/github.com/mattes/migrate/source/aws-s3/README.md b/vendor/github.com/mattes/migrate/source/aws-s3/README.md
new file mode 100644
index 000000000..3a59cfec9
--- /dev/null
+++ b/vendor/github.com/mattes/migrate/source/aws-s3/README.md
@@ -0,0 +1,3 @@
+# aws-s3
+
+`s3:///`
diff --git a/vendor/github.com/mattes/migrate/source/aws-s3/s3.go b/vendor/github.com/mattes/migrate/source/aws-s3/s3.go
new file mode 100644
index 000000000..8b581402c
--- /dev/null
+++ b/vendor/github.com/mattes/migrate/source/aws-s3/s3.go
@@ -0,0 +1,125 @@
+package awss3
+
+import (
+	"fmt"
+	"io"
+	"net/url"
+	"os"
+	"path"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/aws/aws-sdk-go/service/s3/s3iface"
+	"github.com/mattes/migrate/source"
+)
+
+func init() {
+	source.Register("s3", &s3Driver{})
+}
+
+type s3Driver struct {
+	s3client   s3iface.S3API
+	bucket     string
+	prefix     string
+	migrations *source.Migrations
+}
+
+func (s *s3Driver) Open(folder string) (source.Driver, error) {
+	u, err := url.Parse(folder)
+	if err != nil {
+		return nil, err
+	}
+	sess, err := session.NewSession()
+	if err != nil {
+		return nil, err
+	}
+	driver := s3Driver{
+		bucket:     u.Host,
+		prefix:     strings.Trim(u.Path, "/") + "/",
+		s3client:   s3.New(sess),
+		migrations: source.NewMigrations(),
+	}
+	err = driver.loadMigrations()
+	if err != nil {
+		return nil, err
+	}
+	return &driver, nil
+}
+
+func (s *s3Driver) loadMigrations() error {
+	output, err := s.s3client.ListObjects(&s3.ListObjectsInput{
+		Bucket:    aws.String(s.bucket),
+		Prefix:    aws.String(s.prefix),
+		Delimiter: aws.String("/"),
+	})
+	if err != nil {
+		return err
+	}
+	for _, object := range output.Contents {
+		_, fileName := path.Split(aws.StringValue(object.Key))
+		m, err := source.DefaultParse(fileName)
+		if err != nil {
+			continue
+		}
+		if !s.migrations.Append(m) {
+			return fmt.Errorf("unable to parse file %v", aws.StringValue(object.Key))
+		}
+	}
+	return nil
+}
+
+func (s *s3Driver) Close() error {
+	return nil
+}
+
+func (s *s3Driver) First() (uint, error) {
+	v, ok := s.migrations.First()
+	if !ok {
+		return 0, os.ErrNotExist
+	}
+	return v, nil
+}
+
+func (s *s3Driver) Prev(version uint) (uint, error) {
+	v, ok := s.migrations.Prev(version)
+	if !ok {
+		return 0, os.ErrNotExist
+	}
+	return v,
nil +} + +func (s *s3Driver) Next(version uint) (uint, error) { + v, ok := s.migrations.Next(version) + if !ok { + return 0, os.ErrNotExist + } + return v, nil +} + +func (s *s3Driver) ReadUp(version uint) (io.ReadCloser, string, error) { + if m, ok := s.migrations.Up(version); ok { + return s.open(m) + } + return nil, "", os.ErrNotExist +} + +func (s *s3Driver) ReadDown(version uint) (io.ReadCloser, string, error) { + if m, ok := s.migrations.Down(version); ok { + return s.open(m) + } + return nil, "", os.ErrNotExist +} + +func (s *s3Driver) open(m *source.Migration) (io.ReadCloser, string, error) { + key := path.Join(s.prefix, m.Raw) + object, err := s.s3client.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(s.bucket), + Key: aws.String(key), + }) + if err != nil { + return nil, "", err + } + return object.Body, m.Identifier, nil +} diff --git a/vendor/github.com/mattes/migrate/source/aws-s3/s3_test.go b/vendor/github.com/mattes/migrate/source/aws-s3/s3_test.go new file mode 100644 index 000000000..f07d7ff2c --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/aws-s3/s3_test.go @@ -0,0 +1,82 @@ +package awss3 + +import ( + "errors" + "io/ioutil" + "strings" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/mattes/migrate/source" + st "github.com/mattes/migrate/source/testing" +) + +func Test(t *testing.T) { + s3Client := fakeS3{ + bucket: "some-bucket", + objects: map[string]string{ + "staging/migrations/1_foobar.up.sql": "1 up", + "staging/migrations/1_foobar.down.sql": "1 down", + "prod/migrations/1_foobar.up.sql": "1 up", + "prod/migrations/1_foobar.down.sql": "1 down", + "prod/migrations/3_foobar.up.sql": "3 up", + "prod/migrations/4_foobar.up.sql": "4 up", + "prod/migrations/4_foobar.down.sql": "4 down", + "prod/migrations/5_foobar.down.sql": "5 down", + "prod/migrations/7_foobar.up.sql": "7 up", + "prod/migrations/7_foobar.down.sql": "7 down", + "prod/migrations/not-a-migration.txt": "", + "prod/migrations/0-random-stuff/whatever.txt": "", + }, + } + driver := s3Driver{ + bucket: "some-bucket", + prefix: "prod/migrations/", + migrations: source.NewMigrations(), + s3client: &s3Client, + } + err := driver.loadMigrations() + if err != nil { + t.Fatal(err) + } + st.Test(t, &driver) +} + +type fakeS3 struct { + s3.S3 + bucket string + objects map[string]string +} + +func (s *fakeS3) ListObjects(input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) { + bucket := aws.StringValue(input.Bucket) + if bucket != s.bucket { + return nil, errors.New("bucket not found") + } + prefix := aws.StringValue(input.Prefix) + delimiter := aws.StringValue(input.Delimiter) + var output s3.ListObjectsOutput + for name := range s.objects { + if strings.HasPrefix(name, prefix) { + if delimiter == "" || !strings.Contains(strings.Replace(name, prefix, "", 1), delimiter) { + output.Contents = append(output.Contents, &s3.Object{ + Key: aws.String(name), + }) + } + } + } + return &output, nil +} + +func (s *fakeS3) GetObject(input *s3.GetObjectInput) (*s3.GetObjectOutput, error) { + bucket := aws.StringValue(input.Bucket) + if bucket != s.bucket { + return nil, errors.New("bucket not found") + } + if data, ok := s.objects[aws.StringValue(input.Key)]; ok { + body := ioutil.NopCloser(strings.NewReader(data)) + return &s3.GetObjectOutput{Body: body}, nil + } + return nil, errors.New("object not found") +} diff --git a/vendor/github.com/mattes/migrate/source/driver.go b/vendor/github.com/mattes/migrate/source/driver.go new file mode 100644 index 
000000000..b9c052c16
--- /dev/null
+++ b/vendor/github.com/mattes/migrate/source/driver.go
@@ -0,0 +1,107 @@
+// Package source provides the Source interface.
+// All source drivers must implement this interface, register themselves,
+// optionally provide a `WithInstance` function and pass the tests
+// in package source/testing.
+package source
+
+import (
+	"fmt"
+	"io"
+	nurl "net/url"
+	"sync"
+)
+
+var driversMu sync.RWMutex
+var drivers = make(map[string]Driver)
+
+// Driver is the interface every source driver must implement.
+//
+// How to implement a source driver?
+//   1. Implement this interface.
+//   2. Optionally, add a function named `WithInstance`.
+//      This function should accept an existing source instance and a Config{} struct
+//      and return a driver instance.
+//   3. Add a test that calls source/testing.go:Test()
+//   4. Add own tests for Open(), WithInstance() (when provided) and Close().
+//      All other functions are tested by tests in source/testing.
+//      Saves you some time and makes sure all source drivers behave the same way.
+//   5. Call Register in init().
+//
+// Guidelines:
+//   * All configuration input must come from the URL string in func Open()
+//     or the Config{} struct in WithInstance. Don't os.Getenv().
+//   * Drivers are supposed to be read only.
+//   * Ideally don't load any contents (into memory) in Open or WithInstance.
+type Driver interface {
+	// Open returns a new driver instance configured with parameters
+	// coming from the URL string. Migrate will call this function
+	// only once per instance.
+	Open(url string) (Driver, error)
+
+	// Close closes the underlying source instance managed by the driver.
+	// Migrate will call this function only once per instance.
+	Close() error
+
+	// First returns the very first migration version available to the driver.
+	// Migrate will call this function multiple times.
+	// If there is no version available, it must return os.ErrNotExist.
+	First() (version uint, err error)
+
+	// Prev returns the previous version for a given version available to the driver.
+	// Migrate will call this function multiple times.
+	// If there is no previous version available, it must return os.ErrNotExist.
+	Prev(version uint) (prevVersion uint, err error)
+
+	// Next returns the next version for a given version available to the driver.
+	// Migrate will call this function multiple times.
+	// If there is no next version available, it must return os.ErrNotExist.
+	Next(version uint) (nextVersion uint, err error)
+
+	// ReadUp returns the UP migration body and an identifier that helps
+	// finding this migration in the source for a given version.
+	// If there is no up migration available for this version,
+	// it must return os.ErrNotExist.
+	// Do not start reading, just return the ReadCloser!
+	ReadUp(version uint) (r io.ReadCloser, identifier string, err error)
+
+	// ReadDown returns the DOWN migration body and an identifier that helps
+	// finding this migration in the source for a given version.
+	// If there is no down migration available for this version,
+	// it must return os.ErrNotExist.
+	// Do not start reading, just return the ReadCloser!
+	ReadDown(version uint) (r io.ReadCloser, identifier string, err error)
+}
+
+// Open returns a new driver instance.
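+// The driver is chosen by the URL scheme from the globally registered drivers.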
+func Open(url string) (Driver, error) { + u, err := nurl.Parse(url) + if err != nil { + return nil, err + } + + if u.Scheme == "" { + return nil, fmt.Errorf("source driver: invalid URL scheme") + } + + driversMu.RLock() + d, ok := drivers[u.Scheme] + driversMu.RUnlock() + if !ok { + return nil, fmt.Errorf("source driver: unknown driver %v (forgotten import?)", u.Scheme) + } + + return d.Open(url) +} + +// Register globally registers a driver. +func Register(name string, driver Driver) { + driversMu.Lock() + defer driversMu.Unlock() + if driver == nil { + panic("Register driver is nil") + } + if _, dup := drivers[name]; dup { + panic("Register called twice for driver " + name) + } + drivers[name] = driver +} diff --git a/vendor/github.com/mattes/migrate/source/driver_test.go b/vendor/github.com/mattes/migrate/source/driver_test.go new file mode 100644 index 000000000..82284a0b9 --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/driver_test.go @@ -0,0 +1,8 @@ +package source + +func ExampleDriver() { + // see source/stub for an example + + // source/stub/stub.go has the driver implementation + // source/stub/stub_test.go runs source/testing/test.go:Test +} diff --git a/vendor/github.com/mattes/migrate/source/file/README.md b/vendor/github.com/mattes/migrate/source/file/README.md new file mode 100644 index 000000000..7912eff66 --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/file/README.md @@ -0,0 +1,4 @@ +# file + +`file:///absolute/path` +`file://relative/path` diff --git a/vendor/github.com/mattes/migrate/source/file/file.go b/vendor/github.com/mattes/migrate/source/file/file.go new file mode 100644 index 000000000..b97d0aa3d --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/file/file.go @@ -0,0 +1,127 @@ +package file + +import ( + "fmt" + "io" + "io/ioutil" + nurl "net/url" + "os" + "path" + "path/filepath" + + "github.com/mattes/migrate/source" +) + +func init() { + source.Register("file", &File{}) +} + +type File struct { + url string + path string + migrations *source.Migrations +} + +func (f *File) Open(url string) (source.Driver, error) { + u, err := nurl.Parse(url) + if err != nil { + return nil, err + } + + // concat host and path to restore full path + // host might be `.` + p := u.Host + u.Path + + if len(p) == 0 { + // default to current directory if no path + wd, err := os.Getwd() + if err != nil { + return nil, err + } + p = wd + + } else if p[0:1] == "." 
|| p[0:1] != "/" { + // make path absolute if relative + abs, err := filepath.Abs(p) + if err != nil { + return nil, err + } + p = abs + } + + // scan directory + files, err := ioutil.ReadDir(p) + if err != nil { + return nil, err + } + + nf := &File{ + url: url, + path: p, + migrations: source.NewMigrations(), + } + + for _, fi := range files { + if !fi.IsDir() { + m, err := source.DefaultParse(fi.Name()) + if err != nil { + continue // ignore files that we can't parse + } + if !nf.migrations.Append(m) { + return nil, fmt.Errorf("unable to parse file %v", fi.Name()) + } + } + } + return nf, nil +} + +func (f *File) Close() error { + // nothing do to here + return nil +} + +func (f *File) First() (version uint, err error) { + if v, ok := f.migrations.First(); !ok { + return 0, &os.PathError{"first", f.path, os.ErrNotExist} + } else { + return v, nil + } +} + +func (f *File) Prev(version uint) (prevVersion uint, err error) { + if v, ok := f.migrations.Prev(version); !ok { + return 0, &os.PathError{fmt.Sprintf("prev for version %v", version), f.path, os.ErrNotExist} + } else { + return v, nil + } +} + +func (f *File) Next(version uint) (nextVersion uint, err error) { + if v, ok := f.migrations.Next(version); !ok { + return 0, &os.PathError{fmt.Sprintf("next for version %v", version), f.path, os.ErrNotExist} + } else { + return v, nil + } +} + +func (f *File) ReadUp(version uint) (r io.ReadCloser, identifier string, err error) { + if m, ok := f.migrations.Up(version); ok { + r, err := os.Open(path.Join(f.path, m.Raw)) + if err != nil { + return nil, "", err + } + return r, m.Identifier, nil + } + return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), f.path, os.ErrNotExist} +} + +func (f *File) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) { + if m, ok := f.migrations.Down(version); ok { + r, err := os.Open(path.Join(f.path, m.Raw)) + if err != nil { + return nil, "", err + } + return r, m.Identifier, nil + } + return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), f.path, os.ErrNotExist} +} diff --git a/vendor/github.com/mattes/migrate/source/file/file_test.go b/vendor/github.com/mattes/migrate/source/file/file_test.go new file mode 100644 index 000000000..310131c6f --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/file/file_test.go @@ -0,0 +1,207 @@ +package file + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "testing" + + st "github.com/mattes/migrate/source/testing" +) + +func Test(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + // write files that meet driver test requirements + mustWriteFile(t, tmpDir, "1_foobar.up.sql", "1 up") + mustWriteFile(t, tmpDir, "1_foobar.down.sql", "1 down") + + mustWriteFile(t, tmpDir, "3_foobar.up.sql", "3 up") + + mustWriteFile(t, tmpDir, "4_foobar.up.sql", "4 up") + mustWriteFile(t, tmpDir, "4_foobar.down.sql", "4 down") + + mustWriteFile(t, tmpDir, "5_foobar.down.sql", "5 down") + + mustWriteFile(t, tmpDir, "7_foobar.up.sql", "7 up") + mustWriteFile(t, tmpDir, "7_foobar.down.sql", "7 down") + + f := &File{} + d, err := f.Open("file://" + tmpDir) + if err != nil { + t.Fatal(err) + } + + st.Test(t, d) +} + +func TestOpen(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "TestOpen") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + mustWriteFile(t, tmpDir, "1_foobar.up.sql", "") + mustWriteFile(t, tmpDir, "1_foobar.down.sql", "") + + if !filepath.IsAbs(tmpDir) { + 
t.Fatal("expected tmpDir to be absolute path") + } + + f := &File{} + _, err = f.Open("file://" + tmpDir) // absolute path + if err != nil { + t.Fatal(err) + } +} + +func TestOpenWithRelativePath(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "TestOpen") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + wd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + defer os.Chdir(wd) // rescue working dir after we are done + + if err := os.Chdir(tmpDir); err != nil { + t.Fatal(err) + } + + if err := os.Mkdir(filepath.Join(tmpDir, "foo"), os.ModePerm); err != nil { + t.Fatal(err) + } + + mustWriteFile(t, filepath.Join(tmpDir, "foo"), "1_foobar.up.sql", "") + + f := &File{} + + // dir: foo + d, err := f.Open("file://foo") + if err != nil { + t.Fatal(err) + } + _, err = d.First() + if err != nil { + t.Fatalf("expected first file in working dir %v for foo", tmpDir) + } + + // dir: ./foo + d, err = f.Open("file://./foo") + if err != nil { + t.Fatal(err) + } + _, err = d.First() + if err != nil { + t.Fatalf("expected first file in working dir %v for ./foo", tmpDir) + } +} + +func TestOpenDefaultsToCurrentDirectory(t *testing.T) { + wd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + + f := &File{} + d, err := f.Open("file://") + if err != nil { + t.Fatal(err) + } + + if d.(*File).path != wd { + t.Fatal("expected driver to default to current directory") + } +} + +func TestOpenWithDuplicateVersion(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "TestOpenWithDuplicateVersion") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + mustWriteFile(t, tmpDir, "1_foo.up.sql", "") // 1 up + mustWriteFile(t, tmpDir, "1_bar.up.sql", "") // 1 up + + f := &File{} + _, err = f.Open("file://" + tmpDir) + if err == nil { + t.Fatal("expected err") + } +} + +func TestClose(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "TestOpen") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + f := &File{} + d, err := f.Open("file://" + tmpDir) + if err != nil { + t.Fatal(err) + } + + if d.Close() != nil { + t.Fatal("expected nil") + } +} + +func mustWriteFile(t testing.TB, dir, file string, body string) { + if err := ioutil.WriteFile(path.Join(dir, file), []byte(body), 06444); err != nil { + t.Fatal(err) + } +} + +func mustCreateBenchmarkDir(t *testing.B) (dir string) { + tmpDir, err := ioutil.TempDir("", "Benchmark") + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 1000; i++ { + mustWriteFile(t, tmpDir, fmt.Sprintf("%v_foobar.up.sql", i), "") + mustWriteFile(t, tmpDir, fmt.Sprintf("%v_foobar.down.sql", i), "") + } + + return tmpDir +} + +func BenchmarkOpen(b *testing.B) { + dir := mustCreateBenchmarkDir(b) + defer os.RemoveAll(dir) + b.ResetTimer() + for n := 0; n < b.N; n++ { + f := &File{} + f.Open("file://" + dir) + } + b.StopTimer() +} + +func BenchmarkNext(b *testing.B) { + dir := mustCreateBenchmarkDir(b) + defer os.RemoveAll(dir) + f := &File{} + d, _ := f.Open("file://" + dir) + b.ResetTimer() + v, err := d.First() + for n := 0; n < b.N; n++ { + for !os.IsNotExist(err) { + v, err = d.Next(v) + } + } + b.StopTimer() +} diff --git a/vendor/github.com/mattes/migrate/source/github/.gitignore b/vendor/github.com/mattes/migrate/source/github/.gitignore new file mode 100644 index 000000000..3006ad5eb --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/github/.gitignore @@ -0,0 +1 @@ +.github_test_secrets diff --git a/vendor/github.com/mattes/migrate/source/github/README.md b/vendor/github.com/mattes/migrate/source/github/README.md 
new file mode 100644 index 000000000..257f575c4 --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/github/README.md @@ -0,0 +1,11 @@ +# github + +`github://user:personal-access-token@owner/repo/path` + +| URL Query | WithInstance Config | Description | +|------------|---------------------|-------------| +| user | | The username of the user connecting | +| personal-access-token | | An access token from Github (https://github.com/settings/tokens) | +| owner | | the repo owner | +| repo | | the name of the repository | +| path | | path in repo to migrations | diff --git a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1085649617_create_users_table.down.sql b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1085649617_create_users_table.down.sql new file mode 100644 index 000000000..c99ddcdc8 --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1085649617_create_users_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS users; diff --git a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1085649617_create_users_table.up.sql b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1085649617_create_users_table.up.sql new file mode 100644 index 000000000..92897dcab --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1085649617_create_users_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE users ( + user_id integer unique, + name varchar(40), + email varchar(40) +); diff --git a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1185749658_add_city_to_users.down.sql b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1185749658_add_city_to_users.down.sql new file mode 100644 index 000000000..940c60712 --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1185749658_add_city_to_users.down.sql @@ -0,0 +1 @@ +ALTER TABLE users DROP COLUMN IF EXISTS city; diff --git a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1185749658_add_city_to_users.up.sql b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1185749658_add_city_to_users.up.sql new file mode 100644 index 000000000..67823edc9 --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1185749658_add_city_to_users.up.sql @@ -0,0 +1,3 @@ +ALTER TABLE users ADD COLUMN city varchar(100); + + diff --git a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.down.sql b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.down.sql new file mode 100644 index 000000000..3e87dd229 --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.down.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS users_email_index; diff --git a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.up.sql b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.up.sql new file mode 100644 index 000000000..fbeb4ab4e --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.up.sql @@ -0,0 +1,3 @@ +CREATE UNIQUE INDEX CONCURRENTLY users_email_index ON users (email); + +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. 
Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1385949617_create_books_table.down.sql b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1385949617_create_books_table.down.sql new file mode 100644 index 000000000..1a0b1a214 --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1385949617_create_books_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS books; diff --git a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1385949617_create_books_table.up.sql b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1385949617_create_books_table.up.sql new file mode 100644 index 000000000..f1503b518 --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1385949617_create_books_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE books ( + user_id integer, + name varchar(40), + author varchar(40) +); diff --git a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1485949617_create_movies_table.down.sql b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1485949617_create_movies_table.down.sql new file mode 100644 index 000000000..3a5187689 --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1485949617_create_movies_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS movies; diff --git a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1485949617_create_movies_table.up.sql b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1485949617_create_movies_table.up.sql new file mode 100644 index 000000000..f0ef5943b --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1485949617_create_movies_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE movies ( + user_id integer, + name varchar(40), + director varchar(40) +); diff --git a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1585849751_just_a_comment.up.sql b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1585849751_just_a_comment.up.sql new file mode 100644 index 000000000..9b6b57a61 --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1585849751_just_a_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1685849751_another_comment.up.sql b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1685849751_another_comment.up.sql new file mode 100644 index 000000000..9b6b57a61 --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1685849751_another_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. 
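For orientation, a hedged sketch of pointing migrate at the github source described in the README above; the owner, repo, token, and Postgres DSN are all placeholder values, and the postgres database driver import is just one possible choice:

```go
package main

import (
	"log"

	"github.com/mattes/migrate"
	_ "github.com/mattes/migrate/database/postgres" // one possible database driver
	_ "github.com/mattes/migrate/source/github"     // registers the "github" scheme
)

func main() {
	// placeholder credentials and paths; see the README table above
	m, err := migrate.New(
		"github://alice:personal-access-token@alice/myrepo/examples/migrations",
		"postgres://localhost:5432/mydb?sslmode=disable",
	)
	if err != nil {
		log.Fatal(err)
	}
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		log.Fatal(err)
	}
}
```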
diff --git a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1785849751_another_comment.up.sql b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1785849751_another_comment.up.sql new file mode 100644 index 000000000..9b6b57a61 --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1785849751_another_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1885849751_another_comment.up.sql b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1885849751_another_comment.up.sql new file mode 100644 index 000000000..9b6b57a61 --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1885849751_another_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/mattes/migrate/source/github/github.go b/vendor/github.com/mattes/migrate/source/github/github.go new file mode 100644 index 000000000..d534ed37b --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/github/github.go @@ -0,0 +1,180 @@ +package github + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + nurl "net/url" + "os" + "path" + "strings" + + "github.com/google/go-github/github" + "github.com/mattes/migrate/source" +) + +func init() { + source.Register("github", &Github{}) +} + +var ( + ErrNoUserInfo = fmt.Errorf("no username:token provided") + ErrNoAccessToken = fmt.Errorf("no access token") + ErrInvalidRepo = fmt.Errorf("invalid repo") + ErrInvalidGithubClient = fmt.Errorf("expected *github.Client") + ErrNoDir = fmt.Errorf("no directory") +) + +type Github struct { + client *github.Client + url string + + pathOwner string + pathRepo string + path string + migrations *source.Migrations +} + +type Config struct { +} + +func (g *Github) Open(url string) (source.Driver, error) { + u, err := nurl.Parse(url) + if err != nil { + return nil, err + } + + if u.User == nil { + return nil, ErrNoUserInfo + } + + password, ok := u.User.Password() + if !ok { + return nil, ErrNoUserInfo + } + + tr := &github.BasicAuthTransport{ + Username: u.User.Username(), + Password: password, + } + + gn := &Github{ + client: github.NewClient(tr.Client()), + url: url, + migrations: source.NewMigrations(), + } + + // set owner, repo and path in repo + gn.pathOwner = u.Host + pe := strings.Split(strings.Trim(u.Path, "/"), "/") + if len(pe) < 1 { + return nil, ErrInvalidRepo + } + gn.pathRepo = pe[0] + if len(pe) > 1 { + gn.path = strings.Join(pe[1:], "/") + } + + if err := gn.readDirectory(); err != nil { + return nil, err + } + + return gn, nil +} + +func WithInstance(client *github.Client, config *Config) (source.Driver, error) { + gn := &Github{ + client: client, + migrations: source.NewMigrations(), + } + if err := gn.readDirectory(); err != nil { + return nil, err + } + return gn, nil +} + +func (g *Github) readDirectory() error { + fileContent, dirContents, _, err := g.client.Repositories.GetContents(context.Background(), g.pathOwner, g.pathRepo, g.path, &github.RepositoryContentGetOptions{}) + if err != nil { + return err + } + if fileContent != nil { 
+ return ErrNoDir + } + + for _, fi := range dirContents { + m, err := source.DefaultParse(*fi.Name) + if err != nil { + continue // ignore files that we can't parse + } + if !g.migrations.Append(m) { + return fmt.Errorf("unable to parse file %v", *fi.Name) + } + } + + return nil +} + +func (g *Github) Close() error { + return nil +} + +func (g *Github) First() (version uint, err error) { + if v, ok := g.migrations.First(); !ok { + return 0, &os.PathError{"first", g.path, os.ErrNotExist} + } else { + return v, nil + } +} + +func (g *Github) Prev(version uint) (prevVersion uint, err error) { + if v, ok := g.migrations.Prev(version); !ok { + return 0, &os.PathError{fmt.Sprintf("prev for version %v", version), g.path, os.ErrNotExist} + } else { + return v, nil + } +} + +func (g *Github) Next(version uint) (nextVersion uint, err error) { + if v, ok := g.migrations.Next(version); !ok { + return 0, &os.PathError{fmt.Sprintf("next for version %v", version), g.path, os.ErrNotExist} + } else { + return v, nil + } +} + +func (g *Github) ReadUp(version uint) (r io.ReadCloser, identifier string, err error) { + if m, ok := g.migrations.Up(version); ok { + file, _, _, err := g.client.Repositories.GetContents(context.Background(), g.pathOwner, g.pathRepo, path.Join(g.path, m.Raw), &github.RepositoryContentGetOptions{}) + if err != nil { + return nil, "", err + } + if file != nil { + r, err := file.GetContent() + if err != nil { + return nil, "", err + } + return ioutil.NopCloser(bytes.NewReader([]byte(r))), m.Identifier, nil + } + } + return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), g.path, os.ErrNotExist} +} + +func (g *Github) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) { + if m, ok := g.migrations.Down(version); ok { + file, _, _, err := g.client.Repositories.GetContents(context.Background(), g.pathOwner, g.pathRepo, path.Join(g.path, m.Raw), &github.RepositoryContentGetOptions{}) + if err != nil { + return nil, "", err + } + if file != nil { + r, err := file.GetContent() + if err != nil { + return nil, "", err + } + return ioutil.NopCloser(bytes.NewReader([]byte(r))), m.Identifier, nil + } + } + return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), g.path, os.ErrNotExist} +} diff --git a/vendor/github.com/mattes/migrate/source/github/github_test.go b/vendor/github.com/mattes/migrate/source/github/github_test.go new file mode 100644 index 000000000..83e86618e --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/github/github_test.go @@ -0,0 +1,32 @@ +package github + +import ( + "bytes" + "io/ioutil" + "testing" + + st "github.com/mattes/migrate/source/testing" +) + +var GithubTestSecret = "" // username:token + +func init() { + secrets, err := ioutil.ReadFile(".github_test_secrets") + if err == nil { + GithubTestSecret = string(bytes.TrimSpace(secrets)) + } +} + +func Test(t *testing.T) { + if len(GithubTestSecret) == 0 { + t.Skip("test requires .github_test_secrets") + } + + g := &Github{} + d, err := g.Open("github://" + GithubTestSecret + "@mattes/migrate_test_tmp/test") + if err != nil { + t.Fatal(err) + } + + st.Test(t, d) +} diff --git a/vendor/github.com/mattes/migrate/source/go-bindata/README.md b/vendor/github.com/mattes/migrate/source/go-bindata/README.md new file mode 100644 index 000000000..cd9dd4b78 --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/go-bindata/README.md @@ -0,0 +1,43 @@ +# go-bindata + +## Usage + + + +### Read bindata with NewWithSourceInstance + +```shell +go get -u
github.com/jteeuwen/go-bindata/... +cd examples/migrations && go-bindata -pkg migrations . +``` + +```go +import ( + "github.com/mattes/migrate" + "github.com/mattes/migrate/source/go-bindata" + "github.com/mattes/migrate/source/go-bindata/examples/migrations" +) + +func main() { + // wrap assets into Resource + s := bindata.Resource(migrations.AssetNames(), + func(name string) ([]byte, error) { + return migrations.Asset(name) + }) + + d, err := bindata.WithInstance(s) + m, err := migrate.NewWithSourceInstance("go-bindata", d, "database://foobar") + m.Up() // run your migrations and handle the errors above of course +} +``` + +### Read bindata with URL (todo) + +This will restore the assets in a tmp directory and then +proxy to source/file. go-bindata must be in your `$PATH`. + +``` +migrate -source go-bindata://examples/migrations/bindata.go +``` + + diff --git a/vendor/github.com/mattes/migrate/source/go-bindata/examples/migrations/bindata.go b/vendor/github.com/mattes/migrate/source/go-bindata/examples/migrations/bindata.go new file mode 100644 index 000000000..282d5ef54 --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/go-bindata/examples/migrations/bindata.go @@ -0,0 +1,304 @@ +// Code generated by go-bindata. +// sources: +// 1085649617_create_users_table.down.sql +// 1085649617_create_users_table.up.sql +// 1185749658_add_city_to_users.down.sql +// 1185749658_add_city_to_users.up.sql +// DO NOT EDIT! + +package testdata + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var __1085649617_create_users_tableDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x72\x09\xf2\x0f\x50\x08\x71\x74\xf2\x71\x55\xf0\x74\x53\x70\x8d\xf0\x0c\x0e\x09\x56\x28\x2d\x4e\x2d\x2a\xb6\xe6\x02\x04\x00\x00\xff\xff\x2c\x02\x3d\xa7\x1c\x00\x00\x00") + +func _1085649617_create_users_tableDownSqlBytes() ([]byte, error) { + return bindataRead( + __1085649617_create_users_tableDownSql, + "1085649617_create_users_table.down.sql", + ) +} + +func _1085649617_create_users_tableDownSql() (*asset, error) { + bytes, err := _1085649617_create_users_tableDownSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "1085649617_create_users_table.down.sql", size: 28, mode: os.FileMode(420), modTime: time.Unix(1485750305, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __1085649617_create_users_tableUpSql = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x72\x0e\x72\x75\x0c\x71\x55\x08\x71\x74\xf2\x71\x55\x28\x2d\x4e\x2d\x2a\x56\xd0\xe0\x52\x00\xb3\xe2\x33\x53\x14\x32\xf3\x4a\x52\xd3\x53\x8b\x14\x4a\xf3\x32\x0b\x4b\x53\x75\xb8\x14\x14\xf2\x12\x73\x53\x15\x14\x14\x14\xca\x12\x8b\x92\x33\x12\x8b\x34\x4c\x0c\x34\x41\xc2\xa9\xb9\x89\x99\x39\xa8\xc2\x5c\x9a\xd6\x5c\x80\x00\x00\x00\xff\xff\xa3\x57\xbc\x0b\x5f\x00\x00\x00") + +func _1085649617_create_users_tableUpSqlBytes() ([]byte, error) { + return bindataRead( + __1085649617_create_users_tableUpSql, + "1085649617_create_users_table.up.sql", + ) +} + +func _1085649617_create_users_tableUpSql() (*asset, error) { + bytes, err := _1085649617_create_users_tableUpSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "1085649617_create_users_table.up.sql", size: 95, mode: os.FileMode(420), modTime: time.Unix(1485803085, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __1185749658_add_city_to_usersDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\x2d\x4e\x2d\x2a\x56\x70\x09\xf2\x0f\x50\x70\xf6\xf7\x09\xf5\xf5\x53\xf0\x74\x53\x70\x8d\xf0\x0c\x0e\x09\x56\x48\xce\x2c\xa9\xb4\xe6\x02\x04\x00\x00\xff\xff\xb7\x52\x88\xd7\x2e\x00\x00\x00") + +func _1185749658_add_city_to_usersDownSqlBytes() ([]byte, error) { + return bindataRead( + __1185749658_add_city_to_usersDownSql, + "1185749658_add_city_to_users.down.sql", + ) +} + +func _1185749658_add_city_to_usersDownSql() (*asset, error) { + bytes, err := _1185749658_add_city_to_usersDownSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "1185749658_add_city_to_users.down.sql", size: 46, mode: os.FileMode(420), modTime: time.Unix(1485750443, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __1185749658_add_city_to_usersUpSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\x2d\x4e\x2d\x2a\x56\x70\x74\x71\x51\x70\xf6\xf7\x09\xf5\xf5\x53\x48\xce\x2c\xa9\x54\x28\x4b\x2c\x4a\xce\x48\x2c\xd2\x30\x34\x30\xd0\xb4\xe6\xe2\xe2\x02\x04\x00\x00\xff\xff\xa8\x0f\x49\xc6\x32\x00\x00\x00") + +func _1185749658_add_city_to_usersUpSqlBytes() ([]byte, error) { + return bindataRead( + __1185749658_add_city_to_usersUpSql, + "1185749658_add_city_to_users.up.sql", + ) +} + +func _1185749658_add_city_to_usersUpSql() (*asset, error) { + bytes, err := _1185749658_add_city_to_usersUpSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "1185749658_add_city_to_users.up.sql", size: 50, mode: os.FileMode(420), modTime: time.Unix(1485843733, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. 
+func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "1085649617_create_users_table.down.sql": _1085649617_create_users_tableDownSql, + "1085649617_create_users_table.up.sql": _1085649617_create_users_tableUpSql, + "1185749658_add_city_to_users.down.sql": _1185749658_add_city_to_usersDownSql, + "1185749658_add_city_to_users.up.sql": _1185749658_add_city_to_usersUpSql, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} +var _bintree = &bintree{nil, map[string]*bintree{ + "1085649617_create_users_table.down.sql": &bintree{_1085649617_create_users_tableDownSql, map[string]*bintree{}}, + "1085649617_create_users_table.up.sql": &bintree{_1085649617_create_users_tableUpSql, map[string]*bintree{}}, + "1185749658_add_city_to_users.down.sql": &bintree{_1185749658_add_city_to_usersDownSql, map[string]*bintree{}}, + "1185749658_add_city_to_users.up.sql": &bintree{_1185749658_add_city_to_usersUpSql, map[string]*bintree{}}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) 
+} + diff --git a/vendor/github.com/mattes/migrate/source/go-bindata/go-bindata.go b/vendor/github.com/mattes/migrate/source/go-bindata/go-bindata.go new file mode 100644 index 000000000..7426db71b --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/go-bindata/go-bindata.go @@ -0,0 +1,119 @@ +package bindata + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/mattes/migrate/source" +) + +type AssetFunc func(name string) ([]byte, error) + +func Resource(names []string, afn AssetFunc) *AssetSource { + return &AssetSource{ + Names: names, + AssetFunc: afn, + } +} + +type AssetSource struct { + Names []string + AssetFunc AssetFunc +} + +func init() { + source.Register("go-bindata", &Bindata{}) +} + +type Bindata struct { + path string + assetSource *AssetSource + migrations *source.Migrations +} + +func (b *Bindata) Open(url string) (source.Driver, error) { + return nil, fmt.Errorf("not yet implemented") +} + +var ( + ErrNoAssetSource = fmt.Errorf("expects *AssetSource") +) + +func WithInstance(instance interface{}) (source.Driver, error) { + if _, ok := instance.(*AssetSource); !ok { + return nil, ErrNoAssetSource + } + as := instance.(*AssetSource) + + bn := &Bindata{ + path: "", + assetSource: as, + migrations: source.NewMigrations(), + } + + for _, fi := range as.Names { + m, err := source.DefaultParse(fi) + if err != nil { + continue // ignore files that we can't parse + } + + if !bn.migrations.Append(m) { + return nil, fmt.Errorf("unable to parse file %v", fi) + } + } + + return bn, nil +} + +func (b *Bindata) Close() error { + return nil +} + +func (b *Bindata) First() (version uint, err error) { + if v, ok := b.migrations.First(); !ok { + return 0, &os.PathError{"first", b.path, os.ErrNotExist} + } else { + return v, nil + } +} + +func (b *Bindata) Prev(version uint) (prevVersion uint, err error) { + if v, ok := b.migrations.Prev(version); !ok { + return 0, &os.PathError{fmt.Sprintf("prev for version %v", version), b.path, os.ErrNotExist} + } else { + return v, nil + } +} + +func (b *Bindata) Next(version uint) (nextVersion uint, err error) { + if v, ok := b.migrations.Next(version); !ok { + return 0, &os.PathError{fmt.Sprintf("next for version %v", version), b.path, os.ErrNotExist} + } else { + return v, nil + } +} + +func (b *Bindata) ReadUp(version uint) (r io.ReadCloser, identifier string, err error) { + if m, ok := b.migrations.Up(version); ok { + body, err := b.assetSource.AssetFunc(m.Raw) + if err != nil { + return nil, "", err + } + return ioutil.NopCloser(bytes.NewReader(body)), m.Identifier, nil + } + return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), b.path, os.ErrNotExist} +} + +func (b *Bindata) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) { + if m, ok := b.migrations.Down(version); ok { + body, err := b.assetSource.AssetFunc(m.Raw) + if err != nil { + return nil, "", err + } + return ioutil.NopCloser(bytes.NewReader(body)), m.Identifier, nil + } + return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), b.path, os.ErrNotExist} +} diff --git a/vendor/github.com/mattes/migrate/source/go-bindata/go-bindata_test.go b/vendor/github.com/mattes/migrate/source/go-bindata/go-bindata_test.go new file mode 100644 index 000000000..746a7b91f --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/go-bindata/go-bindata_test.go @@ -0,0 +1,43 @@ +package bindata + +import ( + "testing" + + "github.com/mattes/migrate/source/go-bindata/testdata" + st "github.com/mattes/migrate/source/testing" +) + 
+func Test(t *testing.T) { + // wrap assets into Resource first + s := Resource(testdata.AssetNames(), + func(name string) ([]byte, error) { + return testdata.Asset(name) + }) + + d, err := WithInstance(s) + if err != nil { + t.Fatal(err) + } + st.Test(t, d) +} + +func TestWithInstance(t *testing.T) { + // wrap assets into Resource + s := Resource(testdata.AssetNames(), + func(name string) ([]byte, error) { + return testdata.Asset(name) + }) + + _, err := WithInstance(s) + if err != nil { + t.Fatal(err) + } +} + +func TestOpen(t *testing.T) { + b := &Bindata{} + _, err := b.Open("") + if err == nil { + t.Fatal("expected err, because it's not implemented yet") + } +} diff --git a/vendor/github.com/mattes/migrate/source/go-bindata/testdata/bindata.go b/vendor/github.com/mattes/migrate/source/go-bindata/testdata/bindata.go new file mode 100644 index 000000000..304f3d87c --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/go-bindata/testdata/bindata.go @@ -0,0 +1,396 @@ +// Code generated by go-bindata. +// sources: +// 1_test.down.sql +// 1_test.up.sql +// 3_test.up.sql +// 4_test.down.sql +// 4_test.up.sql +// 5_test.down.sql +// 7_test.down.sql +// 7_test.up.sql +// DO NOT EDIT! + +package testdata + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var __1_testDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func _1_testDownSqlBytes() ([]byte, error) { + return bindataRead( + __1_testDownSql, + "1_test.down.sql", + ) +} + +func _1_testDownSql() (*asset, error) { + bytes, err := _1_testDownSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "1_test.down.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440324, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __1_testUpSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func _1_testUpSqlBytes() ([]byte, error) { + return bindataRead( + __1_testUpSql, + "1_test.up.sql", + ) +} + +func _1_testUpSql() (*asset, error) { + bytes, err := _1_testUpSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "1_test.up.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440319, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __3_testUpSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func _3_testUpSqlBytes() ([]byte, error) 
{ + return bindataRead( + __3_testUpSql, + "3_test.up.sql", + ) +} + +func _3_testUpSql() (*asset, error) { + bytes, err := _3_testUpSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "3_test.up.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440331, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __4_testDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func _4_testDownSqlBytes() ([]byte, error) { + return bindataRead( + __4_testDownSql, + "4_test.down.sql", + ) +} + +func _4_testDownSql() (*asset, error) { + bytes, err := _4_testDownSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "4_test.down.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440337, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __4_testUpSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func _4_testUpSqlBytes() ([]byte, error) { + return bindataRead( + __4_testUpSql, + "4_test.up.sql", + ) +} + +func _4_testUpSql() (*asset, error) { + bytes, err := _4_testUpSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "4_test.up.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440335, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __5_testDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func _5_testDownSqlBytes() ([]byte, error) { + return bindataRead( + __5_testDownSql, + "5_test.down.sql", + ) +} + +func _5_testDownSql() (*asset, error) { + bytes, err := _5_testDownSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "5_test.down.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440340, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __7_testDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func _7_testDownSqlBytes() ([]byte, error) { + return bindataRead( + __7_testDownSql, + "7_test.down.sql", + ) +} + +func _7_testDownSql() (*asset, error) { + bytes, err := _7_testDownSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "7_test.down.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440343, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __7_testUpSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func _7_testUpSqlBytes() ([]byte, error) { + return bindataRead( + __7_testUpSql, + "7_test.up.sql", + ) +} + +func _7_testUpSql() (*asset, error) { + bytes, err := _7_testUpSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "7_test.up.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440347, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. 
+func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "1_test.down.sql": _1_testDownSql, + "1_test.up.sql": _1_testUpSql, + "3_test.up.sql": _3_testUpSql, + "4_test.down.sql": _4_testDownSql, + "4_test.up.sql": _4_testUpSql, + "5_test.down.sql": _5_testDownSql, + "7_test.down.sql": _7_testDownSql, + "7_test.up.sql": _7_testUpSql, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} +var _bintree = &bintree{nil, map[string]*bintree{ + "1_test.down.sql": &bintree{_1_testDownSql, map[string]*bintree{}}, + "1_test.up.sql": &bintree{_1_testUpSql, map[string]*bintree{}}, + "3_test.up.sql": &bintree{_3_testUpSql, map[string]*bintree{}}, + "4_test.down.sql": &bintree{_4_testDownSql, map[string]*bintree{}}, + "4_test.up.sql": &bintree{_4_testUpSql, map[string]*bintree{}}, + "5_test.down.sql": &bintree{_5_testDownSql, map[string]*bintree{}}, + "7_test.down.sql": &bintree{_7_testDownSql, map[string]*bintree{}}, + "7_test.up.sql": &bintree{_7_testUpSql, map[string]*bintree{}}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) 
+} + diff --git a/vendor/github.com/mattes/migrate/source/google-cloud-storage/README.md b/vendor/github.com/mattes/migrate/source/google-cloud-storage/README.md new file mode 100644 index 000000000..e61cb2311 --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/google-cloud-storage/README.md @@ -0,0 +1,3 @@ +# google-cloud-storage + +`gcs://<bucket>/<prefix>` diff --git a/vendor/github.com/mattes/migrate/source/google-cloud-storage/storage.go b/vendor/github.com/mattes/migrate/source/google-cloud-storage/storage.go new file mode 100644 index 000000000..c1a18bc2f --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/google-cloud-storage/storage.go @@ -0,0 +1,119 @@ +package googlecloudstorage + +import ( + "fmt" + "io" + "net/url" + "os" + "path" + "strings" + + "cloud.google.com/go/storage" + "github.com/mattes/migrate/source" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func init() { + source.Register("gcs", &gcs{}) +} + +type gcs struct { + bucket *storage.BucketHandle + prefix string + migrations *source.Migrations +} + +func (g *gcs) Open(folder string) (source.Driver, error) { + u, err := url.Parse(folder) + if err != nil { + return nil, err + } + client, err := storage.NewClient(context.Background()) + if err != nil { + return nil, err + } + driver := gcs{ + bucket: client.Bucket(u.Host), + prefix: strings.Trim(u.Path, "/") + "/", + migrations: source.NewMigrations(), + } + err = driver.loadMigrations() + if err != nil { + return nil, err + } + return &driver, nil +} + +func (g *gcs) loadMigrations() error { + iter := g.bucket.Objects(context.Background(), &storage.Query{ + Prefix: g.prefix, + Delimiter: "/", + }) + object, err := iter.Next() + for ; err == nil; object, err = iter.Next() { + _, fileName := path.Split(object.Name) + m, parseErr := source.DefaultParse(fileName) + if parseErr != nil { + continue + } + if !g.migrations.Append(m) { + return fmt.Errorf("unable to parse file %v", object.Name) + } + } + if err != iterator.Done { + return err + } + return nil +} + +func (g *gcs) Close() error { + return nil +} + +func (g *gcs) First() (uint, error) { + v, ok := g.migrations.First() + if !ok { + return 0, os.ErrNotExist + } + return v, nil +} + +func (g *gcs) Prev(version uint) (uint, error) { + v, ok := g.migrations.Prev(version) + if !ok { + return 0, os.ErrNotExist + } + return v, nil +} + +func (g *gcs) Next(version uint) (uint, error) { + v, ok := g.migrations.Next(version) + if !ok { + return 0, os.ErrNotExist + } + return v, nil +} + +func (g *gcs) ReadUp(version uint) (io.ReadCloser, string, error) { + if m, ok := g.migrations.Up(version); ok { + return g.open(m) + } + return nil, "", os.ErrNotExist +} + +func (g *gcs) ReadDown(version uint) (io.ReadCloser, string, error) { + if m, ok := g.migrations.Down(version); ok { + return g.open(m) + } + return nil, "", os.ErrNotExist +} + +func (g *gcs) open(m *source.Migration) (io.ReadCloser, string, error) { + objectPath := path.Join(g.prefix, m.Raw) + reader, err := g.bucket.Object(objectPath).NewReader(context.Background()) + if err != nil { + return nil, "", err + } + return reader, m.Identifier, nil +} diff --git a/vendor/github.com/mattes/migrate/source/google-cloud-storage/storage_test.go b/vendor/github.com/mattes/migrate/source/google-cloud-storage/storage_test.go new file mode 100644 index 000000000..2af4947cc --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/google-cloud-storage/storage_test.go @@ -0,0 +1,37 @@ +package googlecloudstorage + +import ( + "testing" + +
"github.com/fsouza/fake-gcs-server/fakestorage" + "github.com/mattes/migrate/source" + st "github.com/mattes/migrate/source/testing" +) + +func Test(t *testing.T) { + server := fakestorage.NewServer([]fakestorage.Object{ + {BucketName: "some-bucket", Name: "staging/migrations/1_foobar.up.sql", Content: []byte("1 up")}, + {BucketName: "some-bucket", Name: "staging/migrations/1_foobar.down.sql", Content: []byte("1 down")}, + {BucketName: "some-bucket", Name: "prod/migrations/1_foobar.up.sql", Content: []byte("1 up")}, + {BucketName: "some-bucket", Name: "prod/migrations/1_foobar.down.sql", Content: []byte("1 down")}, + {BucketName: "some-bucket", Name: "prod/migrations/3_foobar.up.sql", Content: []byte("3 up")}, + {BucketName: "some-bucket", Name: "prod/migrations/4_foobar.up.sql", Content: []byte("4 up")}, + {BucketName: "some-bucket", Name: "prod/migrations/4_foobar.down.sql", Content: []byte("4 down")}, + {BucketName: "some-bucket", Name: "prod/migrations/5_foobar.down.sql", Content: []byte("5 down")}, + {BucketName: "some-bucket", Name: "prod/migrations/7_foobar.up.sql", Content: []byte("7 up")}, + {BucketName: "some-bucket", Name: "prod/migrations/7_foobar.down.sql", Content: []byte("7 down")}, + {BucketName: "some-bucket", Name: "prod/migrations/not-a-migration.txt"}, + {BucketName: "some-bucket", Name: "prod/migrations/0-random-stuff/whatever.txt"}, + }) + defer server.Stop() + driver := gcs{ + bucket: server.Client().Bucket("some-bucket"), + prefix: "prod/migrations/", + migrations: source.NewMigrations(), + } + err := driver.loadMigrations() + if err != nil { + t.Fatal(err) + } + st.Test(t, &driver) +} diff --git a/vendor/github.com/mattes/migrate/source/migration.go b/vendor/github.com/mattes/migrate/source/migration.go new file mode 100644 index 000000000..97a4ee226 --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/migration.go @@ -0,0 +1,143 @@ +package source + +import ( + "sort" +) + +// Direction is either up or down. +type Direction string + +const ( + Down Direction = "down" + Up = "up" +) + +// Migration is a helper struct for source drivers that need to +// build the full directory tree in memory. +// Migration is fully independent from migrate.Migration. +type Migration struct { + // Version is the version of this migration. + Version uint + + // Identifier can be any string that helps identifying + // this migration in the source. + Identifier string + + // Direction is either Up or Down. + Direction Direction + + // Raw holds the raw location path to this migration in source. + // ReadUp and ReadDown will use this. + Raw string +} + +// Migrations wraps Migration and has an internal index +// to keep track of Migration order. 
+type Migrations struct { + index uintSlice + migrations map[uint]map[Direction]*Migration +} + +func NewMigrations() *Migrations { + return &Migrations{ + index: make(uintSlice, 0), + migrations: make(map[uint]map[Direction]*Migration), + } +} + +func (i *Migrations) Append(m *Migration) (ok bool) { + if m == nil { + return false + } + + if i.migrations[m.Version] == nil { + i.migrations[m.Version] = make(map[Direction]*Migration) + } + + // reject duplicate versions + if _, dup := i.migrations[m.Version][m.Direction]; dup { + return false + } + + i.migrations[m.Version][m.Direction] = m + i.buildIndex() + + return true +} + +func (i *Migrations) buildIndex() { + i.index = make(uintSlice, 0) + for version, _ := range i.migrations { + i.index = append(i.index, version) + } + sort.Sort(i.index) +} + +func (i *Migrations) First() (version uint, ok bool) { + if len(i.index) == 0 { + return 0, false + } + return i.index[0], true +} + +func (i *Migrations) Prev(version uint) (prevVersion uint, ok bool) { + pos := i.findPos(version) + if pos >= 1 && len(i.index) > pos-1 { + return i.index[pos-1], true + } + return 0, false +} + +func (i *Migrations) Next(version uint) (nextVersion uint, ok bool) { + pos := i.findPos(version) + if pos >= 0 && len(i.index) > pos+1 { + return i.index[pos+1], true + } + return 0, false +} + +func (i *Migrations) Up(version uint) (m *Migration, ok bool) { + if _, ok := i.migrations[version]; ok { + if mx, ok := i.migrations[version][Up]; ok { + return mx, true + } + } + return nil, false +} + +func (i *Migrations) Down(version uint) (m *Migration, ok bool) { + if _, ok := i.migrations[version]; ok { + if mx, ok := i.migrations[version][Down]; ok { + return mx, true + } + } + return nil, false +} + +func (i *Migrations) findPos(version uint) int { + if len(i.index) > 0 { + ix := i.index.Search(version) + if ix < len(i.index) && i.index[ix] == version { + return ix + } + } + return -1 +} + +type uintSlice []uint + +func (s uintSlice) Len() int { + return len(s) +} + +func (s uintSlice) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s uintSlice) Less(i, j int) bool { + return s[i] < s[j] +} + +func (s uintSlice) Search(x uint) int { + return sort.Search(len(s), func(i int) bool { return s[i] >= x }) +} diff --git a/vendor/github.com/mattes/migrate/source/migration_test.go b/vendor/github.com/mattes/migrate/source/migration_test.go new file mode 100644 index 000000000..857cd26af --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/migration_test.go @@ -0,0 +1,46 @@ +package source + +import ( + "testing" +) + +func TestNewMigrations(t *testing.T) { + // TODO +} + +func TestAppend(t *testing.T) { + // TODO +} + +func TestBuildIndex(t *testing.T) { + // TODO +} + +func TestFirst(t *testing.T) { + // TODO +} + +func TestPrev(t *testing.T) { + // TODO +} + +func TestUp(t *testing.T) { + // TODO +} + +func TestDown(t *testing.T) { + // TODO +} + +func TestFindPos(t *testing.T) { + m := Migrations{index: uintSlice{1, 2, 3}} + if p := m.findPos(0); p != -1 { + t.Errorf("expected -1, got %v", p) + } + if p := m.findPos(1); p != 0 { + t.Errorf("expected 0, got %v", p) + } + if p := m.findPos(3); p != 2 { + t.Errorf("expected 2, got %v", p) + } +} diff --git a/vendor/github.com/mattes/migrate/source/parse.go b/vendor/github.com/mattes/migrate/source/parse.go new file mode 100644 index 000000000..2f888fe75 --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/parse.go @@ -0,0 +1,39 @@ +package source + +import ( + "fmt" + "regexp" + "strconv" +) + +var ( + 
ErrParse = fmt.Errorf("no match") +) + +var ( + DefaultParse = Parse + DefaultRegex = Regex +) + +// Regex matches the following pattern: +// 123_name.up.ext +// 123_name.down.ext +var Regex = regexp.MustCompile(`^([0-9]+)_(.*)\.(` + string(Down) + `|` + string(Up) + `)\.(.*)$`) + +// Parse returns Migration for matching Regex pattern. +func Parse(raw string) (*Migration, error) { + m := Regex.FindStringSubmatch(raw) + if len(m) == 5 { + versionUint64, err := strconv.ParseUint(m[1], 10, 64) + if err != nil { + return nil, err + } + return &Migration{ + Version: uint(versionUint64), + Identifier: m[2], + Direction: Direction(m[3]), + Raw: raw, + }, nil + } + return nil, ErrParse +} diff --git a/vendor/github.com/mattes/migrate/source/parse_test.go b/vendor/github.com/mattes/migrate/source/parse_test.go new file mode 100644 index 000000000..d06356cc8 --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/parse_test.go @@ -0,0 +1,106 @@ +package source + +import ( + "testing" +) + +func TestParse(t *testing.T) { + tt := []struct { + name string + expectErr error + expectMigration *Migration + }{ + { + name: "1_foobar.up.sql", + expectErr: nil, + expectMigration: &Migration{ + Version: 1, + Identifier: "foobar", + Direction: Up, + Raw: "1_foobar.up.sql", + }, + }, + { + name: "1_foobar.down.sql", + expectErr: nil, + expectMigration: &Migration{ + Version: 1, + Identifier: "foobar", + Direction: Down, + Raw: "1_foobar.down.sql", + }, + }, + { + name: "1_f-o_ob+ar.up.sql", + expectErr: nil, + expectMigration: &Migration{ + Version: 1, + Identifier: "f-o_ob+ar", + Direction: Up, + Raw: "1_f-o_ob+ar.up.sql", + }, + }, + { + name: "1485385885_foobar.up.sql", + expectErr: nil, + expectMigration: &Migration{ + Version: 1485385885, + Identifier: "foobar", + Direction: Up, + Raw: "1485385885_foobar.up.sql", + }, + }, + { + name: "20170412214116_date_foobar.up.sql", + expectErr: nil, + expectMigration: &Migration{ + Version: 20170412214116, + Identifier: "date_foobar", + Direction: Up, + Raw: "20170412214116_date_foobar.up.sql", + }, + }, + { + name: "-1_foobar.up.sql", + expectErr: ErrParse, + expectMigration: nil, + }, + { + name: "foobar.up.sql", + expectErr: ErrParse, + expectMigration: nil, + }, + { + name: "1.up.sql", + expectErr: ErrParse, + expectMigration: nil, + }, + { + name: "1_foobar.sql", + expectErr: ErrParse, + expectMigration: nil, + }, + { + name: "1_foobar.up", + expectErr: ErrParse, + expectMigration: nil, + }, + { + name: "1_foobar.down", + expectErr: ErrParse, + expectMigration: nil, + }, + } + + for i, v := range tt { + f, err := Parse(v.name) + + if err != v.expectErr { + t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i) + } + + if v.expectMigration != nil && *f != *v.expectMigration { + t.Errorf("expected %+v, got %+v, in %v", *v.expectMigration, *f, i) + } + } +} diff --git a/vendor/github.com/mattes/migrate/source/stub/stub.go b/vendor/github.com/mattes/migrate/source/stub/stub.go new file mode 100644 index 000000000..0f4153c54 --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/stub/stub.go @@ -0,0 +1,85 @@ +package stub + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/mattes/migrate/source" +) + +func init() { + source.Register("stub", &Stub{}) +} + +type Config struct{} + +// d, _ := source.Open("stub://") +// d.(*stub.Stub).Migrations = + +type Stub struct { + Url string + Instance interface{} + Migrations *source.Migrations + Config *Config +} + +func (s *Stub) Open(url string) (source.Driver, error) { + return &Stub{ + Url: 
url, + Migrations: source.NewMigrations(), + Config: &Config{}, + }, nil +} + +func WithInstance(instance interface{}, config *Config) (source.Driver, error) { + return &Stub{ + Instance: instance, + Migrations: source.NewMigrations(), + Config: config, + }, nil +} + +func (s *Stub) Close() error { + return nil +} + +func (s *Stub) First() (version uint, err error) { + if v, ok := s.Migrations.First(); !ok { + return 0, &os.PathError{Op: "first", Path: s.Url, Err: os.ErrNotExist} // TODO: s.Url can be empty when called with WithInstance + } else { + return v, nil + } +} + +func (s *Stub) Prev(version uint) (prevVersion uint, err error) { + if v, ok := s.Migrations.Prev(version); !ok { + return 0, &os.PathError{Op: fmt.Sprintf("prev for version %v", version), Path: s.Url, Err: os.ErrNotExist} + } else { + return v, nil + } +} + +func (s *Stub) Next(version uint) (nextVersion uint, err error) { + if v, ok := s.Migrations.Next(version); !ok { + return 0, &os.PathError{Op: fmt.Sprintf("next for version %v", version), Path: s.Url, Err: os.ErrNotExist} + } else { + return v, nil + } +} + +func (s *Stub) ReadUp(version uint) (r io.ReadCloser, identifier string, err error) { + if m, ok := s.Migrations.Up(version); ok { + return ioutil.NopCloser(bytes.NewBufferString(m.Identifier)), fmt.Sprintf("%v.up.stub", version), nil + } + return nil, "", &os.PathError{Op: fmt.Sprintf("read up version %v", version), Path: s.Url, Err: os.ErrNotExist} +} + +func (s *Stub) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) { + if m, ok := s.Migrations.Down(version); ok { + return ioutil.NopCloser(bytes.NewBufferString(m.Identifier)), fmt.Sprintf("%v.down.stub", version), nil + } + return nil, "", &os.PathError{Op: fmt.Sprintf("read down version %v", version), Path: s.Url, Err: os.ErrNotExist} +} diff --git a/vendor/github.com/mattes/migrate/source/stub/stub_test.go b/vendor/github.com/mattes/migrate/source/stub/stub_test.go new file mode 100644 index 000000000..05ce819d7 --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/stub/stub_test.go @@ -0,0 +1,30 @@ +package stub + +import ( + "testing" + + "github.com/mattes/migrate/source" + st "github.com/mattes/migrate/source/testing" +) + +func Test(t *testing.T) { + s := &Stub{} + d, err := s.Open("") + if err != nil { + t.Fatal(err) + } + + m := source.NewMigrations() + m.Append(&source.Migration{Version: 1, Direction: source.Up}) + m.Append(&source.Migration{Version: 1, Direction: source.Down}) + m.Append(&source.Migration{Version: 3, Direction: source.Up}) + m.Append(&source.Migration{Version: 4, Direction: source.Up}) + m.Append(&source.Migration{Version: 4, Direction: source.Down}) + m.Append(&source.Migration{Version: 5, Direction: source.Down}) + m.Append(&source.Migration{Version: 7, Direction: source.Up}) + m.Append(&source.Migration{Version: 7, Direction: source.Down}) + + d.(*Stub).Migrations = m + + st.Test(t, d) +} diff --git a/vendor/github.com/mattes/migrate/source/testing/testing.go b/vendor/github.com/mattes/migrate/source/testing/testing.go new file mode 100644 index 000000000..3cc003c59 --- /dev/null +++ b/vendor/github.com/mattes/migrate/source/testing/testing.go @@ -0,0 +1,169 @@ +// Package testing has the source tests. +// All source drivers must pass the Test function. +// This lives in its own package so it stays a test dependency. +package testing + +import ( + "os" + "testing" + + "github.com/mattes/migrate/source" +) + +// Test runs tests against source implementations. 
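+// Drivers typically call it from their own Test function after loading a
+// fixture set like the one in stub_test.go above.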
+// It assumes that the driver under test has access to the following migrations: +// +// u = up migration, d = down migration, n = version +// | 1 | - | 3 | 4 | 5 | - | 7 | +// | u d | - | u | u d | d | - | u d | +// +// See source/stub/stub_test.go or source/file/file_test.go for an example. +func Test(t *testing.T, d source.Driver) { + TestFirst(t, d) + TestPrev(t, d) + TestNext(t, d) + TestReadUp(t, d) + TestReadDown(t, d) +} + +func TestFirst(t *testing.T, d source.Driver) { + version, err := d.First() + if err != nil { + t.Fatalf("First: expected err to be nil, got %v", err) + } + if version != 1 { + t.Errorf("First: expected 1, got %v", version) + } +} + +func TestPrev(t *testing.T, d source.Driver) { + tt := []struct { + version uint + expectErr error + expectPrevVersion uint + }{ + {version: 0, expectErr: os.ErrNotExist}, + {version: 1, expectErr: os.ErrNotExist}, + {version: 2, expectErr: os.ErrNotExist}, + {version: 3, expectErr: nil, expectPrevVersion: 1}, + {version: 4, expectErr: nil, expectPrevVersion: 3}, + {version: 5, expectErr: nil, expectPrevVersion: 4}, + {version: 6, expectErr: os.ErrNotExist}, + {version: 7, expectErr: nil, expectPrevVersion: 5}, + {version: 8, expectErr: os.ErrNotExist}, + {version: 9, expectErr: os.ErrNotExist}, + } + + for i, v := range tt { + pv, err := d.Prev(v.version) + if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || + (v.expectErr != os.ErrNotExist && err != v.expectErr) { + t.Errorf("Prev: expected %v, got %v, in %v", v.expectErr, err, i) + } + if err == nil && v.expectPrevVersion != pv { + t.Errorf("Prev: expected %v, got %v, in %v", v.expectPrevVersion, pv, i) + } + } +} + +func TestNext(t *testing.T, d source.Driver) { + tt := []struct { + version uint + expectErr error + expectNextVersion uint + }{ + {version: 0, expectErr: os.ErrNotExist}, + {version: 1, expectErr: nil, expectNextVersion: 3}, + {version: 2, expectErr: os.ErrNotExist}, + {version: 3, expectErr: nil, expectNextVersion: 4}, + {version: 4, expectErr: nil, expectNextVersion: 5}, + {version: 5, expectErr: nil, expectNextVersion: 7}, + {version: 6, expectErr: os.ErrNotExist}, + {version: 7, expectErr: os.ErrNotExist}, + {version: 8, expectErr: os.ErrNotExist}, + {version: 9, expectErr: os.ErrNotExist}, + } + + for i, v := range tt { + nv, err := d.Next(v.version) + if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || + (v.expectErr != os.ErrNotExist && err != v.expectErr) { + t.Errorf("Next: expected %v, got %v, in %v", v.expectErr, err, i) + } + if err == nil && v.expectNextVersion != nv { + t.Errorf("Next: expected %v, got %v, in %v", v.expectNextVersion, nv, i) + } + } +} + +func TestReadUp(t *testing.T, d source.Driver) { + tt := []struct { + version uint + expectErr error + expectUp bool + }{ + {version: 0, expectErr: os.ErrNotExist}, + {version: 1, expectErr: nil, expectUp: true}, + {version: 2, expectErr: os.ErrNotExist}, + {version: 3, expectErr: nil, expectUp: true}, + {version: 4, expectErr: nil, expectUp: true}, + {version: 5, expectErr: os.ErrNotExist}, + {version: 6, expectErr: os.ErrNotExist}, + {version: 7, expectErr: nil, expectUp: true}, + {version: 8, expectErr: os.ErrNotExist}, + } + + for i, v := range tt { + up, identifier, err := d.ReadUp(v.version) + if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || + (v.expectErr != os.ErrNotExist && err != v.expectErr) { + t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i) + + } else if err == nil { + if len(identifier) == 0 { + t.Errorf("expected identifier not to be empty, in %v", i) + } + + if v.expectUp == true && up == nil { + t.Errorf("expected up 
not to be nil, in %v", i) + } else if v.expectUp == false && up != nil { + t.Errorf("expected up to be nil, got %v, in %v", up, i) + } + } + } +} + +func TestReadDown(t *testing.T, d source.Driver) { + tt := []struct { + version uint + expectErr error + expectDown bool + }{ + {version: 0, expectErr: os.ErrNotExist}, + {version: 1, expectErr: nil, expectDown: true}, + {version: 2, expectErr: os.ErrNotExist}, + {version: 3, expectErr: os.ErrNotExist}, + {version: 4, expectErr: nil, expectDown: true}, + {version: 5, expectErr: nil, expectDown: true}, + {version: 6, expectErr: os.ErrNotExist}, + {version: 7, expectErr: nil, expectDown: true}, + {version: 8, expectErr: os.ErrNotExist}, + } + + for i, v := range tt { + down, identifier, err := d.ReadDown(v.version) + if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || + (v.expectErr != os.ErrNotExist && err != v.expectErr) { + t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i) + + } else if err == nil { + if len(identifier) == 0 { + t.Errorf("expected identifier not to be empty, in %v", i) + } + + if v.expectDown == true && down == nil { + t.Errorf("expected down not to be nil, in %v", i) + } else if v.expectDown == false && down != nil { + t.Errorf("expected down to be nil, got %v, in %v", down, i) + } + } + } +} diff --git a/vendor/github.com/mattes/migrate/testing/docker.go b/vendor/github.com/mattes/migrate/testing/docker.go new file mode 100644 index 000000000..f7a7c4152 --- /dev/null +++ b/vendor/github.com/mattes/migrate/testing/docker.go @@ -0,0 +1,254 @@ +// Package testing is used in driver tests. +package testing + +import ( + "bufio" + "context" + "encoding/json" + "fmt" + "io" + "math/rand" + "strconv" + "strings" + "testing" + "time" + dockertypes "github.com/docker/docker/api/types" + dockercontainer "github.com/docker/docker/api/types/container" + dockernetwork "github.com/docker/docker/api/types/network" + dockerclient "github.com/docker/docker/client" +) + +func NewDockerContainer(t testing.TB, image string, env []string, cmd []string) (*DockerContainer, error) { + c, err := dockerclient.NewEnvClient() + if err != nil { + return nil, err + } + + if cmd == nil { + cmd = make([]string, 0) + } + + contr := &DockerContainer{ + t: t, + client: c, + ImageName: image, + ENV: env, + Cmd: cmd, + } + + if err := contr.PullImage(); err != nil { + return nil, err + } + + if err := contr.Start(); err != nil { + return nil, err + } + + return contr, nil +} + +// DockerContainer implements Instance interface +type DockerContainer struct { + t testing.TB + client *dockerclient.Client + ImageName string + ENV []string + Cmd []string + ContainerId string + ContainerName string + ContainerJSON dockertypes.ContainerJSON + containerInspected bool + keepForDebugging bool +} + +func (d *DockerContainer) PullImage() error { + d.t.Logf("Docker: Pull image %v", d.ImageName) + r, err := d.client.ImagePull(context.Background(), d.ImageName, dockertypes.ImagePullOptions{}) + if err != nil { + return err + } + defer r.Close() + + // read output and log relevant lines + bf := bufio.NewScanner(r) + for bf.Scan() { + var resp dockerImagePullOutput + if err := json.Unmarshal(bf.Bytes(), &resp); err != nil { + return err + } + if strings.HasPrefix(resp.Status, "Status: ") { + d.t.Logf("Docker: %v", resp.Status) + } + } + return bf.Err() +} + +func (d *DockerContainer) Start() error { + containerName := fmt.Sprintf("migrate_test_%v", pseudoRandStr(10)) + + // create container first + resp, err := d.client.ContainerCreate(context.Background(), + 
&dockercontainer.Config{ + Image: d.ImageName, + Labels: map[string]string{"migrate_test": "true"}, + Env: d.ENV, + Cmd: d.Cmd, + }, + &dockercontainer.HostConfig{ + PublishAllPorts: true, + }, + &dockernetwork.NetworkingConfig{}, + containerName) + if err != nil { + return err + } + + d.ContainerId = resp.ID + d.ContainerName = containerName + + // then start it + if err := d.client.ContainerStart(context.Background(), resp.ID, dockertypes.ContainerStartOptions{}); err != nil { + return err + } + + d.t.Logf("Docker: Started container %v (%v) for image %v listening at %v:%v", resp.ID[0:12], containerName, d.ImageName, d.Host(), d.Port()) + for _, v := range resp.Warnings { + d.t.Logf("Docker: Warning: %v", v) + } + return nil +} + +func (d *DockerContainer) KeepForDebugging() { + d.keepForDebugging = true +} + +func (d *DockerContainer) Remove() error { + if d.keepForDebugging { + return nil + } + + if len(d.ContainerId) == 0 { + return fmt.Errorf("missing containerId") + } + if err := d.client.ContainerRemove(context.Background(), d.ContainerId, + dockertypes.ContainerRemoveOptions{ + Force: true, + }); err != nil { + d.t.Log(err) + return err + } + d.t.Logf("Docker: Removed %v", d.ContainerName) + return nil +} + +func (d *DockerContainer) Inspect() error { + if len(d.ContainerId) == 0 { + return fmt.Errorf("missing containerId") + } + resp, err := d.client.ContainerInspect(context.Background(), d.ContainerId) + if err != nil { + return err + } + + d.ContainerJSON = resp + d.containerInspected = true + return nil +} + +func (d *DockerContainer) Logs() (io.ReadCloser, error) { + if len(d.ContainerId) == 0 { + return nil, fmt.Errorf("missing containerId") + } + + return d.client.ContainerLogs(context.Background(), d.ContainerId, dockertypes.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + }) +} + +func (d *DockerContainer) portMapping(selectFirst bool, cPort int) (containerPort uint, hostIP string, hostPort uint, err error) { + if !d.containerInspected { + if err := d.Inspect(); err != nil { + d.t.Fatal(err) + } + } + + for port, bindings := range d.ContainerJSON.NetworkSettings.Ports { + if !selectFirst && port.Int() != cPort { + // Skip ahead until we find the port we want + continue + } + for _, binding := range bindings { + + hostPortUint, err := strconv.ParseUint(binding.HostPort, 10, 64) + if err != nil { + return 0, "", 0, err + } + + return uint(port.Int()), binding.HostIP, uint(hostPortUint), nil + } + } + + if selectFirst { + return 0, "", 0, fmt.Errorf("no port binding") + } else { + return 0, "", 0, fmt.Errorf("specified port not bound") + } +} + +func (d *DockerContainer) Host() string { + _, hostIP, _, err := d.portMapping(true, -1) + if err != nil { + d.t.Fatal(err) + } + + if hostIP == "0.0.0.0" { + return "127.0.0.1" + } else { + return hostIP + } +} + +func (d *DockerContainer) Port() uint { + _, _, port, err := d.portMapping(true, -1) + if err != nil { + d.t.Fatal(err) + } + return port +} + +func (d *DockerContainer) PortFor(cPort int) uint { + _, _, port, err := d.portMapping(false, cPort) + if err != nil { + d.t.Fatal(err) + } + return port +} + +func (d *DockerContainer) NetworkSettings() dockertypes.NetworkSettings { + netSettings := d.ContainerJSON.NetworkSettings + return *netSettings +} + +type dockerImagePullOutput struct { + Status string `json:"status"` + ProgressDetails struct { + Current int `json:"current"` + Total int `json:"total"` + } `json:"progressDetail"` + Id string `json:"id"` + Progress string `json:"progress"` +} + +func init() { + 
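+	// Seeding math/rand here makes pseudoRandStr (below) produce different
+	// container names on every test run.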
rand.Seed(time.Now().UnixNano()) +} + +func pseudoRandStr(n int) string { + var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz0123456789") + b := make([]rune, n) + for i := range b { + b[i] = letterRunes[rand.Intn(len(letterRunes))] + } + return string(b) +} diff --git a/vendor/github.com/mattes/migrate/testing/testing.go b/vendor/github.com/mattes/migrate/testing/testing.go new file mode 100644 index 000000000..64e0a6465 --- /dev/null +++ b/vendor/github.com/mattes/migrate/testing/testing.go @@ -0,0 +1,96 @@ +package testing + +import ( + "io/ioutil" + "os" + "strconv" + "testing" + "time" + + dockertypes "github.com/docker/docker/api/types" +) + +type IsReadyFunc func(Instance) bool + +type TestFunc func(*testing.T, Instance) + +type Version struct { + Image string + ENV []string + Cmd []string +} + +func ParallelTest(t *testing.T, versions []Version, readyFn IsReadyFunc, testFn TestFunc) { + delay, err := strconv.Atoi(os.Getenv("MIGRATE_TEST_CONTAINER_BOOT_DELAY")) + if err != nil { + delay = 0 + } + + for i, version := range versions { + version := version // capture range variable, see https://goo.gl/60w3p2 + + // Only test against one version in short mode + // TODO: order is random, maybe always pick first version instead? + if i > 0 && testing.Short() { + t.Logf("Skipping %v in short mode", version) + + } else { + t.Run(version.Image, func(t *testing.T) { + t.Parallel() + + // create new container + container, err := NewDockerContainer(t, version.Image, version.ENV, version.Cmd) + if err != nil { + t.Fatalf("%v\n%s", err, containerLogs(t, container)) + } + + // make sure to remove container once done + defer container.Remove() + + // wait until database is ready + tick := time.Tick(1000 * time.Millisecond) + timeout := time.After(time.Duration(delay + 60) * time.Second) + outer: + for { + select { + case <-tick: + if readyFn(container) { + break outer + } + + case <-timeout: + t.Fatalf("Docker: Container not ready, timeout for %v.\n%s", version, containerLogs(t, container)) + } + } + + time.Sleep(time.Duration(int64(delay)) * time.Second) + + // we can now run the tests + testFn(t, container) + }) + } + } +} + +func containerLogs(t *testing.T, c *DockerContainer) []byte { + r, err := c.Logs() + if err != nil { + t.Errorf("%v", err) + return nil + } + defer r.Close() + b, err := ioutil.ReadAll(r) + if err != nil { + t.Errorf("%v", err) + return nil + } + return b +} + +type Instance interface { + Host() string + Port() uint + PortFor(int) uint + NetworkSettings() dockertypes.NetworkSettings + KeepForDebugging() +} diff --git a/vendor/github.com/mattes/migrate/testing/testing_test.go b/vendor/github.com/mattes/migrate/testing/testing_test.go new file mode 100644 index 000000000..8217decfa --- /dev/null +++ b/vendor/github.com/mattes/migrate/testing/testing_test.go @@ -0,0 +1,20 @@ +package testing + +import ( + "testing" +) + +func ExampleParallelTest(t *testing.T) { + var isReady = func(i Instance) bool { + // Return true if Instance is ready to run tests. + // Don't block here though. + return true + } + + // t is *testing.T coming from parent Test(t *testing.T) + ParallelTest(t, []Version{{Image: "docker_image:9.6"}}, isReady, + func(t *testing.T, i Instance) { + // Run your test/s ... 
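+			// e.g. connect to i.Host() and i.Port(), or i.PortFor(5432) for a
+			// specific container port, and exercise the driver against it.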
+ t.Fatal("...") + }) +} diff --git a/vendor/github.com/mattes/migrate/util.go b/vendor/github.com/mattes/migrate/util.go new file mode 100644 index 000000000..67048ea5c --- /dev/null +++ b/vendor/github.com/mattes/migrate/util.go @@ -0,0 +1,105 @@ +package migrate + +import ( + "bufio" + "fmt" + "io" + nurl "net/url" + "strings" + "time" +) + +// MultiError holds multiple errors. +type MultiError struct { + Errs []error +} + +// NewMultiError returns an error type holding multiple errors. +func NewMultiError(errs ...error) MultiError { + compactErrs := make([]error, 0) + for _, e := range errs { + if e != nil { + compactErrs = append(compactErrs, e) + } + } + return MultiError{compactErrs} +} + +// Error implements error. Mulitple errors are concatenated with 'and's. +func (m MultiError) Error() string { + var strs = make([]string, 0) + for _, e := range m.Errs { + if len(e.Error()) > 0 { + strs = append(strs, e.Error()) + } + } + return strings.Join(strs, " and ") +} + +// suint safely converts int to uint +// see https://goo.gl/wEcqof +// see https://goo.gl/pai7Dr +func suint(n int) uint { + if n < 0 { + panic(fmt.Sprintf("suint(%v) expects input >= 0", n)) + } + return uint(n) +} + +// newSlowReader turns an io.ReadCloser into a slow io.ReadCloser. +// Use this to simulate a slow internet connection. +func newSlowReader(r io.ReadCloser) io.ReadCloser { + return &slowReader{ + rx: r, + reader: bufio.NewReader(r), + } +} + +type slowReader struct { + rx io.ReadCloser + reader *bufio.Reader +} + +func (b *slowReader) Read(p []byte) (n int, err error) { + time.Sleep(10 * time.Millisecond) + c, err := b.reader.ReadByte() + if err != nil { + return 0, err + } else { + copy(p, []byte{c}) + return 1, nil + } +} + +func (b *slowReader) Close() error { + return b.rx.Close() +} + +var errNoScheme = fmt.Errorf("no scheme") + +// schemeFromUrl returns the scheme from a URL string +func schemeFromUrl(url string) (string, error) { + u, err := nurl.Parse(url) + if err != nil { + return "", err + } + + if len(u.Scheme) == 0 { + return "", errNoScheme + } + + return u.Scheme, nil +} + +// FilterCustomQuery filters all query values starting with `x-` +func FilterCustomQuery(u *nurl.URL) *nurl.URL { + ux := *u + vx := make(nurl.Values) + for k, v := range ux.Query() { + if len(k) <= 1 || (len(k) > 1 && k[0:2] != "x-") { + vx[k] = v + } + } + ux.RawQuery = vx.Encode() + return &ux +} diff --git a/vendor/github.com/mattes/migrate/util_test.go b/vendor/github.com/mattes/migrate/util_test.go new file mode 100644 index 000000000..1ad234473 --- /dev/null +++ b/vendor/github.com/mattes/migrate/util_test.go @@ -0,0 +1,32 @@ +package migrate + +import ( + nurl "net/url" + "testing" +) + +func TestSuintPanicsWithNegativeInput(t *testing.T) { + defer func() { + if r := recover(); r == nil { + t.Fatal("expected suint to panic for -1") + } + }() + suint(-1) +} + +func TestSuint(t *testing.T) { + if u := suint(0); u != 0 { + t.Fatalf("expected 0, got %v", u) + } +} + +func TestFilterCustomQuery(t *testing.T) { + n, err := nurl.Parse("foo://host?a=b&x-custom=foo&c=d") + if err != nil { + t.Fatal(err) + } + nx := FilterCustomQuery(n).Query() + if nx.Get("x-custom") != "" { + t.Fatalf("didn't expect x-custom") + } +} diff --git a/vendor/github.com/rdallman/migrate/.gitignore b/vendor/github.com/rdallman/migrate/.gitignore new file mode 100644 index 000000000..938901207 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/.gitignore @@ -0,0 +1,6 @@ +.DS_Store +cli/build +cli/cli +cli/migrate +.coverage +.godoc.pid diff 
--git a/vendor/github.com/rdallman/migrate/.travis.yml b/vendor/github.com/rdallman/migrate/.travis.yml new file mode 100644 index 000000000..c06a4041d --- /dev/null +++ b/vendor/github.com/rdallman/migrate/.travis.yml @@ -0,0 +1,60 @@ +language: go +sudo: required + +go: + - 1.9.1 + +env: + - MIGRATE_TEST_CONTAINER_BOOT_DELAY=10 + +# TODO: https://docs.docker.com/engine/installation/linux/ubuntu/ +# pre-provision with travis docker setup and pin down docker version in install step +services: + - docker + +install: + - make deps + - (cd $GOPATH/src/github.com/docker/docker && git fetch --all --tags --prune && git checkout v17.05.0-ce) + - sudo apt-get update && sudo apt-get install docker-ce=17.05.0* + - go get github.com/mattn/goveralls + +script: + - make test + +after_success: + - goveralls -service=travis-ci -coverprofile .coverage/combined.txt + - make list-external-deps > dependency_tree.txt && cat dependency_tree.txt + +before_deploy: + - make build-cli + - gem install --no-ri --no-rdoc fpm + - fpm -s dir -t deb -n migrate -v "$(git describe --tags 2>/dev/null | cut -c 2-)" --license MIT -m matthias.kadenbach@gmail.com --url https://github.com/mattes/migrate --description='Database migrations' -a amd64 -p migrate.$(git describe --tags 2>/dev/null | cut -c 2-).deb --deb-no-default-config-files -f -C cli/build migrate.linux-amd64=/usr/bin/migrate + +deploy: + - provider: releases + api_key: + secure: EFow50BI448HVb/uQ1Kk2Kq0xzmwIYq3V67YyymXIuqSCodvXEsMiBPUoLrxEknpPEIc67LEQTNdfHBgvyHk6oRINWAfie+7pr5tKrpOTF9ghyxoN1PlO8WKQCqwCvGMBCnc5ur5rvzp0bqfpV2rs5q9/nngy3kBuEvs12V7iho= + skip_cleanup: true + on: + go: 1.9 + repo: mattes/migrate + tags: true + file: + - cli/build/migrate.linux-amd64.tar.gz + - cli/build/migrate.darwin-amd64.tar.gz + - cli/build/migrate.windows-amd64.exe.tar.gz + - cli/build/sha256sum.txt + - dependency_tree.txt + - provider: packagecloud + repository: migrate + username: mattes + token: + secure: RiHJ/+J9DvXUah/APYdWySWZ5uOOISYJ0wS7xddc7/BNStRVjzFzvJ9zmb67RkyZZrvGuVjPiL4T8mtDyCJCj47RmU/56wPdEHbar/FjsiUCgwvR19RlulkgbV4okBCePbwzMw6HNHRp14TzfQCPtnN4kef0lOI4gZJkImN7rtQ= + dist: ubuntu/xenial + package_glob: '*.deb' + skip_cleanup: true + on: + go: 1.9 + repo: mattes/migrate + tags: true + diff --git a/vendor/github.com/rdallman/migrate/CONTRIBUTING.md b/vendor/github.com/rdallman/migrate/CONTRIBUTING.md new file mode 100644 index 000000000..fcf82a42e --- /dev/null +++ b/vendor/github.com/rdallman/migrate/CONTRIBUTING.md @@ -0,0 +1,22 @@ +# Development, Testing and Contributing + + 1. Make sure you have a running Docker daemon + (Install for [MacOS](https://docs.docker.com/docker-for-mac/)) + 2. Fork this repo and `git clone` somewhere to `$GOPATH/src/github.com/%you%/migrate` + 3. `make rewrite-import-paths` to update imports to your local fork + 4. Confirm tests are working: `make test-short` + 5. Write awesome code ... + 6. `make test` to run all tests against all database versions + 7. `make restore-import-paths` to restore import paths + 8. Push code and open Pull Request + +Some more helpful commands: + + * You can specify which database/ source tests to run: + `make test-short SOURCE='file go-bindata' DATABASE='postgres cassandra'` + * After `make test`, run `make html-coverage` which opens a shiny test coverage overview. + * Missing imports? `make deps` + * `make build-cli` builds the CLI in directory `cli/build/`. 
+ * `make list-external-deps` lists all external dependencies for each package + * `make docs && make open-docs` opens godoc in your browser, `make kill-docs` kills the godoc server. + Repeatedly call `make docs` to refresh the server. diff --git a/vendor/github.com/rdallman/migrate/FAQ.md b/vendor/github.com/rdallman/migrate/FAQ.md new file mode 100644 index 000000000..f8bb9a85b --- /dev/null +++ b/vendor/github.com/rdallman/migrate/FAQ.md @@ -0,0 +1,67 @@ +# FAQ + +#### How is the code base structured? + ``` + / package migrate (the heart of everything) + /cli the CLI wrapper + /database database driver and sub directories have the actual driver implementations + /source source driver and sub directories have the actual driver implementations + ``` + +#### Why is there no `source/driver.go:Last()`? + It's not needed. And unless the source has a "native" way to read a directory in reversed order, + it might be expensive to do a full directory scan in order to get the last element. + +#### What is a NilMigration? NilVersion? + NilMigration defines a migration without a body. NilVersion is defined as const -1. + +#### What is the difference between uint(version) and int(targetVersion)? + version refers to an existing migration version coming from a source and therefore can never be negative. + targetVersion can either be a version OR represent a NilVersion, which equals -1. + +#### What's the difference between Next/Previous and Up/Down? + ``` + 1_first_migration.up.extension next -> 2_second_migration.up.extension ... + 1_first_migration.down.extension <- previous 2_second_migration.down.extension ... + ``` + +#### Why two separate files (up and down) for a migration? + It makes all of our lives easier. No new markup/syntax to learn for users + and existing database utility tools continue to work as expected. + +#### How many migrations can migrate handle? + Whatever the maximum positive signed integer value is for your platform. + For 32bit it would be 2,147,483,647 migrations. Migrate only keeps references to + the currently run and pre-fetched migrations in memory. Please note that some + source drivers need to build a full "directory" tree first, which puts some + strain on memory consumption. + +#### Are the table tests in migrate_test.go bloated? + Yes and no. There are duplicate test cases for sure, but they don't hurt here. In fact + the tests are very visual now and might help new users understand expected behaviors quickly. + Migrate from version x to y and y is the last migration? Just check out the test for + that particular case and know what's going on instantly. + +#### What is Docker being used for? + Only for testing. See [testing/docker.go](testing/docker.go) + +#### Why not just use docker-compose? + It doesn't give us enough runtime control for testing. We want to be able to bring up containers fast + and whenever we want, not just once at the beginning of all tests. + +#### Can I maintain my driver in my own repository? + Yes, technically that's possible. We want to encourage you to contribute your driver to this repository though. + The driver's functionality is dictated by migrate's interfaces. That means there should really + just be one driver for a database/ source. We want to prevent a future where several drivers doing the exact same thing, + just implemented a bit differently, co-exist somewhere on Github. 
If users have to do research first to find the + "best" available driver for a database in order to get started, we would have failed as an open source community. + +#### Can I mix multiple sources during a batch of migrations? + No. + +#### What does "dirty" database mean? + Before a migration runs, each database sets a dirty flag. Execution stops if a migration fails and the dirty state persists, + which prevents attempts to run more migrations on top of a failed migration. You need to manually fix the error + and then "force" the expected version. + + diff --git a/vendor/github.com/rdallman/migrate/LICENSE b/vendor/github.com/rdallman/migrate/LICENSE new file mode 100644 index 000000000..62efa3670 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/LICENSE @@ -0,0 +1,23 @@ +The MIT License (MIT) + +Copyright (c) 2016 Matthias Kadenbach + +https://github.com/mattes/migrate + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/rdallman/migrate/MIGRATIONS.md b/vendor/github.com/rdallman/migrate/MIGRATIONS.md new file mode 100644 index 000000000..fbefb9278 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/MIGRATIONS.md @@ -0,0 +1,81 @@ +# Migrations + +## Migration Filename Format + +A single logical migration is represented as two separate migration files, one +to migrate "up" to the specified version from the previous version, and a second +to migrate back "down" to the previous version. These migrations can be provided +by any one of the supported [migration sources](./README.md#migration-sources). + +The ordering and direction of the migration files is determined by the filenames +used for them. `migrate` expects the filenames of migrations to have the format: + + {version}_{title}.up.{extension} + {version}_{title}.down.{extension} + +The `title` of each migration is unused, and is only for readability. Similarly, +the `extension` of the migration files is not checked by the library, and should +be an appropriate format for the database in use (`.sql` for SQL variants, for +instance). + +Versions of migrations may be represented as any 64 bit unsigned integer. +All migrations are applied upward in order of increasing version number, and +downward by decreasing version number. + +Common versioning schemes include incrementing integers: + + 1_initialize_schema.down.sql + 1_initialize_schema.up.sql + 2_add_table.down.sql + 2_add_table.up.sql + ... 
+ +Or timestamps at an appropriate resolution: + + 1500360784_initialize_schema.down.sql + 1500360784_initialize_schema.up.sql + 1500445949_add_table.down.sql + 1500445949_add_table.up.sql + ... + +But any scheme resulting in distinct, incrementing integers as versions is valid. + +It is suggested that the version number of corresponding `up` and `down` migration +files be equivalent for clarity, but they are allowed to differ so long as the +relative ordering of the migrations is preserved. + +The migration files are permitted to be empty, so in the event that a migration +is a no-op or is irreversible, it is recommended to still include both migration +files, and to either leave them empty or add a comment as appropriate. + +## Migration Content Format + +The format of the migration files themselves varies between database systems. +Different databases have different semantics around schema changes and when and +how they are allowed to occur (for instance, if schema changes can occur within +a transaction). + +As such, the `migrate` library has little to no checking around the format of +migration sources. The migration files are generally processed directly by the +drivers as raw operations. + +## Reversibility of Migrations + +Best practice for writing schema migrations is that all migrations should be +reversible. It should in theory be possible to run migrations down and back up +through any and all versions with the state being fully cleaned and recreated +by doing so. + +By adhering to this recommended practice, development and deployment of new code +is cleaner and easier (cleaning database state for a new feature should be as +easy as migrating down to a prior version, and back up to the latest). + +As opposed to some other migration libraries, `migrate` represents up and down +migrations as separate files. This prevents any non-standard file syntax from +being introduced which may result in unintended behavior or errors, depending +on what database is processing the file. + +While it is technically possible for an up or down migration to exist on its own +without an equivalently versioned counterpart, it is strongly recommended to +always include a down migration which cleans up the state of the corresponding +up migration. diff --git a/vendor/github.com/rdallman/migrate/Makefile b/vendor/github.com/rdallman/migrate/Makefile new file mode 100644 index 000000000..e36394bed --- /dev/null +++ b/vendor/github.com/rdallman/migrate/Makefile @@ -0,0 +1,123 @@ +SOURCE ?= file go-bindata github aws-s3 google-cloud-storage +DATABASE ?= postgres mysql redshift cassandra sqlite3 spanner cockroachdb clickhouse +VERSION ?= $(shell git describe --tags 2>/dev/null | cut -c 2-) +TEST_FLAGS ?= +REPO_OWNER ?= $(shell cd .. && basename "$$(pwd)") + + +build-cli: clean + -mkdir ./cli/build + cd ./cli && CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -a -o build/migrate.linux-amd64 -ldflags='-X main.Version=$(VERSION)' -tags '$(DATABASE) $(SOURCE)' . + cd ./cli && CGO_ENABLED=1 GOOS=darwin GOARCH=amd64 go build -a -o build/migrate.darwin-amd64 -ldflags='-X main.Version=$(VERSION)' -tags '$(DATABASE) $(SOURCE)' . + cd ./cli && CGO_ENABLED=1 GOOS=windows GOARCH=amd64 go build -a -o build/migrate.windows-amd64.exe -ldflags='-X main.Version=$(VERSION)' -tags '$(DATABASE) $(SOURCE)' . + cd ./cli/build && find . 
-name 'migrate*' | xargs -I{} tar czf {}.tar.gz {} + cd ./cli/build && shasum -a 256 * > sha256sum.txt + cat ./cli/build/sha256sum.txt + + +clean: + -rm -r ./cli/build + + +test-short: + make test-with-flags --ignore-errors TEST_FLAGS='-short' + + +test: + @-rm -r .coverage + @mkdir .coverage + make test-with-flags TEST_FLAGS='-v -race -covermode atomic -coverprofile .coverage/_$$(RAND).txt -bench=. -benchmem' + @echo 'mode: atomic' > .coverage/combined.txt + @cat .coverage/*.txt | grep -v 'mode: atomic' >> .coverage/combined.txt + + +test-with-flags: + @echo SOURCE: $(SOURCE) + @echo DATABASE: $(DATABASE) + + @go test $(TEST_FLAGS) . + @go test $(TEST_FLAGS) ./cli/... + @go test $(TEST_FLAGS) ./testing/... + + @echo -n '$(SOURCE)' | tr -s ' ' '\n' | xargs -I{} go test $(TEST_FLAGS) ./source/{} + @go test $(TEST_FLAGS) ./source/testing/... + @go test $(TEST_FLAGS) ./source/stub/... + + @echo -n '$(DATABASE)' | tr -s ' ' '\n' | xargs -I{} go test $(TEST_FLAGS) ./database/{} + @go test $(TEST_FLAGS) ./database/testing/... + @go test $(TEST_FLAGS) ./database/stub/... + + +kill-orphaned-docker-containers: + docker rm -f $(shell docker ps -aq --filter label=migrate_test) + + +html-coverage: + go tool cover -html=.coverage/combined.txt + + +deps: + -go get -v -u ./... + -go test -v -i ./... + # TODO: why is this not being fetched with the command above? + -go get -u github.com/fsouza/fake-gcs-server/fakestorage + + +list-external-deps: + $(call external_deps,'.') + $(call external_deps,'./cli/...') + $(call external_deps,'./testing/...') + + $(foreach v, $(SOURCE), $(call external_deps,'./source/$(v)/...')) + $(call external_deps,'./source/testing/...') + $(call external_deps,'./source/stub/...') + + $(foreach v, $(DATABASE), $(call external_deps,'./database/$(v)/...')) + $(call external_deps,'./database/testing/...') + $(call external_deps,'./database/stub/...') + + +restore-import-paths: + find . -name '*.go' -type f -execdir sed -i '' s%\"github.com/$(REPO_OWNER)/migrate%\"github.com/mattes/migrate%g '{}' \; + + +rewrite-import-paths: + find . -name '*.go' -type f -execdir sed -i '' s%\"github.com/mattes/migrate%\"github.com/$(REPO_OWNER)/migrate%g '{}' \; + + +# example: fswatch -0 --exclude .godoc.pid --event Updated . | xargs -0 -n1 -I{} make docs +docs: + -make kill-docs + nohup godoc -play -http=127.0.0.1:6064 /dev/null 2>&1 & echo $$! > .godoc.pid + cat .godoc.pid + + +kill-docs: + @cat .godoc.pid + kill -9 $$(cat .godoc.pid) + rm .godoc.pid + + +open-docs: + open http://localhost:6064/pkg/github.com/$(REPO_OWNER)/migrate + + +# example: make release V=0.0.0 +release: + git tag v$(V) + @read -p "Press enter to confirm and push to origin ..." 
&& git push origin v$(V) + + +define external_deps + @echo '-- $(1)'; go list -f '{{join .Deps "\n"}}' $(1) | grep -v github.com/$(REPO_OWNER)/migrate | xargs go list -f '{{if not .Standard}}{{.ImportPath}}{{end}}' + +endef + + +.PHONY: build-cli clean test-short test test-with-flags deps html-coverage \ + restore-import-paths rewrite-import-paths list-external-deps release \ + docs kill-docs open-docs kill-orphaned-docker-containers + +SHELL = /bin/bash +RAND = $(shell echo $$RANDOM) + diff --git a/vendor/github.com/rdallman/migrate/README.md b/vendor/github.com/rdallman/migrate/README.md new file mode 100644 index 000000000..cbc19d88c --- /dev/null +++ b/vendor/github.com/rdallman/migrate/README.md @@ -0,0 +1,140 @@ +[![Build Status](https://travis-ci.org/mattes/migrate.svg?branch=master)](https://travis-ci.org/mattes/migrate) +[![GoDoc](https://godoc.org/github.com/mattes/migrate?status.svg)](https://godoc.org/github.com/mattes/migrate) +[![Coverage Status](https://coveralls.io/repos/github/mattes/migrate/badge.svg?branch=v3.0-prev)](https://coveralls.io/github/mattes/migrate?branch=v3.0-prev) +[![packagecloud.io](https://img.shields.io/badge/deb-packagecloud.io-844fec.svg)](https://packagecloud.io/mattes/migrate?filter=debs) + +# migrate + +__Database migrations written in Go. Use as [CLI](#cli-usage) or import as [library](#use-in-your-go-project).__ + + * Migrate reads migrations from [sources](#migration-sources) + and applies them in the correct order to a [database](#databases). + * Drivers are "dumb"; migrate glues everything together and makes sure the logic is bulletproof. + (Keeps the drivers lightweight, too.) + * Database drivers don't assume things or try to correct user input. When in doubt, fail. + + +Looking for [v1](https://github.com/mattes/migrate/tree/v1)? + + +## Databases + +Database drivers run migrations. [Add a new database?](database/driver.go) + + * [PostgreSQL](database/postgres) + * [Redshift](database/redshift) + * [Ql](database/ql) + * [Cassandra](database/cassandra) + * [SQLite](database/sqlite3) + * [MySQL/ MariaDB](database/mysql) + * [Neo4j](database/neo4j) ([todo #167](https://github.com/mattes/migrate/issues/167)) + * [MongoDB](database/mongodb) ([todo #169](https://github.com/mattes/migrate/issues/169)) + * [CrateDB](database/crate) ([todo #170](https://github.com/mattes/migrate/issues/170)) + * [Shell](database/shell) ([todo #171](https://github.com/mattes/migrate/issues/171)) + * [Google Cloud Spanner](database/spanner) + * [CockroachDB](database/cockroachdb) + * [ClickHouse](database/clickhouse) + + +## Migration Sources + +Source drivers read migrations from local or remote sources. [Add a new source?](source/driver.go) + + * [Filesystem](source/file) - read from filesystem (always included) + * [Go-Bindata](source/go-bindata) - read from embedded binary data ([jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata)) + * [Github](source/github) - read from remote Github repositories + * [AWS S3](source/aws-s3) - read from Amazon Web Services S3 + * [Google Cloud Storage](source/google-cloud-storage) - read from Google Cloud Platform Storage + + + +## CLI usage + + * Simple wrapper around this library. + * Handles ctrl+c (SIGINT) gracefully. + * No config search paths, no config files, no magic ENV var injections. 
+ +__[CLI Documentation](cli)__ + +([brew todo #156](https://github.com/mattes/migrate/issues/156)) + +``` +$ brew install migrate --with-postgres +$ migrate -database postgres://localhost:5432/database up 2 +``` + + +## Use in your Go project + + * API is stable and frozen for this release (v3.x). + * Package migrate has no external dependencies. + * Only import the drivers you need. + (check [dependency_tree.txt](https://github.com/mattes/migrate/releases) for each driver) + * To help prevent database corruptions, it supports graceful stops via `GracefulStop chan bool`. + * Bring your own logger. + * Uses `io.Reader` streams internally for low memory overhead. + * Thread-safe and no goroutine leaks. + +__[Go Documentation](https://godoc.org/github.com/mattes/migrate)__ + +```go +import ( + "github.com/mattes/migrate" + _ "github.com/mattes/migrate/database/postgres" + _ "github.com/mattes/migrate/source/github" +) + +func main() { + m, err := migrate.New( + "github://mattes:personal-access-token@mattes/migrate_test", + "postgres://localhost:5432/database?sslmode=enable") + m.Steps(2) +} +``` + +Want to use an existing database client? + +```go +import ( + "database/sql" + _ "github.com/lib/pq" + "github.com/mattes/migrate" + "github.com/mattes/migrate/database/postgres" + _ "github.com/mattes/migrate/source/file" +) + +func main() { + db, err := sql.Open("postgres", "postgres://localhost:5432/database?sslmode=enable") + driver, err := postgres.WithInstance(db, &postgres.Config{}) + m, err := migrate.NewWithDatabaseInstance( + "file:///migrations", + "postgres", driver) + m.Steps(2) +} +``` + +## Migration files + +Each migration has an up and down migration. [Why?](FAQ.md#why-two-separate-files-up-and-down-for-a-migration) + +``` +1481574547_create_users_table.up.sql +1481574547_create_users_table.down.sql +``` + +[Best practices: How to write migrations.](MIGRATIONS.md) + + + +## Development and Contributing + +Yes, please! [`Makefile`](Makefile) is your friend, +read the [development guide](CONTRIBUTING.md). + +Also have a look at the [FAQ](FAQ.md). + + + +--- + +Looking for alternatives? [https://awesome-go.com/#database](https://awesome-go.com/#database). diff --git a/vendor/github.com/rdallman/migrate/cli/README.md b/vendor/github.com/rdallman/migrate/cli/README.md new file mode 100644 index 000000000..c0886d5a7 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/cli/README.md @@ -0,0 +1,113 @@ +# migrate CLI + +## Installation + +#### With Go toolchain + +``` +$ go get -u -d github.com/mattes/migrate/cli github.com/lib/pq +$ go build -tags 'postgres' -o /usr/local/bin/migrate github.com/mattes/migrate/cli +``` + +Note: This example builds the cli which will only work with postgres. In order +to build the cli for use with other databases, replace the `postgres` build tag +with the appropriate database tag(s) for the databases desired. The tags +correspond to the names of the sub-packages underneath the +[`database`](../database) package. 
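+
+Each such tag simply gates a file that blank-imports the matching driver;
+`build_postgres.go`, for instance, is just:
+
+```go
+// +build postgres
+
+package main
+
+import (
+	_ "github.com/mattes/migrate/database/postgres"
+)
+```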
+ +#### MacOS + +([todo #156](https://github.com/mattes/migrate/issues/156)) + +``` +$ brew install migrate --with-postgres +``` + +#### Linux (*.deb package) + +``` +$ curl -L https://packagecloud.io/mattes/migrate/gpgkey | apt-key add - +$ echo "deb https://packagecloud.io/mattes/migrate/ubuntu/ xenial main" > /etc/apt/sources.list.d/migrate.list +$ apt-get update +$ apt-get install -y migrate +``` + +#### Download pre-built binary (Windows, MacOS, or Linux) + +[Release Downloads](https://github.com/mattes/migrate/releases) + +``` +$ curl -L https://github.com/mattes/migrate/releases/download/$version/migrate.$platform-amd64.tar.gz | tar xvz +``` + + + +## Usage + +``` +$ migrate -help +Usage: migrate OPTIONS COMMAND [arg...] + migrate [ -version | -help ] + +Options: + -source Location of the migrations (driver://url) + -path Shorthand for -source=file://path + -database Run migrations against this database (driver://url) + -prefetch N Number of migrations to load in advance before executing (default 10) + -lock-timeout N Allow N seconds to acquire database lock (default 15) + -verbose Print verbose logging + -version Print version + -help Print usage + +Commands: + create [-ext E] [-dir D] NAME + Create a set of timestamped up/down migrations titled NAME, in directory D with extension E + goto V Migrate to version V + up [N] Apply all or N up migrations + down [N] Apply all or N down migrations + drop Drop everything inside database + force V Set version V but don't run migration (ignores dirty state) + version Print current migration version +``` + + +So let's say you want to run the first two migrations + +``` +$ migrate -database postgres://localhost:5432/database up 2 +``` + +If your migrations are hosted on github + +``` +$ migrate -source github://mattes:personal-access-token@mattes/migrate_test \ + -database postgres://localhost:5432/database down 2 +``` + +The CLI will gracefully stop at a safe point when SIGINT (ctrl+c) is received. +Send SIGKILL for immediate halt. 
+ + + +## Reading CLI arguments from somewhere else + +##### ENV variables + +``` +$ migrate -database "$MY_MIGRATE_DATABASE" +``` + +##### JSON files + +Check out https://stedolan.github.io/jq/ + +``` +$ migrate -database "$(cat config.json | jq '.database')" +``` + +##### YAML files + +``` +$ migrate -database "$(cat config/database.yml | ruby -ryaml -e "print YAML.load(STDIN.read)['database']")" +$ migrate -database "$(cat config/database.yml | python -c 'import yaml,sys;print yaml.safe_load(sys.stdin)["database"]')" +``` diff --git a/vendor/github.com/rdallman/migrate/cli/build_aws-s3.go b/vendor/github.com/rdallman/migrate/cli/build_aws-s3.go new file mode 100644 index 000000000..766fd5663 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/cli/build_aws-s3.go @@ -0,0 +1,7 @@ +// +build aws-s3 + +package main + +import ( + _ "github.com/mattes/migrate/source/aws-s3" +) diff --git a/vendor/github.com/rdallman/migrate/cli/build_cassandra.go b/vendor/github.com/rdallman/migrate/cli/build_cassandra.go new file mode 100644 index 000000000..319b52d2a --- /dev/null +++ b/vendor/github.com/rdallman/migrate/cli/build_cassandra.go @@ -0,0 +1,7 @@ +// +build cassandra + +package main + +import ( + _ "github.com/mattes/migrate/database/cassandra" +) diff --git a/vendor/github.com/rdallman/migrate/cli/build_clickhouse.go b/vendor/github.com/rdallman/migrate/cli/build_clickhouse.go new file mode 100644 index 000000000..c9175e280 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/cli/build_clickhouse.go @@ -0,0 +1,8 @@ +// +build clickhouse + +package main + +import ( + _ "github.com/kshvakov/clickhouse" + _ "github.com/mattes/migrate/database/clickhouse" +) diff --git a/vendor/github.com/rdallman/migrate/cli/build_cockroachdb.go b/vendor/github.com/rdallman/migrate/cli/build_cockroachdb.go new file mode 100644 index 000000000..e5fdf073e --- /dev/null +++ b/vendor/github.com/rdallman/migrate/cli/build_cockroachdb.go @@ -0,0 +1,7 @@ +// +build cockroachdb + +package main + +import ( + _ "github.com/mattes/migrate/database/cockroachdb" +) diff --git a/vendor/github.com/rdallman/migrate/cli/build_github.go b/vendor/github.com/rdallman/migrate/cli/build_github.go new file mode 100644 index 000000000..9c813b46c --- /dev/null +++ b/vendor/github.com/rdallman/migrate/cli/build_github.go @@ -0,0 +1,7 @@ +// +build github + +package main + +import ( + _ "github.com/mattes/migrate/source/github" +) diff --git a/vendor/github.com/rdallman/migrate/cli/build_go-bindata.go b/vendor/github.com/rdallman/migrate/cli/build_go-bindata.go new file mode 100644 index 000000000..8a6a89349 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/cli/build_go-bindata.go @@ -0,0 +1,7 @@ +// +build go-bindata + +package main + +import ( + _ "github.com/mattes/migrate/source/go-bindata" +) diff --git a/vendor/github.com/rdallman/migrate/cli/build_google-cloud-storage.go b/vendor/github.com/rdallman/migrate/cli/build_google-cloud-storage.go new file mode 100644 index 000000000..04f314338 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/cli/build_google-cloud-storage.go @@ -0,0 +1,7 @@ +// +build google-cloud-storage + +package main + +import ( + _ "github.com/mattes/migrate/source/google-cloud-storage" +) diff --git a/vendor/github.com/rdallman/migrate/cli/build_mysql.go b/vendor/github.com/rdallman/migrate/cli/build_mysql.go new file mode 100644 index 000000000..177766f5e --- /dev/null +++ b/vendor/github.com/rdallman/migrate/cli/build_mysql.go @@ -0,0 +1,7 @@ +// +build mysql + +package main + +import ( + _ 
"github.com/mattes/migrate/database/mysql" +) diff --git a/vendor/github.com/rdallman/migrate/cli/build_postgres.go b/vendor/github.com/rdallman/migrate/cli/build_postgres.go new file mode 100644 index 000000000..87f6be757 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/cli/build_postgres.go @@ -0,0 +1,7 @@ +// +build postgres + +package main + +import ( + _ "github.com/mattes/migrate/database/postgres" +) diff --git a/vendor/github.com/rdallman/migrate/cli/build_ql.go b/vendor/github.com/rdallman/migrate/cli/build_ql.go new file mode 100644 index 000000000..cd56ef958 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/cli/build_ql.go @@ -0,0 +1,7 @@ +// +build ql + +package main + +import ( + _ "github.com/mattes/migrate/database/ql" +) diff --git a/vendor/github.com/rdallman/migrate/cli/build_redshift.go b/vendor/github.com/rdallman/migrate/cli/build_redshift.go new file mode 100644 index 000000000..8153d0aa3 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/cli/build_redshift.go @@ -0,0 +1,7 @@ +// +build redshift + +package main + +import ( + _ "github.com/mattes/migrate/database/redshift" +) diff --git a/vendor/github.com/rdallman/migrate/cli/build_spanner.go b/vendor/github.com/rdallman/migrate/cli/build_spanner.go new file mode 100644 index 000000000..7223d820b --- /dev/null +++ b/vendor/github.com/rdallman/migrate/cli/build_spanner.go @@ -0,0 +1,7 @@ +// +build spanner + +package main + +import ( + _ "github.com/mattes/migrate/database/spanner" +) diff --git a/vendor/github.com/rdallman/migrate/cli/build_sqlite3.go b/vendor/github.com/rdallman/migrate/cli/build_sqlite3.go new file mode 100644 index 000000000..48ae8ebc2 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/cli/build_sqlite3.go @@ -0,0 +1,7 @@ +// +build sqlite3 + +package main + +import ( + _ "github.com/mattes/migrate/database/sqlite3" +) diff --git a/vendor/github.com/rdallman/migrate/cli/commands.go b/vendor/github.com/rdallman/migrate/cli/commands.go new file mode 100644 index 000000000..703896dc1 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/cli/commands.go @@ -0,0 +1,96 @@ +package main + +import ( + "github.com/mattes/migrate" + _ "github.com/mattes/migrate/database/stub" // TODO remove again + _ "github.com/mattes/migrate/source/file" + "os" + "fmt" +) + +func createCmd(dir string, timestamp int64, name string, ext string) { + base := fmt.Sprintf("%v%v_%v.", dir, timestamp, name) + os.MkdirAll(dir, os.ModePerm) + createFile(base + "up" + ext) + createFile(base + "down" + ext) +} + +func createFile(fname string) { + if _, err := os.Create(fname); err != nil { + log.fatalErr(err) + } +} + +func gotoCmd(m *migrate.Migrate, v uint) { + if err := m.Migrate(v); err != nil { + if err != migrate.ErrNoChange { + log.fatalErr(err) + } else { + log.Println(err) + } + } +} + +func upCmd(m *migrate.Migrate, limit int) { + if limit >= 0 { + if err := m.Steps(limit); err != nil { + if err != migrate.ErrNoChange { + log.fatalErr(err) + } else { + log.Println(err) + } + } + } else { + if err := m.Up(); err != nil { + if err != migrate.ErrNoChange { + log.fatalErr(err) + } else { + log.Println(err) + } + } + } +} + +func downCmd(m *migrate.Migrate, limit int) { + if limit >= 0 { + if err := m.Steps(-limit); err != nil { + if err != migrate.ErrNoChange { + log.fatalErr(err) + } else { + log.Println(err) + } + } + } else { + if err := m.Down(); err != nil { + if err != migrate.ErrNoChange { + log.fatalErr(err) + } else { + log.Println(err) + } + } + } +} + +func dropCmd(m *migrate.Migrate) { + if err := 
m.Drop(); err != nil { + log.fatalErr(err) + } +} + +func forceCmd(m *migrate.Migrate, v int) { + if err := m.Force(v); err != nil { + log.fatalErr(err) + } +} + +func versionCmd(m *migrate.Migrate) { + v, dirty, err := m.Version() + if err != nil { + log.fatalErr(err) + } + if dirty { + log.Printf("%v (dirty)\n", v) + } else { + log.Println(v) + } +} diff --git a/vendor/github.com/rdallman/migrate/cli/examples/Dockerfile b/vendor/github.com/rdallman/migrate/cli/examples/Dockerfile new file mode 100644 index 000000000..740f951f8 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/cli/examples/Dockerfile @@ -0,0 +1,12 @@ +FROM ubuntu:xenial + +RUN apt-get update && \ + apt-get install -y curl apt-transport-https + +RUN curl -L https://packagecloud.io/mattes/migrate/gpgkey | apt-key add - && \ + echo "deb https://packagecloud.io/mattes/migrate/ubuntu/ xenial main" > /etc/apt/sources.list.d/migrate.list && \ + apt-get update && \ + apt-get install -y migrate + +RUN migrate -version + diff --git a/vendor/github.com/rdallman/migrate/cli/log.go b/vendor/github.com/rdallman/migrate/cli/log.go new file mode 100644 index 000000000..a119d3481 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/cli/log.go @@ -0,0 +1,45 @@ +package main + +import ( + "fmt" + logpkg "log" + "os" +) + +type Log struct { + verbose bool +} + +func (l *Log) Printf(format string, v ...interface{}) { + if l.verbose { + logpkg.Printf(format, v...) + } else { + fmt.Fprintf(os.Stderr, format, v...) + } +} + +func (l *Log) Println(args ...interface{}) { + if l.verbose { + logpkg.Println(args...) + } else { + fmt.Fprintln(os.Stderr, args...) + } +} + +func (l *Log) Verbose() bool { + return l.verbose +} + +func (l *Log) fatalf(format string, v ...interface{}) { + l.Printf(format, v...) + os.Exit(1) +} + +func (l *Log) fatal(args ...interface{}) { + l.Println(args...) + os.Exit(1) +} + +func (l *Log) fatalErr(err error) { + l.fatal("error:", err) +} diff --git a/vendor/github.com/rdallman/migrate/cli/main.go b/vendor/github.com/rdallman/migrate/cli/main.go new file mode 100644 index 000000000..4c727a972 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/cli/main.go @@ -0,0 +1,237 @@ +package main + +import ( + "flag" + "fmt" + "os" + "os/signal" + "strconv" + "strings" + "syscall" + "time" + + "github.com/mattes/migrate" +) + +// set main log +var log = &Log{} + +func main() { + helpPtr := flag.Bool("help", false, "") + versionPtr := flag.Bool("version", false, "") + verbosePtr := flag.Bool("verbose", false, "") + prefetchPtr := flag.Uint("prefetch", 10, "") + lockTimeoutPtr := flag.Uint("lock-timeout", 15, "") + pathPtr := flag.String("path", "", "") + databasePtr := flag.String("database", "", "") + sourcePtr := flag.String("source", "", "") + + flag.Usage = func() { + fmt.Fprint(os.Stderr, + `Usage: migrate OPTIONS COMMAND [arg...] 
+ migrate [ -version | -help ] + +Options: + -source Location of the migrations (driver://url) + -path Shorthand for -source=file://path + -database Run migrations against this database (driver://url) + -prefetch N Number of migrations to load in advance before executing (default 10) + -lock-timeout N Allow N seconds to acquire database lock (default 15) + -verbose Print verbose logging + -version Print version + -help Print usage + +Commands: + create [-ext E] [-dir D] NAME + Create a set of timestamped up/down migrations titled NAME, in directory D with extension E + goto V Migrate to version V + up [N] Apply all or N up migrations + down [N] Apply all or N down migrations + drop Drop everything inside database + force V Set version V but don't run migration (ignores dirty state) + version Print current migration version +`) + } + + flag.Parse() + + // initialize logger + log.verbose = *verbosePtr + + // show cli version + if *versionPtr { + fmt.Fprintln(os.Stderr, Version) + os.Exit(0) + } + + // show help + if *helpPtr { + flag.Usage() + os.Exit(0) + } + + // translate -path into -source if given + if *sourcePtr == "" && *pathPtr != "" { + *sourcePtr = fmt.Sprintf("file://%v", *pathPtr) + } + + // initialize migrate + // don't catch migraterErr here and let each command decide + // how it wants to handle the error + migrater, migraterErr := migrate.New(*sourcePtr, *databasePtr) + defer func() { + if migraterErr == nil { + migrater.Close() + } + }() + if migraterErr == nil { + migrater.Log = log + migrater.PrefetchMigrations = *prefetchPtr + migrater.LockTimeout = time.Duration(int64(*lockTimeoutPtr)) * time.Second + + // handle Ctrl+c + signals := make(chan os.Signal, 1) + signal.Notify(signals, syscall.SIGINT) + go func() { + for range signals { + log.Println("Stopping after this running migration ...") + migrater.GracefulStop <- true + return + } + }() + } + + startTime := time.Now() + + switch flag.Arg(0) { + case "create": + args := flag.Args()[1:] + + createFlagSet := flag.NewFlagSet("create", flag.ExitOnError) + extPtr := createFlagSet.String("ext", "", "File extension") + dirPtr := createFlagSet.String("dir", "", "Directory to place file in (default: current working directory)") + createFlagSet.Parse(args) + + if createFlagSet.NArg() == 0 { + log.fatal("error: please specify name") + } + name := createFlagSet.Arg(0) + + if *extPtr != "" { + *extPtr = "."
+ strings.TrimPrefix(*extPtr, ".") + } + if *dirPtr != "" { + *dirPtr = strings.Trim(*dirPtr, "/") + "/" + } + + timestamp := startTime.Unix() + + createCmd(*dirPtr, timestamp, name, *extPtr) + + case "goto": + if migraterErr != nil { + log.fatalErr(migraterErr) + } + + if flag.Arg(1) == "" { + log.fatal("error: please specify version argument V") + } + + v, err := strconv.ParseUint(flag.Arg(1), 10, 64) + if err != nil { + log.fatal("error: can't read version argument V") + } + + gotoCmd(migrater, uint(v)) + + if log.verbose { + log.Println("Finished after", time.Now().Sub(startTime)) + } + + case "up": + if migraterErr != nil { + log.fatalErr(migraterErr) + } + + limit := -1 + if flag.Arg(1) != "" { + n, err := strconv.ParseUint(flag.Arg(1), 10, 64) + if err != nil { + log.fatal("error: can't read limit argument N") + } + limit = int(n) + } + + upCmd(migrater, limit) + + if log.verbose { + log.Println("Finished after", time.Now().Sub(startTime)) + } + + case "down": + if migraterErr != nil { + log.fatalErr(migraterErr) + } + + limit := -1 + if flag.Arg(1) != "" { + n, err := strconv.ParseUint(flag.Arg(1), 10, 64) + if err != nil { + log.fatal("error: can't read limit argument N") + } + limit = int(n) + } + + downCmd(migrater, limit) + + if log.verbose { + log.Println("Finished after", time.Now().Sub(startTime)) + } + + case "drop": + if migraterErr != nil { + log.fatalErr(migraterErr) + } + + dropCmd(migrater) + + if log.verbose { + log.Println("Finished after", time.Now().Sub(startTime)) + } + + case "force": + if migraterErr != nil { + log.fatalErr(migraterErr) + } + + if flag.Arg(1) == "" { + log.fatal("error: please specify version argument V") + } + + v, err := strconv.ParseInt(flag.Arg(1), 10, 64) + if err != nil { + log.fatal("error: can't read version argument V") + } + + if v < -1 { + log.fatal("error: argument V must be >= -1") + } + + forceCmd(migrater, int(v)) + + if log.verbose { + log.Println("Finished after", time.Now().Sub(startTime)) + } + + case "version": + if migraterErr != nil { + log.fatalErr(migraterErr) + } + + versionCmd(migrater) + + default: + flag.Usage() + os.Exit(0) + } +} diff --git a/vendor/github.com/rdallman/migrate/cli/version.go b/vendor/github.com/rdallman/migrate/cli/version.go new file mode 100644 index 000000000..6c3ec49fe --- /dev/null +++ b/vendor/github.com/rdallman/migrate/cli/version.go @@ -0,0 +1,4 @@ +package main + +// Version is set in Makefile with build flags +var Version = "dev" diff --git a/vendor/github.com/rdallman/migrate/database/cassandra/README.md b/vendor/github.com/rdallman/migrate/database/cassandra/README.md new file mode 100644 index 000000000..f99b1105e --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/cassandra/README.md @@ -0,0 +1,31 @@ +# Cassandra + +* Drop command will not work on Cassandra 2.X because it relies on the +system_schema table, which ships with 3.X +* Other commands should work properly but are **not tested** + + +## Usage +`cassandra://host:port/keyspace?param1=value&param2=value2` + + +| URL Query | Default value | Description | +|------------|-------------|-----------| +| `x-migrations-table` | schema_migrations | Name of the migrations table | +| `port` | 9042 | The port to bind to | +| `consistency` | ALL | Migration consistency | +| `protocol` | | Cassandra protocol version (3 or 4) | +| `timeout` | 1 minute | Migration timeout | +| `username` | nil | Username to use when authenticating. | +| `password` | nil | Password to use when authenticating.
| + + `timeout` is parsed using [time.ParseDuration(s string)](https://golang.org/pkg/time/#ParseDuration) + + +## Upgrading from v1 + +1. Write down the current migration version from schema_migrations +2. `DROP TABLE schema_migrations` +3. Download and install the latest migrate version. +4. Force the current migration version with `migrate force <version>`. diff --git a/vendor/github.com/rdallman/migrate/database/cassandra/cassandra.go b/vendor/github.com/rdallman/migrate/database/cassandra/cassandra.go new file mode 100644 index 000000000..42563fdbe --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/cassandra/cassandra.go @@ -0,0 +1,228 @@ +package cassandra + +import ( + "fmt" + "io" + "io/ioutil" + nurl "net/url" + "strconv" + "time" + + "github.com/gocql/gocql" + "github.com/mattes/migrate/database" +) + +func init() { + db := new(Cassandra) + database.Register("cassandra", db) +} + +var DefaultMigrationsTable = "schema_migrations" +var dbLocked = false + +var ( + ErrNilConfig = fmt.Errorf("no config") + ErrNoKeyspace = fmt.Errorf("no keyspace provided") + ErrDatabaseDirty = fmt.Errorf("database is dirty") +) + +type Config struct { + MigrationsTable string + KeyspaceName string +} + +type Cassandra struct { + session *gocql.Session + isLocked bool + + // Open and WithInstance need to guarantee that config is never nil + config *Config +} + +func (p *Cassandra) Open(url string) (database.Driver, error) { + u, err := nurl.Parse(url) + if err != nil { + return nil, err + } + + // Check for missing mandatory attributes + if len(u.Path) == 0 { + return nil, ErrNoKeyspace + } + + migrationsTable := u.Query().Get("x-migrations-table") + if len(migrationsTable) == 0 { + migrationsTable = DefaultMigrationsTable + } + + p.config = &Config{ + KeyspaceName: u.Path, + MigrationsTable: migrationsTable, + } + + cluster := gocql.NewCluster(u.Host) + cluster.Keyspace = u.Path[1:len(u.Path)] + cluster.Consistency = gocql.All + cluster.Timeout = 1 * time.Minute + + if len(u.Query().Get("username")) > 0 && len(u.Query().Get("password")) > 0 { + authenticator := gocql.PasswordAuthenticator{ + Username: u.Query().Get("username"), + Password: u.Query().Get("password"), + } + cluster.Authenticator = authenticator + } + + // Retrieve query string configuration + if len(u.Query().Get("consistency")) > 0 { + var consistency gocql.Consistency + consistency, err = parseConsistency(u.Query().Get("consistency")) + if err != nil { + return nil, err + } + + cluster.Consistency = consistency + } + if len(u.Query().Get("protocol")) > 0 { + var protoversion int + protoversion, err = strconv.Atoi(u.Query().Get("protocol")) + if err != nil { + return nil, err + } + cluster.ProtoVersion = protoversion + } + if len(u.Query().Get("timeout")) > 0 { + var timeout time.Duration + timeout, err = time.ParseDuration(u.Query().Get("timeout")) + if err != nil { + return nil, err + } + cluster.Timeout = timeout + } + + p.session, err = cluster.CreateSession() + + if err != nil { + return nil, err + } + + if err := p.ensureVersionTable(); err != nil { + return nil, err + } + + return p, nil +} + +func (p *Cassandra) Close() error { + p.session.Close() + return nil +} + +func (p *Cassandra) Lock() error { + if dbLocked { + return database.ErrLocked + } + dbLocked = true + return nil +} + +func (p *Cassandra) Unlock() error { + dbLocked = false + return nil +} + +func (p *Cassandra) Run(migration io.Reader) error { + migr, err := ioutil.ReadAll(migration) + if err != nil { + return err + } + // run migration + query := 
string(migr[:]) + if err := p.session.Query(query).Exec(); err != nil { + // TODO: cast to Cassandra error and get line number + return database.Error{OrigErr: err, Err: "migration failed", Query: migr} + } + + return nil +} + +func (p *Cassandra) SetVersion(version int, dirty bool) error { + query := `TRUNCATE "` + p.config.MigrationsTable + `"` + if err := p.session.Query(query).Exec(); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + if version >= 0 { + query = `INSERT INTO "` + p.config.MigrationsTable + `" (version, dirty) VALUES (?, ?)` + if err := p.session.Query(query, version, dirty).Exec(); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + + return nil +} + +// Version returns the current keyspace version +func (p *Cassandra) Version() (version int, dirty bool, err error) { + query := `SELECT version, dirty FROM "` + p.config.MigrationsTable + `" LIMIT 1` + err = p.session.Query(query).Scan(&version, &dirty) + switch { + case err == gocql.ErrNotFound: + return database.NilVersion, false, nil + + case err != nil: + if _, ok := err.(*gocql.Error); ok { + return database.NilVersion, false, nil + } + return 0, false, &database.Error{OrigErr: err, Query: []byte(query)} + + default: + return version, dirty, nil + } +} + +func (p *Cassandra) Drop() error { + // select all tables in current schema + query := fmt.Sprintf(`SELECT table_name from system_schema.tables WHERE keyspace_name='%s'`, p.config.KeyspaceName[1:]) // Skip '/' character + iter := p.session.Query(query).Iter() + var tableName string + for iter.Scan(&tableName) { + err := p.session.Query(fmt.Sprintf(`DROP TABLE %s`, tableName)).Exec() + if err != nil { + return err + } + } + // Re-create the version table + if err := p.ensureVersionTable(); err != nil { + return err + } + return nil +} + +// Ensure version table exists +func (p *Cassandra) ensureVersionTable() error { + err := p.session.Query(fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (version bigint, dirty boolean, PRIMARY KEY(version))", p.config.MigrationsTable)).Exec() + if err != nil { + return err + } + if _, _, err = p.Version(); err != nil { + return err + } + return nil +} + +// parseConsistency wraps gocql.ParseConsistency +// to return an error instead of panicking.
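+// (gocql.ParseConsistency panics on unrecognized values; the deferred recover +// below converts that panic into an ordinary error return.)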
+func parseConsistency(consistencyStr string) (consistency gocql.Consistency, err error) { + defer func() { + if r := recover(); r != nil { + var ok bool + err, ok = r.(error) + if !ok { + err = fmt.Errorf("failed to parse consistency \"%s\": %v", consistencyStr, r) + } + } + }() + consistency = gocql.ParseConsistency(consistencyStr) + + return consistency, nil +} diff --git a/vendor/github.com/rdallman/migrate/database/cassandra/cassandra_test.go b/vendor/github.com/rdallman/migrate/database/cassandra/cassandra_test.go new file mode 100644 index 000000000..4ca764a04 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/cassandra/cassandra_test.go @@ -0,0 +1,53 @@ +package cassandra + +import ( + "fmt" + "testing" + dt "github.com/mattes/migrate/database/testing" + mt "github.com/mattes/migrate/testing" + "github.com/gocql/gocql" + "time" + "strconv" +) + +var versions = []mt.Version{ + {Image: "cassandra:3.0.10"}, + {Image: "cassandra:3.0"}, +} + +func isReady(i mt.Instance) bool { + // Cassandra exposes 5 ports (7000, 7001, 7199, 9042 & 9160) + // We only need the port bound to 9042, but we can only access the first one + // through 'i.Port()' (which calls DockerContainer.firstPortMapping()) + // So we need to get the port mapping to retrieve the correct port number bound to 9042 + portMap := i.NetworkSettings().Ports + port, _ := strconv.Atoi(portMap["9042/tcp"][0].HostPort) + + cluster := gocql.NewCluster(i.Host()) + cluster.Port = port + //cluster.ProtoVersion = 4 + cluster.Consistency = gocql.All + cluster.Timeout = 1 * time.Minute + p, err := cluster.CreateSession() + if err != nil { + return false + } + // Create keyspace for tests + p.Query("CREATE KEYSPACE testks WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor':1}").Exec() + return true +} + +func Test(t *testing.T) { + mt.ParallelTest(t, versions, isReady, + func(t *testing.T, i mt.Instance) { + p := &Cassandra{} + portMap := i.NetworkSettings().Ports + port, _ := strconv.Atoi(portMap["9042/tcp"][0].HostPort) + addr := fmt.Sprintf("cassandra://%v:%v/testks", i.Host(), port) + d, err := p.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + dt.Test(t, d, []byte("SELECT table_name from system_schema.tables")) + }) +} diff --git a/vendor/github.com/rdallman/migrate/database/clickhouse/README.md b/vendor/github.com/rdallman/migrate/database/clickhouse/README.md new file mode 100644 index 000000000..16dbbf965 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/clickhouse/README.md @@ -0,0 +1,12 @@ +# ClickHouse + +`clickhouse://host:port?username=user&password=qwerty&database=clicks` + +| URL Query | Description | +|------------|-------------| +| `x-migrations-table` | Name of the migrations table | +| `database` | The name of the database to connect to | +| `username` | The user to sign in as | +| `password` | The user's password | +| `host` | The host to connect to. | +| `port` | The port to bind to.
| diff --git a/vendor/github.com/rdallman/migrate/database/clickhouse/clickhouse.go b/vendor/github.com/rdallman/migrate/database/clickhouse/clickhouse.go new file mode 100644 index 000000000..fffc5585d --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/clickhouse/clickhouse.go @@ -0,0 +1,196 @@ +package clickhouse + +import ( + "database/sql" + "fmt" + "io" + "io/ioutil" + "net/url" + "time" + + "github.com/mattes/migrate" + "github.com/mattes/migrate/database" +) + +var DefaultMigrationsTable = "schema_migrations" + +var ErrNilConfig = fmt.Errorf("no config") + +type Config struct { + DatabaseName string + MigrationsTable string +} + +func init() { + database.Register("clickhouse", &ClickHouse{}) +} + +func WithInstance(conn *sql.DB, config *Config) (database.Driver, error) { + if config == nil { + return nil, ErrNilConfig + } + + if err := conn.Ping(); err != nil { + return nil, err + } + + ch := &ClickHouse{ + conn: conn, + config: config, + } + + if err := ch.init(); err != nil { + return nil, err + } + + return ch, nil +} + +type ClickHouse struct { + conn *sql.DB + config *Config +} + +func (ch *ClickHouse) Open(dsn string) (database.Driver, error) { + purl, err := url.Parse(dsn) + if err != nil { + return nil, err + } + q := migrate.FilterCustomQuery(purl) + q.Scheme = "tcp" + conn, err := sql.Open("clickhouse", q.String()) + if err != nil { + return nil, err + } + + ch = &ClickHouse{ + conn: conn, + config: &Config{ + MigrationsTable: purl.Query().Get("x-migrations-table"), + DatabaseName: purl.Query().Get("database"), + }, + } + + if err := ch.init(); err != nil { + return nil, err + } + + return ch, nil +} + +func (ch *ClickHouse) init() error { + if len(ch.config.DatabaseName) == 0 { + if err := ch.conn.QueryRow("SELECT currentDatabase()").Scan(&ch.config.DatabaseName); err != nil { + return err + } + } + + if len(ch.config.MigrationsTable) == 0 { + ch.config.MigrationsTable = DefaultMigrationsTable + } + + return ch.ensureVersionTable() +} + +func (ch *ClickHouse) Run(r io.Reader) error { + migration, err := ioutil.ReadAll(r) + if err != nil { + return err + } + if _, err := ch.conn.Exec(string(migration)); err != nil { + return database.Error{OrigErr: err, Err: "migration failed", Query: migration} + } + + return nil +} +func (ch *ClickHouse) Version() (int, bool, error) { + var ( + version int + dirty uint8 + query = "SELECT version, dirty FROM `" + ch.config.MigrationsTable + "` ORDER BY sequence DESC LIMIT 1" + ) + if err := ch.conn.QueryRow(query).Scan(&version, &dirty); err != nil { + if err == sql.ErrNoRows { + return database.NilVersion, false, nil + } + return 0, false, &database.Error{OrigErr: err, Query: []byte(query)} + } + return version, dirty == 1, nil +} + +func (ch *ClickHouse) SetVersion(version int, dirty bool) error { + var ( + bool = func(v bool) uint8 { + if v { + return 1 + } + return 0 + } + tx, err = ch.conn.Begin() + ) + if err != nil { + return err + } + + query := "INSERT INTO " + ch.config.MigrationsTable + " (version, dirty, sequence) VALUES (?, ?, ?)" + if _, err := tx.Exec(query, version, bool(dirty), time.Now().UnixNano()); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + + return tx.Commit() +} + +func (ch *ClickHouse) ensureVersionTable() error { + var ( + table string + query = "SHOW TABLES FROM " + ch.config.DatabaseName + " LIKE '" + ch.config.MigrationsTable + "'" + ) + // check if migration table exists + if err := ch.conn.QueryRow(query).Scan(&table); err != nil { + if err != sql.ErrNoRows { + 
return &database.Error{OrigErr: err, Query: []byte(query)} + } + } else { + return nil + } + // if not, create the empty migration table + query = ` + CREATE TABLE ` + ch.config.MigrationsTable + ` ( + version UInt32, + dirty UInt8, + sequence UInt64 + ) Engine=TinyLog + ` + if _, err := ch.conn.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + return nil +} + +func (ch *ClickHouse) Drop() error { + var ( + query = "SHOW TABLES FROM " + ch.config.DatabaseName + tables, err = ch.conn.Query(query) + ) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + defer tables.Close() + for tables.Next() { + var table string + if err := tables.Scan(&table); err != nil { + return err + } + + query = "DROP TABLE IF EXISTS " + ch.config.DatabaseName + "." + table + + if _, err := ch.conn.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + return ch.ensureVersionTable() +} + +func (ch *ClickHouse) Lock() error { return nil } +func (ch *ClickHouse) Unlock() error { return nil } +func (ch *ClickHouse) Close() error { return ch.conn.Close() } diff --git a/vendor/github.com/rdallman/migrate/database/clickhouse/examples/migrations/001_init.down.sql b/vendor/github.com/rdallman/migrate/database/clickhouse/examples/migrations/001_init.down.sql new file mode 100644 index 000000000..51cd8bfb5 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/clickhouse/examples/migrations/001_init.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS test_1; \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/clickhouse/examples/migrations/001_init.up.sql b/vendor/github.com/rdallman/migrate/database/clickhouse/examples/migrations/001_init.up.sql new file mode 100644 index 000000000..5436b6fdd --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/clickhouse/examples/migrations/001_init.up.sql @@ -0,0 +1,3 @@ +CREATE TABLE test_1 ( + Date Date +) Engine=Memory; \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/clickhouse/examples/migrations/002_create_table.down.sql b/vendor/github.com/rdallman/migrate/database/clickhouse/examples/migrations/002_create_table.down.sql new file mode 100644 index 000000000..9d7712233 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/clickhouse/examples/migrations/002_create_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS test_2; \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/clickhouse/examples/migrations/002_create_table.up.sql b/vendor/github.com/rdallman/migrate/database/clickhouse/examples/migrations/002_create_table.up.sql new file mode 100644 index 000000000..6b49ed99d --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/clickhouse/examples/migrations/002_create_table.up.sql @@ -0,0 +1,3 @@ +CREATE TABLE test_2 ( + Date Date +) Engine=Memory; \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/README.md b/vendor/github.com/rdallman/migrate/database/cockroachdb/README.md new file mode 100644 index 000000000..7931c2791 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/cockroachdb/README.md @@ -0,0 +1,19 @@ +# cockroachdb + +`cockroachdb://user:password@host:port/dbname?query` (`cockroach://`, and `crdb-postgres://` work, too) + +| URL Query | WithInstance Config | Description | +|------------|---------------------|-------------| +| `x-migrations-table` | `MigrationsTable` | 
Name of the migrations table | +| `x-lock-table` | `LockTable` | Name of the table which maintains the migration lock | +| `x-force-lock` | `ForceLock` | Force lock acquisition to fix faulty migrations which may not have released the schema lock (Boolean, default is `false`) | +| `dbname` | `DatabaseName` | The name of the database to connect to | +| `user` | | The user to sign in as | +| `password` | | The user's password | +| `host` | | The host to connect to. Values that start with / are for unix domain sockets. (default is localhost) | +| `port` | | The port to bind to. (default is 5432) | +| `connect_timeout` | | Maximum wait for connection, in seconds. Zero or not specified means wait indefinitely. | +| `sslcert` | | Cert file location. The file must contain PEM encoded data. | +| `sslkey` | | Key file location. The file must contain PEM encoded data. | +| `sslrootcert` | | The location of the root certificate file. The file must contain PEM encoded data. | +| `sslmode` | | Whether or not to use SSL (disable\|require\|verify-ca\|verify-full) | diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/cockroachdb.go b/vendor/github.com/rdallman/migrate/database/cockroachdb/cockroachdb.go new file mode 100644 index 000000000..8da31d378 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/cockroachdb/cockroachdb.go @@ -0,0 +1,338 @@ +package cockroachdb + +import ( + "database/sql" + "fmt" + "io" + "io/ioutil" + nurl "net/url" + + "github.com/cockroachdb/cockroach-go/crdb" + "github.com/lib/pq" + "github.com/mattes/migrate" + "github.com/mattes/migrate/database" + "regexp" + "strconv" + "context" +) + +func init() { + db := CockroachDb{} + database.Register("cockroach", &db) + database.Register("cockroachdb", &db) + database.Register("crdb-postgres", &db) +} + +var DefaultMigrationsTable = "schema_migrations" +var DefaultLockTable = "schema_lock" + +var ( + ErrNilConfig = fmt.Errorf("no config") + ErrNoDatabaseName = fmt.Errorf("no database name") +) + +type Config struct { + MigrationsTable string + LockTable string + ForceLock bool + DatabaseName string +} + +type CockroachDb struct { + db *sql.DB + isLocked bool + + // Open and WithInstance need to guarantee that config is never nil + config *Config +} + +func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) { + if config == nil { + return nil, ErrNilConfig + } + + if err := instance.Ping(); err != nil { + return nil, err + } + + query := `SELECT current_database()` + var databaseName string + if err := instance.QueryRow(query).Scan(&databaseName); err != nil { + return nil, &database.Error{OrigErr: err, Query: []byte(query)} + } + + if len(databaseName) == 0 { + return nil, ErrNoDatabaseName + } + + config.DatabaseName = databaseName + + if len(config.MigrationsTable) == 0 { + config.MigrationsTable = DefaultMigrationsTable + } + + if len(config.LockTable) == 0 { + config.LockTable = DefaultLockTable + } + + px := &CockroachDb{ + db: instance, + config: config, + } + + if err := px.ensureVersionTable(); err != nil { + return nil, err + } + + if err := px.ensureLockTable(); err != nil { + return nil, err + } + + return px, nil +} + +func (c *CockroachDb) Open(url string) (database.Driver, error) { + purl, err := nurl.Parse(url) + if err != nil { + return nil, err + } + + // As Cockroach uses the postgres protocol, and 'postgres' is already a registered database, we need to replace the + // connect prefix with the actual protocol, so that the library can differentiate between the implementations
 + re := regexp.MustCompile("^(cockroach(db)?|crdb-postgres)") + connectString := re.ReplaceAllString(migrate.FilterCustomQuery(purl).String(), "postgres") + + db, err := sql.Open("postgres", connectString) + if err != nil { + return nil, err + } + + migrationsTable := purl.Query().Get("x-migrations-table") + if len(migrationsTable) == 0 { + migrationsTable = DefaultMigrationsTable + } + + lockTable := purl.Query().Get("x-lock-table") + if len(lockTable) == 0 { + lockTable = DefaultLockTable + } + + forceLockQuery := purl.Query().Get("x-force-lock") + forceLock, err := strconv.ParseBool(forceLockQuery) + if err != nil { + forceLock = false + } + + px, err := WithInstance(db, &Config{ + DatabaseName: purl.Path, + MigrationsTable: migrationsTable, + LockTable: lockTable, + ForceLock: forceLock, + }) + if err != nil { + return nil, err + } + + return px, nil +} + +func (c *CockroachDb) Close() error { + return c.db.Close() +} + +// Locking is done manually with a separate lock table. Implementing advisory locks in CRDB is being discussed +// See: https://github.com/cockroachdb/cockroach/issues/13546 +func (c *CockroachDb) Lock() error { + err := crdb.ExecuteTx(context.Background(), c.db, nil, func(tx *sql.Tx) error { + aid, err := database.GenerateAdvisoryLockId(c.config.DatabaseName) + if err != nil { + return err + } + + query := "SELECT * FROM " + c.config.LockTable + " WHERE lock_id = $1" + rows, err := tx.Query(query, aid) + if err != nil { + return database.Error{OrigErr: err, Err: "failed to fetch migration lock", Query: []byte(query)} + } + defer rows.Close() + + // If row exists at all, lock is present + locked := rows.Next() + if locked && !c.config.ForceLock { + return database.Error{Err: "lock could not be acquired; already locked", Query: []byte(query)} + } + + query = "INSERT INTO " + c.config.LockTable + " (lock_id) VALUES ($1)" + if _, err := tx.Exec(query, aid); err != nil { + return database.Error{OrigErr: err, Err: "failed to set migration lock", Query: []byte(query)} + } + + return nil + }) + + if err != nil { + return err + } else { + c.isLocked = true + return nil + } +} + +// Locking is done manually with a separate lock table. Implementing advisory locks in CRDB is being discussed +// See: https://github.com/cockroachdb/cockroach/issues/13546 +func (c *CockroachDb) Unlock() error { + aid, err := database.GenerateAdvisoryLockId(c.config.DatabaseName) + if err != nil { + return err + } + + // In the event of an implementation (non-migration) error, it is possible for the lock to not be released.
Until + // a better locking mechanism is added, a manual purging of the lock table may be required in such circumstances + query := "DELETE FROM " + c.config.LockTable + " WHERE lock_id = $1" + if _, err := c.db.Exec(query, aid); err != nil { + if e, ok := err.(*pq.Error); ok { + // 42P01 is "UndefinedTableError" in CockroachDB + // https://github.com/cockroachdb/cockroach/blob/master/pkg/sql/pgwire/pgerror/codes.go + if e.Code == "42P01" { + // On drops, the lock table is fully removed; This is fine, and is a valid "unlocked" state for the schema + c.isLocked = false + return nil + } + } + return database.Error{OrigErr: err, Err: "failed to release migration lock", Query: []byte(query)} + } + + c.isLocked = false + return nil +} + +func (c *CockroachDb) Run(migration io.Reader) error { + migr, err := ioutil.ReadAll(migration) + if err != nil { + return err + } + + // run migration + query := string(migr[:]) + if _, err := c.db.Exec(query); err != nil { + return database.Error{OrigErr: err, Err: "migration failed", Query: migr} + } + + return nil +} + +func (c *CockroachDb) SetVersion(version int, dirty bool) error { + return crdb.ExecuteTx(context.Background(), c.db, nil, func(tx *sql.Tx) error { + if _, err := tx.Exec( `TRUNCATE "` + c.config.MigrationsTable + `"`); err != nil { + return err + } + + if version >= 0 { + if _, err := tx.Exec(`INSERT INTO "` + c.config.MigrationsTable + `" (version, dirty) VALUES ($1, $2)`, version, dirty); err != nil { + return err + } + } + + return nil + }) +} + +func (c *CockroachDb) Version() (version int, dirty bool, err error) { + query := `SELECT version, dirty FROM "` + c.config.MigrationsTable + `" LIMIT 1` + err = c.db.QueryRow(query).Scan(&version, &dirty) + + switch { + case err == sql.ErrNoRows: + return database.NilVersion, false, nil + + case err != nil: + if e, ok := err.(*pq.Error); ok { + // 42P01 is "UndefinedTableError" in CockroachDB + // https://github.com/cockroachdb/cockroach/blob/master/pkg/sql/pgwire/pgerror/codes.go + if e.Code == "42P01" { + return database.NilVersion, false, nil + } + } + return 0, false, &database.Error{OrigErr: err, Query: []byte(query)} + + default: + return version, dirty, nil + } +} + +func (c *CockroachDb) Drop() error { + // select all tables in current schema + query := `SELECT table_name FROM information_schema.tables WHERE table_schema=(SELECT current_schema())` + tables, err := c.db.Query(query) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + defer tables.Close() + + // delete one table after another + tableNames := make([]string, 0) + for tables.Next() { + var tableName string + if err := tables.Scan(&tableName); err != nil { + return err + } + if len(tableName) > 0 { + tableNames = append(tableNames, tableName) + } + } + + if len(tableNames) > 0 { + // delete one by one ... 
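+ // (this DROP ... CASCADE also removes the migrations table itself; the ensureVersionTable call below recreates it)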
+ for _, t := range tableNames { + query = `DROP TABLE IF EXISTS ` + t + ` CASCADE` + if _, err := c.db.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + if err := c.ensureVersionTable(); err != nil { + return err + } + } + + return nil +} + +func (c *CockroachDb) ensureVersionTable() error { + // check if migration table exists + var count int + query := `SELECT COUNT(1) FROM information_schema.tables WHERE table_name = $1 AND table_schema = (SELECT current_schema()) LIMIT 1` + if err := c.db.QueryRow(query, c.config.MigrationsTable).Scan(&count); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + if count == 1 { + return nil + } + + // if not, create the empty migration table + query = `CREATE TABLE "` + c.config.MigrationsTable + `" (version INT NOT NULL PRIMARY KEY, dirty BOOL NOT NULL)` + if _, err := c.db.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + return nil +} + + +func (c *CockroachDb) ensureLockTable() error { + // check if lock table exists + var count int + query := `SELECT COUNT(1) FROM information_schema.tables WHERE table_name = $1 AND table_schema = (SELECT current_schema()) LIMIT 1` + if err := c.db.QueryRow(query, c.config.LockTable).Scan(&count); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + if count == 1 { + return nil + } + + // if not, create the empty lock table + query = `CREATE TABLE "` + c.config.LockTable + `" (lock_id INT NOT NULL PRIMARY KEY)` + if _, err := c.db.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + + return nil +} diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/cockroachdb_test.go b/vendor/github.com/rdallman/migrate/database/cockroachdb/cockroachdb_test.go new file mode 100644 index 000000000..e2dc1f86e --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/cockroachdb/cockroachdb_test.go @@ -0,0 +1,91 @@ +package cockroachdb + +// error codes https://github.com/lib/pq/blob/master/error.go + +import ( + "bytes" + "database/sql" + "fmt" + "io" + "testing" + + "github.com/lib/pq" + dt "github.com/mattes/migrate/database/testing" + mt "github.com/mattes/migrate/testing" +) + +var versions = []mt.Version{ + {Image: "cockroachdb/cockroach:v1.0.2", Cmd: []string{"start", "--insecure"}}, +} + +func isReady(i mt.Instance) bool { + db, err := sql.Open("postgres", fmt.Sprintf("postgres://root@%v:%v?sslmode=disable", i.Host(), i.PortFor(26257))) + if err != nil { + return false + } + defer db.Close() + err = db.Ping() + if err == io.EOF { + _, err = db.Exec("CREATE DATABASE migrate") + return err == nil + } else if e, ok := err.(*pq.Error); ok { + if e.Code.Name() == "cannot_connect_now" { + return false + } + } + + _, err = db.Exec("CREATE DATABASE migrate") + return err == nil +} + +func Test(t *testing.T) { + mt.ParallelTest(t, versions, isReady, + func(t *testing.T, i mt.Instance) { + c := &CockroachDb{} + addr := fmt.Sprintf("cockroach://root@%v:%v/migrate?sslmode=disable", i.Host(), i.PortFor(26257)) + d, err := c.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + dt.Test(t, d, []byte("SELECT 1")) + }) +} + +func TestMultiStatement(t *testing.T) { + mt.ParallelTest(t, versions, isReady, + func(t *testing.T, i mt.Instance) { + c := &CockroachDb{} + addr := fmt.Sprintf("cockroach://root@%v:%v/migrate?sslmode=disable", i.Host(), i.Port()) + d, err := c.Open(addr) + if err != nil {
t.Fatalf("%v", err) + } + if err := d.Run(bytes.NewReader([]byte("CREATE TABLE foo (foo text); CREATE TABLE bar (bar text);"))); err != nil { + t.Fatalf("expected err to be nil, got %v", err) + } + + // make sure second table exists + var exists bool + if err := d.(*CockroachDb).db.QueryRow("SELECT EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = 'bar' AND table_schema = (SELECT current_schema()))").Scan(&exists); err != nil { + t.Fatal(err) + } + if !exists { + t.Fatalf("expected table bar to exist") + } + }) +} + +func TestFilterCustomQuery(t *testing.T) { + mt.ParallelTest(t, versions, isReady, + func(t *testing.T, i mt.Instance) { + c := &CockroachDb{} + addr := fmt.Sprintf("cockroach://root@%v:%v/migrate?sslmode=disable&x-custom=foobar", i.Host(), i.PortFor(26257)) + _, err := c.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + }) +} diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1085649617_create_users_table.down.sql b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1085649617_create_users_table.down.sql new file mode 100644 index 000000000..c99ddcdc8 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1085649617_create_users_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS users; diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1085649617_create_users_table.up.sql b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1085649617_create_users_table.up.sql new file mode 100644 index 000000000..fc3210181 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1085649617_create_users_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE users ( + user_id INT UNIQUE, + name STRING(40), + email STRING(40) +); diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1185749658_add_city_to_users.down.sql b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1185749658_add_city_to_users.down.sql new file mode 100644 index 000000000..940c60712 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1185749658_add_city_to_users.down.sql @@ -0,0 +1 @@ +ALTER TABLE users DROP COLUMN IF EXISTS city; diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1185749658_add_city_to_users.up.sql b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1185749658_add_city_to_users.up.sql new file mode 100644 index 000000000..46204b0f8 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1185749658_add_city_to_users.up.sql @@ -0,0 +1 @@ +ALTER TABLE users ADD COLUMN city TEXT; diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1285849751_add_index_on_user_emails.down.sql b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1285849751_add_index_on_user_emails.down.sql new file mode 100644 index 000000000..3e87dd229 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1285849751_add_index_on_user_emails.down.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS users_email_index; diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1285849751_add_index_on_user_emails.up.sql 
b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1285849751_add_index_on_user_emails.up.sql new file mode 100644 index 000000000..61f8ba0b9 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1285849751_add_index_on_user_emails.up.sql @@ -0,0 +1,3 @@ +CREATE UNIQUE INDEX IF NOT EXISTS users_email_index ON users (email); + +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1385949617_create_books_table.down.sql b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1385949617_create_books_table.down.sql new file mode 100644 index 000000000..1a0b1a214 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1385949617_create_books_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS books; diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1385949617_create_books_table.up.sql b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1385949617_create_books_table.up.sql new file mode 100644 index 000000000..0d3b99928 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1385949617_create_books_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE books ( + user_id INT, + name STRING(40), + author STRING(40) +); diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1485949617_create_movies_table.down.sql b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1485949617_create_movies_table.down.sql new file mode 100644 index 000000000..3a5187689 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1485949617_create_movies_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS movies; diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1485949617_create_movies_table.up.sql b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1485949617_create_movies_table.up.sql new file mode 100644 index 000000000..d533be900 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1485949617_create_movies_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE movies ( + user_id INT, + name STRING(40), + director STRING(40) +); diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1585849751_just_a_comment.up.sql b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1585849751_just_a_comment.up.sql new file mode 100644 index 000000000..9b6b57a61 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1585849751_just_a_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. 
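The example migrations above can also be driven from application code rather than the CLI. A minimal sketch (the migrations path and connection URL are illustrative assumptions, not part of this diff) using `migrate.New` with the `cockroachdb` driver and `file` source registered via blank imports:

```go
package main

import (
	"log"

	"github.com/mattes/migrate"
	_ "github.com/mattes/migrate/database/cockroachdb" // registers cockroach://, cockroachdb://, crdb-postgres://
	_ "github.com/mattes/migrate/source/file"          // registers the file:// source
)

func main() {
	// Illustrative source path and database URL; adjust to your environment.
	m, err := migrate.New(
		"file://examples/migrations",
		"cockroach://root@localhost:26257/migrate?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer m.Close()

	// Apply all pending up migrations; ErrNoChange just means we were already current.
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		log.Fatal(err)
	}
}
```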
diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1685849751_another_comment.up.sql b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1685849751_another_comment.up.sql new file mode 100644 index 000000000..9b6b57a61 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1685849751_another_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1785849751_another_comment.up.sql b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1785849751_another_comment.up.sql new file mode 100644 index 000000000..9b6b57a61 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1785849751_another_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1885849751_another_comment.up.sql b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1885849751_another_comment.up.sql new file mode 100644 index 000000000..9b6b57a61 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1885849751_another_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/rdallman/migrate/database/crate/README.md b/vendor/github.com/rdallman/migrate/database/crate/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/rdallman/migrate/database/driver.go b/vendor/github.com/rdallman/migrate/database/driver.go new file mode 100644 index 000000000..016eedcba --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/driver.go @@ -0,0 +1,112 @@ +// Package database provides the Database interface. +// All database drivers must implement this interface, register themselves, +// optionally provide a `WithInstance` function and pass the tests +// in package database/testing. +package database + +import ( + "fmt" + "io" + nurl "net/url" + "sync" +) + +var ( + ErrLocked = fmt.Errorf("can't acquire lock") +) + +const NilVersion int = -1 + +var driversMu sync.RWMutex +var drivers = make(map[string]Driver) + +// Driver is the interface every database driver must implement. +// +// How to implement a database driver? +// 1. Implement this interface. +// 2. Optionally, add a function named `WithInstance`. +// This function should accept an existing DB instance and a Config{} struct +// and return a driver instance. +// 3. Add a test that calls database/testing.go:Test() +// 4. Add own tests for Open(), WithInstance() (when provided) and Close(). +// All other functions are tested by tests in database/testing. +// Saves you some time and makes sure all database drivers behave the same way. +// 5. Call Register in init(). +// 6. Create a migrate/cli/build_<driver>.go file +// 7.
Add the driver name to the 'DATABASE' variable in the Makefile +// +// Guidelines: +// * Don't try to correct user input. Don't assume things. +// When in doubt, return an error and explain the situation to the user. +// * All configuration input must come from the URL string in func Open() +// or the Config{} struct in WithInstance. Don't os.Getenv(). +type Driver interface { + // Open returns a new driver instance configured with parameters + // coming from the URL string. Migrate will call this function + // only once per instance. + Open(url string) (Driver, error) + + // Close closes the underlying database instance managed by the driver. + // Migrate will call this function only once per instance. + Close() error + + // Lock should acquire a database lock so that only one migration process + // can run at a time. Migrate will call this function before Run is called. + // If the implementation can't provide this functionality, return nil. + // Return database.ErrLocked if the database is already locked. + Lock() error + + // Unlock should release the lock. Migrate will call this function after + // all migrations have been run. + Unlock() error + + // Run applies a migration to the database. migration is guaranteed to be non-nil. + Run(migration io.Reader) error + + // SetVersion saves version and dirty state. + // Migrate will call this function before and after each call to Run. + // version must be >= -1. -1 means NilVersion. + SetVersion(version int, dirty bool) error + + // Version returns the currently active version and whether the database is dirty. + // When no migration has been applied, it must return version -1. + // Dirty means a previous migration failed and user interaction is required. + Version() (version int, dirty bool, err error) + + // Drop deletes everything in the database. + Drop() error +} + +// Open returns a new driver instance. +func Open(url string) (Driver, error) { + u, err := nurl.Parse(url) + if err != nil { + return nil, err + } + + if u.Scheme == "" { + return nil, fmt.Errorf("database driver: invalid URL scheme") + } + + driversMu.RLock() + d, ok := drivers[u.Scheme] + driversMu.RUnlock() + if !ok { + return nil, fmt.Errorf("database driver: unknown driver %v (forgotten import?)", u.Scheme) + } + + return d.Open(url) +} + +// Register globally registers a driver.
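+// Drivers in this repository call Register from an init function, so that a +// blank import of a driver package (see the cli/build_*.go files above) is +// enough to make its URL scheme resolvable by Open.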
+func Register(name string, driver Driver) { + driversMu.Lock() + defer driversMu.Unlock() + if driver == nil { + panic("Register driver is nil") + } + if _, dup := drivers[name]; dup { + panic("Register called twice for driver " + name) + } + drivers[name] = driver +} diff --git a/vendor/github.com/rdallman/migrate/database/driver_test.go b/vendor/github.com/rdallman/migrate/database/driver_test.go new file mode 100644 index 000000000..c0a29304f --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/driver_test.go @@ -0,0 +1,8 @@ +package database + +func ExampleDriver() { + // see database/stub for an example + + // database/stub/stub.go has the driver implementation + // database/stub/stub_test.go runs database/testing/test.go:Test +} diff --git a/vendor/github.com/rdallman/migrate/database/error.go b/vendor/github.com/rdallman/migrate/database/error.go new file mode 100644 index 000000000..eb802c753 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/error.go @@ -0,0 +1,27 @@ +package database + +import ( + "fmt" +) + +// Error should be used for errors involving queries run against the database +type Error struct { + // Optional: the line number + Line uint + + // Query is a query excerpt + Query []byte + + // Err is a useful/helping error message for humans + Err string + + // OrigErr is the underlying error + OrigErr error +} + +func (e Error) Error() string { + if len(e.Err) == 0 { + return fmt.Sprintf("%v in line %v: %s", e.OrigErr, e.Line, e.Query) + } + return fmt.Sprintf("%v in line %v: %s (details: %v)", e.Err, e.Line, e.Query, e.OrigErr) +} diff --git a/vendor/github.com/rdallman/migrate/database/mongodb/README.md b/vendor/github.com/rdallman/migrate/database/mongodb/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/rdallman/migrate/database/mysql/README.md b/vendor/github.com/rdallman/migrate/database/mysql/README.md new file mode 100644 index 000000000..490e90b21 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/mysql/README.md @@ -0,0 +1,53 @@ +# MySQL + +`mysql://user:password@tcp(host:port)/dbname?query` + +| URL Query | WithInstance Config | Description | +|------------|---------------------|-------------| +| `x-migrations-table` | `MigrationsTable` | Name of the migrations table | +| `dbname` | `DatabaseName` | The name of the database to connect to | +| `user` | | The user to sign in as | +| `password` | | The user's password | +| `host` | | The host to connect to. | +| `port` | | The port to bind to. | +| `x-tls-ca` | | The location of the root certificate file. | +| `x-tls-cert` | | Cert file location. | +| `x-tls-key` | | Key file location. | +| `x-tls-insecure-skip-verify` | | Whether or not to use SSL (true\|false) | + +## Use with existing client + +If you use the MySQL driver with an existing database client, you must create the client with the parameter `multiStatements=true`: + +```go +package main + +import ( + "database/sql" + + _ "github.com/go-sql-driver/mysql" + "github.com/mattes/migrate" + "github.com/mattes/migrate/database/mysql" + _ "github.com/mattes/migrate/source/file" +) + +func main() { + db, _ := sql.Open("mysql", "user:password@tcp(host:port)/dbname?multiStatements=true") + driver, _ := mysql.WithInstance(db, &mysql.Config{}) + m, _ := migrate.NewWithDatabaseInstance( + "file:///migrations", + "mysql", + driver, + ) + + m.Steps(2) +} +``` + +## Upgrading from v1 + +1. Write down the current migration version from schema_migrations +2. `DROP TABLE schema_migrations` +3.
Wrap your existing migrations in transactions ([BEGIN/COMMIT](https://dev.mysql.com/doc/refman/5.7/en/commit.html)) if you use multiple statements within one migration. +4. Download and install the latest migrate version. +5. Force the current migration version with `migrate force <version>`. diff --git a/vendor/github.com/rdallman/migrate/database/mysql/mysql.go b/vendor/github.com/rdallman/migrate/database/mysql/mysql.go new file mode 100644 index 000000000..3aecfe19c --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/mysql/mysql.go @@ -0,0 +1,344 @@ +// +build go1.9 + +package mysql + +import ( + "context" + "crypto/tls" + "crypto/x509" + "database/sql" + "fmt" + "io" + "io/ioutil" + nurl "net/url" + "strconv" + "strings" + + "github.com/go-sql-driver/mysql" + "github.com/mattes/migrate" + "github.com/mattes/migrate/database" +) + +func init() { + database.Register("mysql", &Mysql{}) +} + +var DefaultMigrationsTable = "schema_migrations" + +var ( + ErrDatabaseDirty = fmt.Errorf("database is dirty") + ErrNilConfig = fmt.Errorf("no config") + ErrNoDatabaseName = fmt.Errorf("no database name") + ErrAppendPEM = fmt.Errorf("failed to append PEM") +) + +type Config struct { + MigrationsTable string + DatabaseName string +} + +type Mysql struct { + // mysql RELEASE_LOCK must be called from the same conn, so + // just do everything over a single conn anyway. + db *sql.Conn + isLocked bool + + config *Config +} + +// instance must have `multiStatements` set to true +func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) { + if config == nil { + return nil, ErrNilConfig + } + + if err := instance.Ping(); err != nil { + return nil, err + } + + query := `SELECT DATABASE()` + var databaseName sql.NullString + if err := instance.QueryRow(query).Scan(&databaseName); err != nil { + return nil, &database.Error{OrigErr: err, Query: []byte(query)} + } + + if len(databaseName.String) == 0 { + return nil, ErrNoDatabaseName + } + + config.DatabaseName = databaseName.String + + if len(config.MigrationsTable) == 0 { + config.MigrationsTable = DefaultMigrationsTable + } + + conn, err := instance.Conn(context.Background()) + if err != nil { + return nil, err + } + + mx := &Mysql{ + db: conn, + config: config, + } + + if err := mx.ensureVersionTable(); err != nil { + return nil, err + } + + return mx, nil +} + +func (m *Mysql) Open(url string) (database.Driver, error) { + purl, err := nurl.Parse(url) + if err != nil { + return nil, err + } + + q := purl.Query() + q.Set("multiStatements", "true") + purl.RawQuery = q.Encode() + + db, err := sql.Open("mysql", strings.Replace( + migrate.FilterCustomQuery(purl).String(), "mysql://", "", 1)) + if err != nil { + return nil, err + } + + migrationsTable := purl.Query().Get("x-migrations-table") + if len(migrationsTable) == 0 { + migrationsTable = DefaultMigrationsTable + } + + // use custom TLS?
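+ // Anything in the `tls` query param other than a boolean or "skip-verify" is + // treated as the name of a custom TLS config, assembled below from the + // x-tls-ca, x-tls-cert, x-tls-key and x-tls-insecure-skip-verify params and + // registered with the driver via mysql.RegisterTLSConfig.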
+ ctls := purl.Query().Get("tls") + if len(ctls) > 0 { + if _, isBool := readBool(ctls); !isBool && strings.ToLower(ctls) != "skip-verify" { + rootCertPool := x509.NewCertPool() + pem, err := ioutil.ReadFile(purl.Query().Get("x-tls-ca")) + if err != nil { + return nil, err + } + + if ok := rootCertPool.AppendCertsFromPEM(pem); !ok { + return nil, ErrAppendPEM + } + + certs, err := tls.LoadX509KeyPair(purl.Query().Get("x-tls-cert"), purl.Query().Get("x-tls-key")) + if err != nil { + return nil, err + } + + insecureSkipVerify := false + if len(purl.Query().Get("x-tls-insecure-skip-verify")) > 0 { + x, err := strconv.ParseBool(purl.Query().Get("x-tls-insecure-skip-verify")) + if err != nil { + return nil, err + } + insecureSkipVerify = x + } + + mysql.RegisterTLSConfig(ctls, &tls.Config{ + RootCAs: rootCertPool, + Certificates: []tls.Certificate{certs}, + InsecureSkipVerify: insecureSkipVerify, + }) + } + } + + mx, err := WithInstance(db, &Config{ + DatabaseName: purl.Path, + MigrationsTable: migrationsTable, + }) + if err != nil { + return nil, err + } + + return mx, nil +} + +func (m *Mysql) Close() error { + return m.db.Close() +} + +func (m *Mysql) Lock() error { + if m.isLocked { + return database.ErrLocked + } + + aid, err := database.GenerateAdvisoryLockId( + fmt.Sprintf("%s:%s", m.config.DatabaseName, m.config.MigrationsTable)) + if err != nil { + return err + } + + query := "SELECT GET_LOCK(?, 10)" + var success bool + if err := m.db.QueryRowContext(context.Background(), query, aid).Scan(&success); err != nil { + return &database.Error{OrigErr: err, Err: "try lock failed", Query: []byte(query)} + } + + if success { + m.isLocked = true + return nil + } + + return database.ErrLocked +} + +func (m *Mysql) Unlock() error { + if !m.isLocked { + return nil + } + + aid, err := database.GenerateAdvisoryLockId( + fmt.Sprintf("%s:%s", m.config.DatabaseName, m.config.MigrationsTable)) + if err != nil { + return err + } + + query := `SELECT RELEASE_LOCK(?)` + if _, err := m.db.ExecContext(context.Background(), query, aid); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + + // NOTE: RELEASE_LOCK could return NULL (or 0 if the code is changed), + // in which case isLocked should be true until the timeout expires -- synchronizing + // these states is likely not worth trying to do; reconsider the necessity of isLocked.
+ + m.isLocked = false + return nil +} + +func (m *Mysql) Run(migration io.Reader) error { + migr, err := ioutil.ReadAll(migration) + if err != nil { + return err + } + + query := string(migr[:]) + if _, err := m.db.ExecContext(context.Background(), query); err != nil { + return database.Error{OrigErr: err, Err: "migration failed", Query: migr} + } + + return nil +} + +func (m *Mysql) SetVersion(version int, dirty bool) error { + tx, err := m.db.BeginTx(context.Background(), &sql.TxOptions{}) + if err != nil { + return &database.Error{OrigErr: err, Err: "transaction start failed"} + } + + query := "TRUNCATE `" + m.config.MigrationsTable + "`" + if _, err := tx.ExecContext(context.Background(), query); err != nil { + tx.Rollback() + return &database.Error{OrigErr: err, Query: []byte(query)} + } + + if version >= 0 { + query := "INSERT INTO `" + m.config.MigrationsTable + "` (version, dirty) VALUES (?, ?)" + if _, err := tx.ExecContext(context.Background(), query, version, dirty); err != nil { + tx.Rollback() + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + + if err := tx.Commit(); err != nil { + return &database.Error{OrigErr: err, Err: "transaction commit failed"} + } + + return nil +} + +func (m *Mysql) Version() (version int, dirty bool, err error) { + query := "SELECT version, dirty FROM `" + m.config.MigrationsTable + "` LIMIT 1" + err = m.db.QueryRowContext(context.Background(), query).Scan(&version, &dirty) + switch { + case err == sql.ErrNoRows: + return database.NilVersion, false, nil + + case err != nil: + if e, ok := err.(*mysql.MySQLError); ok { + if e.Number == 0 { + return database.NilVersion, false, nil + } + } + return 0, false, &database.Error{OrigErr: err, Query: []byte(query)} + + default: + return version, dirty, nil + } +} + +func (m *Mysql) Drop() error { + // select all tables + query := `SHOW TABLES LIKE '%'` + tables, err := m.db.QueryContext(context.Background(), query) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + defer tables.Close() + + // delete one table after another + tableNames := make([]string, 0) + for tables.Next() { + var tableName string + if err := tables.Scan(&tableName); err != nil { + return err + } + if len(tableName) > 0 { + tableNames = append(tableNames, tableName) + } + } + + if len(tableNames) > 0 { + // delete one by one ... + for _, t := range tableNames { + query = "DROP TABLE IF EXISTS `" + t + "` CASCADE" + if _, err := m.db.ExecContext(context.Background(), query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + if err := m.ensureVersionTable(); err != nil { + return err + } + } + + return nil +} + +func (m *Mysql) ensureVersionTable() error { + // check if migration table exists + var result string + query := `SHOW TABLES LIKE "` + m.config.MigrationsTable + `"` + if err := m.db.QueryRowContext(context.Background(), query).Scan(&result); err != nil { + if err != sql.ErrNoRows { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } else { + return nil + } + + // if not, create the empty migration table + query = "CREATE TABLE `" + m.config.MigrationsTable + "` (version bigint not null primary key, dirty boolean not null)" + if _, err := m.db.ExecContext(context.Background(), query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + return nil +} + +// Returns the bool value of the input. 
+// The 2nd return value indicates if the input was a valid bool value +// See https://github.com/go-sql-driver/mysql/blob/a059889267dc7170331388008528b3b44479bffb/utils.go#L71 +func readBool(input string) (value bool, valid bool) { + switch input { + case "1", "true", "TRUE", "True": + return true, true + case "0", "false", "FALSE", "False": + return false, true + } + + // Not a valid bool value + return +} diff --git a/vendor/github.com/rdallman/migrate/database/mysql/mysql_test.go b/vendor/github.com/rdallman/migrate/database/mysql/mysql_test.go new file mode 100644 index 000000000..64176f64a --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/mysql/mysql_test.go @@ -0,0 +1,94 @@ +package mysql + +import ( + "database/sql" + sqldriver "database/sql/driver" + "fmt" + // "io/ioutil" + // "log" + "testing" + + // "github.com/go-sql-driver/mysql" + dt "github.com/mattes/migrate/database/testing" + mt "github.com/mattes/migrate/testing" +) + +var versions = []mt.Version{ + {Image: "mysql:8", ENV: []string{"MYSQL_ROOT_PASSWORD=root", "MYSQL_DATABASE=public"}}, + {Image: "mysql:5.7", ENV: []string{"MYSQL_ROOT_PASSWORD=root", "MYSQL_DATABASE=public"}}, + {Image: "mysql:5.6", ENV: []string{"MYSQL_ROOT_PASSWORD=root", "MYSQL_DATABASE=public"}}, + {Image: "mysql:5.5", ENV: []string{"MYSQL_ROOT_PASSWORD=root", "MYSQL_DATABASE=public"}}, +} + +func isReady(i mt.Instance) bool { + db, err := sql.Open("mysql", fmt.Sprintf("root:root@tcp(%v:%v)/public", i.Host(), i.Port())) + if err != nil { + return false + } + defer db.Close() + err = db.Ping() + + if err == sqldriver.ErrBadConn { + return false + } + + return true +} + +func Test(t *testing.T) { + // mysql.SetLogger(mysql.Logger(log.New(ioutil.Discard, "", log.Ltime))) + + mt.ParallelTest(t, versions, isReady, + func(t *testing.T, i mt.Instance) { + p := &Mysql{} + addr := fmt.Sprintf("mysql://root:root@tcp(%v:%v)/public", i.Host(), i.Port()) + d, err := p.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + dt.Test(t, d, []byte("SELECT 1")) + + // check ensureVersionTable + if err := d.(*Mysql).ensureVersionTable(); err != nil { + t.Fatal(err) + } + // check again + if err := d.(*Mysql).ensureVersionTable(); err != nil { + t.Fatal(err) + } + }) +} + +func TestLockWorks(t *testing.T) { + mt.ParallelTest(t, versions, isReady, + func(t *testing.T, i mt.Instance) { + p := &Mysql{} + addr := fmt.Sprintf("mysql://root:root@tcp(%v:%v)/public", i.Host(), i.Port()) + d, err := p.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + dt.Test(t, d, []byte("SELECT 1")) + + ms := d.(*Mysql) + + err = ms.Lock() + if err != nil { + t.Fatal(err) + } + err = ms.Unlock() + if err != nil { + t.Fatal(err) + } + + // make sure the 2nd lock works (RELEASE_LOCK is very finicky) + err = ms.Lock() + if err != nil { + t.Fatal(err) + } + err = ms.Unlock() + if err != nil { + t.Fatal(err) + } + }) +} diff --git a/vendor/github.com/rdallman/migrate/database/neo4j/README.md b/vendor/github.com/rdallman/migrate/database/neo4j/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/rdallman/migrate/database/postgres/README.md b/vendor/github.com/rdallman/migrate/database/postgres/README.md new file mode 100644 index 000000000..f6312392b --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/postgres/README.md @@ -0,0 +1,28 @@ +# postgres + +`postgres://user:password@host:port/dbname?query` (`postgresql://` works, too) + +| URL Query | WithInstance Config | Description | +|------------|---------------------|-------------| 
+| `x-migrations-table` | `MigrationsTable` | Name of the migrations table | +| `dbname` | `DatabaseName` | The name of the database to connect to | +| `search_path` | | This variable specifies the order in which schemas are searched when an object is referenced by a simple name with no schema specified. | +| `user` | | The user to sign in as | +| `password` | | The user's password | +| `host` | | The host to connect to. Values that start with / are for unix domain sockets. (default is localhost) | +| `port` | | The port to connect to. (default is 5432) | +| `fallback_application_name` | | An application_name to fall back to if one isn't provided. | +| `connect_timeout` | | Maximum wait for connection, in seconds. Zero or not specified means wait indefinitely. | +| `sslcert` | | Cert file location. The file must contain PEM encoded data. | +| `sslkey` | | Key file location. The file must contain PEM encoded data. | +| `sslrootcert` | | The location of the root certificate file. The file must contain PEM encoded data. | +| `sslmode` | | Whether or not to use SSL (disable\|require\|verify-ca\|verify-full) | + + +## Upgrading from v1 + +1. Write down the current migration version from schema_migrations +2. `DROP TABLE schema_migrations` +3. Wrap your existing migrations in transactions ([BEGIN/COMMIT](https://www.postgresql.org/docs/current/static/transaction-iso.html)) if you use multiple statements within one migration. +4. Download and install the latest migrate version. +5. Force the current migration version with `migrate force <version>`, using the version you wrote down in step 1. diff --git a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1085649617_create_users_table.down.sql b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1085649617_create_users_table.down.sql new file mode 100644 index 000000000..c99ddcdc8 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1085649617_create_users_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS users; diff --git a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1085649617_create_users_table.up.sql b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1085649617_create_users_table.up.sql new file mode 100644 index 000000000..92897dcab --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1085649617_create_users_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE users ( + user_id integer unique, + name varchar(40), + email varchar(40) +); diff --git a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.down.sql b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.down.sql new file mode 100644 index 000000000..940c60712 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.down.sql @@ -0,0 +1 @@ +ALTER TABLE users DROP COLUMN IF EXISTS city; diff --git a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.up.sql b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.up.sql new file mode 100644 index 000000000..67823edc9 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.up.sql @@ -0,0 +1,3 @@ +ALTER TABLE users ADD COLUMN city varchar(100); + + diff --git
a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.down.sql b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.down.sql new file mode 100644 index 000000000..3e87dd229 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.down.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS users_email_index; diff --git a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.up.sql b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.up.sql new file mode 100644 index 000000000..fbeb4ab4e --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.up.sql @@ -0,0 +1,3 @@ +CREATE UNIQUE INDEX CONCURRENTLY users_email_index ON users (email); + +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1385949617_create_books_table.down.sql b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1385949617_create_books_table.down.sql new file mode 100644 index 000000000..1a0b1a214 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1385949617_create_books_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS books; diff --git a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1385949617_create_books_table.up.sql b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1385949617_create_books_table.up.sql new file mode 100644 index 000000000..f1503b518 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1385949617_create_books_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE books ( + user_id integer, + name varchar(40), + author varchar(40) +); diff --git a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.down.sql b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.down.sql new file mode 100644 index 000000000..3a5187689 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS movies; diff --git a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.up.sql b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.up.sql new file mode 100644 index 000000000..f0ef5943b --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE movies ( + user_id integer, + name varchar(40), + director varchar(40) +); diff --git a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1585849751_just_a_comment.up.sql b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1585849751_just_a_comment.up.sql new file mode 100644 index 000000000..9b6b57a61 --- /dev/null +++ 
b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1585849751_just_a_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1685849751_another_comment.up.sql b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1685849751_another_comment.up.sql new file mode 100644 index 000000000..9b6b57a61 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1685849751_another_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1785849751_another_comment.up.sql b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1785849751_another_comment.up.sql new file mode 100644 index 000000000..9b6b57a61 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1785849751_another_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1885849751_another_comment.up.sql b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1885849751_another_comment.up.sql new file mode 100644 index 000000000..9b6b57a61 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1885849751_another_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. 
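The postgres README above documents the DSN parameters and ships the example migrations just shown, but stops short of showing the library in use. The following is a minimal, hedged sketch of driving such migrations programmatically; the DSN, credentials, and migrations path are illustrative placeholders, not values taken from this change:

```go
package main

import (
	"log"

	"github.com/mattes/migrate"
	_ "github.com/mattes/migrate/database/postgres" // registers the "postgres://" and "postgresql://" schemes
	_ "github.com/mattes/migrate/source/file"       // registers the "file://" migration source
)

func main() {
	// Placeholder DSN; x-migrations-table is the custom migrations table
	// option documented in the README above.
	m, err := migrate.New(
		"file://examples/migrations",
		"postgres://postgres:secret@localhost:5432/postgres?sslmode=disable&x-migrations-table=schema_migrations")
	if err != nil {
		log.Fatal(err)
	}
	defer m.Close()

	// Apply all pending migrations; ErrNoChange simply means the database
	// was already at the newest version.
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		log.Fatal(err)
	}
}
```

The blank driver imports matter: `migrate.New` resolves the source and database schemes from a registry populated by each driver's `init`, which is the same pattern the tests below rely on.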
diff --git a/vendor/github.com/rdallman/migrate/database/postgres/postgres.go b/vendor/github.com/rdallman/migrate/database/postgres/postgres.go new file mode 100644 index 000000000..fb2d61c28 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/postgres/postgres.go @@ -0,0 +1,273 @@ +package postgres + +import ( + "database/sql" + "fmt" + "io" + "io/ioutil" + nurl "net/url" + + "github.com/lib/pq" + "github.com/mattes/migrate" + "github.com/mattes/migrate/database" +) + +func init() { + db := Postgres{} + database.Register("postgres", &db) + database.Register("postgresql", &db) +} + +var DefaultMigrationsTable = "schema_migrations" + +var ( + ErrNilConfig = fmt.Errorf("no config") + ErrNoDatabaseName = fmt.Errorf("no database name") + ErrNoSchema = fmt.Errorf("no schema") + ErrDatabaseDirty = fmt.Errorf("database is dirty") +) + +type Config struct { + MigrationsTable string + DatabaseName string +} + +type Postgres struct { + db *sql.DB + isLocked bool + + // Open and WithInstance need to guarantee that config is never nil + config *Config +} + +func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) { + if config == nil { + return nil, ErrNilConfig + } + + if err := instance.Ping(); err != nil { + return nil, err + } + + query := `SELECT CURRENT_DATABASE()` + var databaseName string + if err := instance.QueryRow(query).Scan(&databaseName); err != nil { + return nil, &database.Error{OrigErr: err, Query: []byte(query)} + } + + if len(databaseName) == 0 { + return nil, ErrNoDatabaseName + } + + config.DatabaseName = databaseName + + if len(config.MigrationsTable) == 0 { + config.MigrationsTable = DefaultMigrationsTable + } + + px := &Postgres{ + db: instance, + config: config, + } + + if err := px.ensureVersionTable(); err != nil { + return nil, err + } + + return px, nil +} + +func (p *Postgres) Open(url string) (database.Driver, error) { + purl, err := nurl.Parse(url) + if err != nil { + return nil, err + } + + db, err := sql.Open("postgres", migrate.FilterCustomQuery(purl).String()) + if err != nil { + return nil, err + } + + migrationsTable := purl.Query().Get("x-migrations-table") + if len(migrationsTable) == 0 { + migrationsTable = DefaultMigrationsTable + } + + px, err := WithInstance(db, &Config{ + DatabaseName: purl.Path, + MigrationsTable: migrationsTable, + }) + if err != nil { + return nil, err + } + + return px, nil +} + +func (p *Postgres) Close() error { + return p.db.Close() +} + +// https://www.postgresql.org/docs/9.6/static/explicit-locking.html#ADVISORY-LOCKS +func (p *Postgres) Lock() error { + if p.isLocked { + return database.ErrLocked + } + + aid, err := database.GenerateAdvisoryLockId(p.config.DatabaseName) + if err != nil { + return err + } + + // This will either obtain the lock immediately and return true, + // or return false if the lock cannot be acquired immediately.
+ query := `SELECT pg_try_advisory_lock($1)` + var success bool + if err := p.db.QueryRow(query, aid).Scan(&success); err != nil { + return &database.Error{OrigErr: err, Err: "try lock failed", Query: []byte(query)} + } + + if success { + p.isLocked = true + return nil + } + + return database.ErrLocked +} + +func (p *Postgres) Unlock() error { + if !p.isLocked { + return nil + } + + aid, err := database.GenerateAdvisoryLockId(p.config.DatabaseName) + if err != nil { + return err + } + + query := `SELECT pg_advisory_unlock($1)` + if _, err := p.db.Exec(query, aid); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + p.isLocked = false + return nil +} + +func (p *Postgres) Run(migration io.Reader) error { + migr, err := ioutil.ReadAll(migration) + if err != nil { + return err + } + + // run migration + query := string(migr[:]) + if _, err := p.db.Exec(query); err != nil { + // TODO: cast to postgres error and get line number + return database.Error{OrigErr: err, Err: "migration failed", Query: migr} + } + + return nil +} + +func (p *Postgres) SetVersion(version int, dirty bool) error { + tx, err := p.db.Begin() + if err != nil { + return &database.Error{OrigErr: err, Err: "transaction start failed"} + } + + query := `TRUNCATE "` + p.config.MigrationsTable + `"` + if _, err := tx.Exec(query); err != nil { + tx.Rollback() + return &database.Error{OrigErr: err, Query: []byte(query)} + } + + if version >= 0 { + query = `INSERT INTO "` + p.config.MigrationsTable + `" (version, dirty) VALUES ($1, $2)` + if _, err := tx.Exec(query, version, dirty); err != nil { + tx.Rollback() + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + + if err := tx.Commit(); err != nil { + return &database.Error{OrigErr: err, Err: "transaction commit failed"} + } + + return nil +} + +func (p *Postgres) Version() (version int, dirty bool, err error) { + query := `SELECT version, dirty FROM "` + p.config.MigrationsTable + `" LIMIT 1` + err = p.db.QueryRow(query).Scan(&version, &dirty) + switch { + case err == sql.ErrNoRows: + return database.NilVersion, false, nil + + case err != nil: + if e, ok := err.(*pq.Error); ok { + if e.Code.Name() == "undefined_table" { + return database.NilVersion, false, nil + } + } + return 0, false, &database.Error{OrigErr: err, Query: []byte(query)} + + default: + return version, dirty, nil + } +} + +func (p *Postgres) Drop() error { + // select all tables in current schema + query := `SELECT table_name FROM information_schema.tables WHERE table_schema=(SELECT current_schema())` + tables, err := p.db.Query(query) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + defer tables.Close() + + // delete one table after another + tableNames := make([]string, 0) + for tables.Next() { + var tableName string + if err := tables.Scan(&tableName); err != nil { + return err + } + if len(tableName) > 0 { + tableNames = append(tableNames, tableName) + } + } + + if len(tableNames) > 0 { + // delete one by one ...
+ for _, t := range tableNames { + query = `DROP TABLE IF EXISTS ` + t + ` CASCADE` + if _, err := p.db.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + if err := p.ensureVersionTable(); err != nil { + return err + } + } + + return nil +} + +func (p *Postgres) ensureVersionTable() error { + // check if migration table exists + var count int + query := `SELECT COUNT(1) FROM information_schema.tables WHERE table_name = $1 AND table_schema = (SELECT current_schema()) LIMIT 1` + if err := p.db.QueryRow(query, p.config.MigrationsTable).Scan(&count); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + if count == 1 { + return nil + } + + // if not, create the empty migration table + query = `CREATE TABLE "` + p.config.MigrationsTable + `" (version bigint not null primary key, dirty boolean not null)` + if _, err := p.db.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + return nil +} diff --git a/vendor/github.com/rdallman/migrate/database/postgres/postgres_test.go b/vendor/github.com/rdallman/migrate/database/postgres/postgres_test.go new file mode 100644 index 000000000..9a367a059 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/postgres/postgres_test.go @@ -0,0 +1,150 @@ +package postgres + +// error codes https://github.com/lib/pq/blob/master/error.go + +import ( + "bytes" + "database/sql" + "fmt" + "io" + "testing" + + "github.com/lib/pq" + dt "github.com/mattes/migrate/database/testing" + mt "github.com/mattes/migrate/testing" +) + +var versions = []mt.Version{ + {Image: "postgres:9.6"}, + {Image: "postgres:9.5"}, + {Image: "postgres:9.4"}, + {Image: "postgres:9.3"}, + {Image: "postgres:9.2"}, +} + +func isReady(i mt.Instance) bool { + db, err := sql.Open("postgres", fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable", i.Host(), i.Port())) + if err != nil { + return false + } + defer db.Close() + err = db.Ping() + if err == io.EOF { + return false + + } else if e, ok := err.(*pq.Error); ok { + if e.Code.Name() == "cannot_connect_now" { + return false + } + } + + return true +} + +func Test(t *testing.T) { + mt.ParallelTest(t, versions, isReady, + func(t *testing.T, i mt.Instance) { + p := &Postgres{} + addr := fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable", i.Host(), i.Port()) + d, err := p.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + dt.Test(t, d, []byte("SELECT 1")) + }) +} + +func TestMultiStatement(t *testing.T) { + mt.ParallelTest(t, versions, isReady, + func(t *testing.T, i mt.Instance) { + p := &Postgres{} + addr := fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable", i.Host(), i.Port()) + d, err := p.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + if err := d.Run(bytes.NewReader([]byte("CREATE TABLE foo (foo text); CREATE TABLE bar (bar text);"))); err != nil { + t.Fatalf("expected err to be nil, got %v", err) + } + + // make sure second table exists + var exists bool + if err := d.(*Postgres).db.QueryRow("SELECT EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = 'bar' AND table_schema = (SELECT current_schema()))").Scan(&exists); err != nil { + t.Fatal(err) + } + if !exists { + t.Fatalf("expected table bar to exist") + } + }) +} + +func TestFilterCustomQuery(t *testing.T) { + mt.ParallelTest(t, versions, isReady, + func(t *testing.T, i mt.Instance) { + p := &Postgres{} + addr := fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable&x-custom=foobar", i.Host(), 
i.Port()) + _, err := p.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + }) +} + +func TestWithSchema(t *testing.T) { + mt.ParallelTest(t, versions, isReady, + func(t *testing.T, i mt.Instance) { + p := &Postgres{} + addr := fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable", i.Host(), i.Port()) + d, err := p.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + + // create foobar schema + if err := d.Run(bytes.NewReader([]byte("CREATE SCHEMA foobar AUTHORIZATION postgres"))); err != nil { + t.Fatal(err) + } + if err := d.SetVersion(1, false); err != nil { + t.Fatal(err) + } + + // re-connect using that schema + d2, err := p.Open(fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable&search_path=foobar", i.Host(), i.Port())) + if err != nil { + t.Fatalf("%v", err) + } + + version, _, err := d2.Version() + if err != nil { + t.Fatal(err) + } + if version != -1 { + t.Fatal("expected NilVersion") + } + + // now update version and compare + if err := d2.SetVersion(2, false); err != nil { + t.Fatal(err) + } + version, _, err = d2.Version() + if err != nil { + t.Fatal(err) + } + if version != 2 { + t.Fatal("expected version 2") + } + + // meanwhile, the public schema still has the other version + version, _, err = d.Version() + if err != nil { + t.Fatal(err) + } + if version != 1 { + t.Fatal("expected version 1") + } + }) +} + +func TestWithInstance(t *testing.T) { + +} diff --git a/vendor/github.com/rdallman/migrate/database/ql/README.md b/vendor/github.com/rdallman/migrate/database/ql/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/rdallman/migrate/database/ql/migration/33_create_table.down.sql b/vendor/github.com/rdallman/migrate/database/ql/migration/33_create_table.down.sql new file mode 100644 index 000000000..72d18c554 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/ql/migration/33_create_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS pets; \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/ql/migration/33_create_table.up.sql b/vendor/github.com/rdallman/migrate/database/ql/migration/33_create_table.up.sql new file mode 100644 index 000000000..5ad3404d1 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/ql/migration/33_create_table.up.sql @@ -0,0 +1,3 @@ +CREATE TABLE pets ( + name string +); \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/ql/migration/44_alter_table.down.sql b/vendor/github.com/rdallman/migrate/database/ql/migration/44_alter_table.down.sql new file mode 100644 index 000000000..72d18c554 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/ql/migration/44_alter_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS pets; \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/ql/migration/44_alter_table.up.sql b/vendor/github.com/rdallman/migrate/database/ql/migration/44_alter_table.up.sql new file mode 100644 index 000000000..3993698de --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/ql/migration/44_alter_table.up.sql @@ -0,0 +1 @@ +ALTER TABLE pets ADD predator bool; \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/ql/ql.go b/vendor/github.com/rdallman/migrate/database/ql/ql.go new file mode 100644 index 000000000..46722a9c2 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/ql/ql.go @@ -0,0 +1,213 @@ +package ql + +import ( + "database/sql" + "fmt" + "io" + "io/ioutil" + "strings" + + nurl
"net/url" + + _ "github.com/cznic/ql/driver" + "github.com/mattes/migrate" + "github.com/mattes/migrate/database" +) + +func init() { + database.Register("ql", &Ql{}) +} + +var DefaultMigrationsTable = "schema_migrations" +var ( + ErrDatabaseDirty = fmt.Errorf("database is dirty") + ErrNilConfig = fmt.Errorf("no config") + ErrNoDatabaseName = fmt.Errorf("no database name") + ErrAppendPEM = fmt.Errorf("failed to append PEM") +) + +type Config struct { + MigrationsTable string + DatabaseName string +} + +type Ql struct { + db *sql.DB + isLocked bool + + config *Config +} + +func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) { + if config == nil { + return nil, ErrNilConfig + } + + if err := instance.Ping(); err != nil { + return nil, err + } + if len(config.MigrationsTable) == 0 { + config.MigrationsTable = DefaultMigrationsTable + } + + mx := &Ql{ + db: instance, + config: config, + } + if err := mx.ensureVersionTable(); err != nil { + return nil, err + } + return mx, nil +} +func (m *Ql) ensureVersionTable() error { + tx, err := m.db.Begin() + if err != nil { + return err + } + if _, err := tx.Exec(fmt.Sprintf(` + CREATE TABLE IF NOT EXISTS %s (version uint64,dirty bool); + CREATE UNIQUE INDEX IF NOT EXISTS version_unique ON %s (version); +`, m.config.MigrationsTable, m.config.MigrationsTable)); err != nil { + if err := tx.Rollback(); err != nil { + return err + } + return err + } + if err := tx.Commit(); err != nil { + return err + } + return nil +} + +func (m *Ql) Open(url string) (database.Driver, error) { + purl, err := nurl.Parse(url) + if err != nil { + return nil, err + } + dbfile := strings.Replace(migrate.FilterCustomQuery(purl).String(), "ql://", "", 1) + db, err := sql.Open("ql", dbfile) + if err != nil { + return nil, err + } + migrationsTable := purl.Query().Get("x-migrations-table") + if len(migrationsTable) == 0 { + migrationsTable = DefaultMigrationsTable + } + mx, err := WithInstance(db, &Config{ + DatabaseName: purl.Path, + MigrationsTable: migrationsTable, + }) + if err != nil { + return nil, err + } + return mx, nil +} +func (m *Ql) Close() error { + return m.db.Close() +} +func (m *Ql) Drop() error { + query := `SELECT Name FROM __Table` + tables, err := m.db.Query(query) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + defer tables.Close() + tableNames := make([]string, 0) + for tables.Next() { + var tableName string + if err := tables.Scan(&tableName); err != nil { + return err + } + if len(tableName) > 0 { + if strings.HasPrefix(tableName, "__") == false { + tableNames = append(tableNames, tableName) + } + } + } + if len(tableNames) > 0 { + for _, t := range tableNames { + query := "DROP TABLE " + t + err = m.executeQuery(query) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + if err := m.ensureVersionTable(); err != nil { + return err + } + } + + return nil +} +func (m *Ql) Lock() error { + if m.isLocked { + return database.ErrLocked + } + m.isLocked = true + return nil +} +func (m *Ql) Unlock() error { + if !m.isLocked { + return nil + } + m.isLocked = false + return nil +} +func (m *Ql) Run(migration io.Reader) error { + migr, err := ioutil.ReadAll(migration) + if err != nil { + return err + } + query := string(migr[:]) + + return m.executeQuery(query) +} +func (m *Ql) executeQuery(query string) error { + tx, err := m.db.Begin() + if err != nil { + return &database.Error{OrigErr: err, Err: "transaction start failed"} + } + if _, err := tx.Exec(query); err != nil 
{ + tx.Rollback() + return &database.Error{OrigErr: err, Query: []byte(query)} + } + if err := tx.Commit(); err != nil { + return &database.Error{OrigErr: err, Err: "transaction commit failed"} + } + return nil +} +func (m *Ql) SetVersion(version int, dirty bool) error { + tx, err := m.db.Begin() + if err != nil { + return &database.Error{OrigErr: err, Err: "transaction start failed"} + } + + query := "TRUNCATE TABLE " + m.config.MigrationsTable + if _, err := tx.Exec(query); err != nil { + tx.Rollback() + return &database.Error{OrigErr: err, Query: []byte(query)} + } + + if version >= 0 { + query := fmt.Sprintf(`INSERT INTO %s (version, dirty) VALUES (%d, %t)`, m.config.MigrationsTable, version, dirty) + if _, err := tx.Exec(query); err != nil { + tx.Rollback() + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + + if err := tx.Commit(); err != nil { + return &database.Error{OrigErr: err, Err: "transaction commit failed"} + } + + return nil +} + +func (m *Ql) Version() (version int, dirty bool, err error) { + query := "SELECT version, dirty FROM " + m.config.MigrationsTable + " LIMIT 1" + err = m.db.QueryRow(query).Scan(&version, &dirty) + if err != nil { + return database.NilVersion, false, nil + } + return version, dirty, nil +} diff --git a/vendor/github.com/rdallman/migrate/database/ql/ql_test.go b/vendor/github.com/rdallman/migrate/database/ql/ql_test.go new file mode 100644 index 000000000..f04383fa2 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/ql/ql_test.go @@ -0,0 +1,62 @@ +package ql + +import ( + "database/sql" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + + _ "github.com/cznic/ql/driver" + "github.com/mattes/migrate" + dt "github.com/mattes/migrate/database/testing" + _ "github.com/mattes/migrate/source/file" +) + +func Test(t *testing.T) { + dir, err := ioutil.TempDir("", "ql-driver-test") + if err != nil { + return + } + defer func() { + os.RemoveAll(dir) + }() + fmt.Printf("DB path : %s\n", filepath.Join(dir, "ql.db")) + p := &Ql{} + addr := fmt.Sprintf("ql://%s", filepath.Join(dir, "ql.db")) + d, err := p.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + + db, err := sql.Open("ql", filepath.Join(dir, "ql.db")) + if err != nil { + return + } + defer func() { + if err := db.Close(); err != nil { + return + } + }() + dt.Test(t, d, []byte("CREATE TABLE t (Qty int, Name string);")) + driver, err := WithInstance(db, &Config{}) + if err != nil { + t.Fatalf("%v", err) + } + if err := d.Drop(); err != nil { + t.Fatal(err) + } + + m, err := migrate.NewWithDatabaseInstance( + "file://./migration", + "ql", driver) + if err != nil { + t.Fatalf("%v", err) + } + fmt.Println("UP") + err = m.Up() + if err != nil { + t.Fatalf("%v", err) + } +} diff --git a/vendor/github.com/rdallman/migrate/database/redshift/README.md b/vendor/github.com/rdallman/migrate/database/redshift/README.md new file mode 100644 index 000000000..a03d109ae --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/redshift/README.md @@ -0,0 +1,6 @@ +Redshift +=== + +This provides a Redshift driver for migrations. It is used whenever the URL of the database starts with `redshift://`. + +Redshift is PostgreSQL compatible but has some specific features (or lack thereof) that require slightly different behavior.
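Since the Redshift driver below simply rewrites the URL scheme and delegates everything else to the postgres driver, usage is identical apart from the DSN. A minimal, hedged sketch follows; the cluster endpoint, credentials, and migrations path are placeholders:

```go
package main

import (
	"log"

	"github.com/mattes/migrate"
	_ "github.com/mattes/migrate/database/redshift" // registers the "redshift://" scheme
	_ "github.com/mattes/migrate/source/file"
)

func main() {
	// Placeholder endpoint; Redshift listens on 5439 by default. Note that
	// Lock/Unlock are no-ops in this driver, so callers must ensure only one
	// migrator runs at a time.
	m, err := migrate.New(
		"file://migrations",
		"redshift://user:password@example-cluster.abc123.us-east-1.redshift.amazonaws.com:5439/dev")
	if err != nil {
		log.Fatal(err)
	}
	defer m.Close()

	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		log.Fatal(err)
	}
}
```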
diff --git a/vendor/github.com/rdallman/migrate/database/redshift/redshift.go b/vendor/github.com/rdallman/migrate/database/redshift/redshift.go new file mode 100644 index 000000000..99cdde725 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/redshift/redshift.go @@ -0,0 +1,46 @@ +package redshift + +import ( + "net/url" + + "github.com/mattes/migrate/database" + "github.com/mattes/migrate/database/postgres" +) + +// init registers the driver under the name 'redshift' +func init() { + db := new(Redshift) + db.Driver = new(postgres.Postgres) + + database.Register("redshift", db) +} + +// Redshift is a wrapper around the PostgreSQL driver which implements Redshift-specific behavior. +// +// Currently, the only different behaviour is the lack of locking in Redshift. The (Un)Lock() method(s) have been overridden from the PostgreSQL adapter to simply return nil. +type Redshift struct { + // The wrapped PostgreSQL driver. + database.Driver +} + +// Open implements the database.Driver interface by parsing the URL, switching the scheme from "redshift" to "postgres", and delegating to the underlying PostgreSQL driver. +func (driver *Redshift) Open(dsn string) (database.Driver, error) { + parsed, err := url.Parse(dsn) + if err != nil { + return nil, err + } + + parsed.Scheme = "postgres" + psql, err := driver.Driver.Open(parsed.String()) + if err != nil { + return nil, err + } + + return &Redshift{Driver: psql}, nil +} + +// Lock implements the database.Driver interface by not locking and returning nil. +func (driver *Redshift) Lock() error { return nil } + +// Unlock implements the database.Driver interface by not unlocking and returning nil. +func (driver *Redshift) Unlock() error { return nil } diff --git a/vendor/github.com/rdallman/migrate/database/shell/README.md b/vendor/github.com/rdallman/migrate/database/shell/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/rdallman/migrate/database/spanner/README.md b/vendor/github.com/rdallman/migrate/database/spanner/README.md new file mode 100644 index 000000000..0de867a8d --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/spanner/README.md @@ -0,0 +1,36 @@ +# Google Cloud Spanner + +## Usage + +The DSN must be given in the following format. + +`spanner://projects/{projectId}/instances/{instanceId}/databases/{databaseName}` + +See [Google Spanner Documentation](https://cloud.google.com/spanner/docs) for details. + + +| Param | WithInstance Config | Description | +| ----- | ------------------- | ----------- | +| `x-migrations-table` | `MigrationsTable` | Name of the migrations table | +| `url` | `DatabaseName` | The full path to the Spanner database resource. If provided as part of `Config` it must not contain a scheme or query string to match the format `projects/{projectId}/instances/{instanceId}/databases/{databaseName}`| +| `projectId` || The Google Cloud Platform project id +| `instanceId` || The id of the instance running Spanner +| `databaseName` || The name of the Spanner database + + +> **Note:** Google Cloud Spanner migrations can take a considerable amount of +> time. The migrations provided as part of the example take about 6 minutes to +> run on a small instance. +> +> ```log +> 1481574547/u create_users_table (21.354507597s) +> 1496539702/u add_city_to_users (41.647359754s) +> 1496601752/u add_index_on_user_emails (2m12.155787369s) +> 1496602638/u create_books_table (2m30.77299181s) +> ``` + +## Testing + +To unit test the `spanner` driver, `SPANNER_DATABASE` needs to be set.
You'll +need to sign-up to Google Cloud Platform (GCP) and have a running Spanner +instance since it is not possible to run Google Spanner outside GCP. \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1481574547_create_users_table.down.sql b/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1481574547_create_users_table.down.sql new file mode 100644 index 000000000..7bd522c12 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1481574547_create_users_table.down.sql @@ -0,0 +1 @@ +DROP TABLE Users diff --git a/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1481574547_create_users_table.up.sql b/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1481574547_create_users_table.up.sql new file mode 100644 index 000000000..97b8bdb74 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1481574547_create_users_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE Users ( + UserId INT64, + Name STRING(40), + Email STRING(83) +) PRIMARY KEY(UserId) \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496539702_add_city_to_users.down.sql b/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496539702_add_city_to_users.down.sql new file mode 100644 index 000000000..f0fcd0854 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496539702_add_city_to_users.down.sql @@ -0,0 +1 @@ +ALTER TABLE Users DROP COLUMN city \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496539702_add_city_to_users.up.sql b/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496539702_add_city_to_users.up.sql new file mode 100644 index 000000000..b2d6c02bf --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496539702_add_city_to_users.up.sql @@ -0,0 +1 @@ +ALTER TABLE Users ADD COLUMN city STRING(100) \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496601752_add_index_on_user_emails.down.sql b/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496601752_add_index_on_user_emails.down.sql new file mode 100644 index 000000000..29f92559d --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496601752_add_index_on_user_emails.down.sql @@ -0,0 +1 @@ +DROP INDEX UsersEmailIndex diff --git a/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496601752_add_index_on_user_emails.up.sql b/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496601752_add_index_on_user_emails.up.sql new file mode 100644 index 000000000..e77b7f2db --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496601752_add_index_on_user_emails.up.sql @@ -0,0 +1 @@ +CREATE UNIQUE INDEX UsersEmailIndex ON Users (Email) diff --git a/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496602638_create_books_table.down.sql b/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496602638_create_books_table.down.sql new file mode 100644 index 000000000..bd2ce054c --- /dev/null +++ 
b/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496602638_create_books_table.down.sql @@ -0,0 +1 @@ +DROP TABLE Books \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496602638_create_books_table.up.sql b/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496602638_create_books_table.up.sql new file mode 100644 index 000000000..0bfa0d484 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496602638_create_books_table.up.sql @@ -0,0 +1,6 @@ +CREATE TABLE Books ( + UserId INT64, + Name STRING(40), + Author STRING(40) +) PRIMARY KEY(UserId, Name), +INTERLEAVE IN PARENT Users ON DELETE CASCADE diff --git a/vendor/github.com/rdallman/migrate/database/spanner/spanner.go b/vendor/github.com/rdallman/migrate/database/spanner/spanner.go new file mode 100644 index 000000000..6c65bab3f --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/spanner/spanner.go @@ -0,0 +1,294 @@ +package spanner + +import ( + "fmt" + "io" + "io/ioutil" + "log" + nurl "net/url" + "regexp" + "strings" + + "golang.org/x/net/context" + + "cloud.google.com/go/spanner" + sdb "cloud.google.com/go/spanner/admin/database/apiv1" + + "github.com/mattes/migrate" + "github.com/mattes/migrate/database" + + "google.golang.org/api/iterator" + adminpb "google.golang.org/genproto/googleapis/spanner/admin/database/v1" +) + +func init() { + db := Spanner{} + database.Register("spanner", &db) +} + +// DefaultMigrationsTable is used if no custom table is specified +const DefaultMigrationsTable = "SchemaMigrations" + +// Driver errors +var ( + ErrNilConfig = fmt.Errorf("no config") + ErrNoDatabaseName = fmt.Errorf("no database name") + ErrNoSchema = fmt.Errorf("no schema") + ErrDatabaseDirty = fmt.Errorf("database is dirty") +) + +// Config used for a Spanner instance +type Config struct { + MigrationsTable string + DatabaseName string +} + +// Spanner implements database.Driver for Google Cloud Spanner +type Spanner struct { + db *DB + + config *Config +} + +type DB struct { + admin *sdb.DatabaseAdminClient + data *spanner.Client +} + +// WithInstance implements database.Driver +func WithInstance(instance *DB, config *Config) (database.Driver, error) { + if config == nil { + return nil, ErrNilConfig + } + + if len(config.DatabaseName) == 0 { + return nil, ErrNoDatabaseName + } + + if len(config.MigrationsTable) == 0 { + config.MigrationsTable = DefaultMigrationsTable + } + + sx := &Spanner{ + db: instance, + config: config, + } + + if err := sx.ensureVersionTable(); err != nil { + return nil, err + } + + return sx, nil +} + +// Open implements database.Driver +func (s *Spanner) Open(url string) (database.Driver, error) { + purl, err := nurl.Parse(url) + if err != nil { + return nil, err + } + + ctx := context.Background() + + adminClient, err := sdb.NewDatabaseAdminClient(ctx) + if err != nil { + return nil, err + } + dbname := strings.Replace(migrate.FilterCustomQuery(purl).String(), "spanner://", "", 1) + dataClient, err := spanner.NewClient(ctx, dbname) + if err != nil { + log.Fatal(err) + } + + migrationsTable := purl.Query().Get("x-migrations-table") + if len(migrationsTable) == 0 { + migrationsTable = DefaultMigrationsTable + } + + db := &DB{admin: adminClient, data: dataClient} + return WithInstance(db, &Config{ + DatabaseName: dbname, + MigrationsTable: migrationsTable, + }) +} + +// Close implements database.Driver +func (s *Spanner) Close() error { + s.db.data.Close() + 
return s.db.admin.Close() +} + +// Lock implements database.Driver but doesn't do anything because Spanner only +// enqueues the UpdateDatabaseDdlRequest. +func (s *Spanner) Lock() error { + return nil +} + +// Unlock implements database.Driver but no action required, see Lock. +func (s *Spanner) Unlock() error { + return nil +} + +// Run implements database.Driver +func (s *Spanner) Run(migration io.Reader) error { + migr, err := ioutil.ReadAll(migration) + if err != nil { + return err + } + + // run migration + stmts := migrationStatements(migr) + ctx := context.Background() + + op, err := s.db.admin.UpdateDatabaseDdl(ctx, &adminpb.UpdateDatabaseDdlRequest{ + Database: s.config.DatabaseName, + Statements: stmts, + }) + + if err != nil { + return &database.Error{OrigErr: err, Err: "migration failed", Query: migr} + } + + if err := op.Wait(ctx); err != nil { + return &database.Error{OrigErr: err, Err: "migration failed", Query: migr} + } + + return nil +} + +// SetVersion implements database.Driver +func (s *Spanner) SetVersion(version int, dirty bool) error { + ctx := context.Background() + + _, err := s.db.data.ReadWriteTransaction(ctx, + func(ctx context.Context, txn *spanner.ReadWriteTransaction) error { + m := []*spanner.Mutation{ + spanner.Delete(s.config.MigrationsTable, spanner.AllKeys()), + spanner.Insert(s.config.MigrationsTable, + []string{"Version", "Dirty"}, + []interface{}{version, dirty}, + )} + return txn.BufferWrite(m) + }) + if err != nil { + return &database.Error{OrigErr: err} + } + + return nil +} + +// Version implements database.Driver +func (s *Spanner) Version() (version int, dirty bool, err error) { + ctx := context.Background() + + stmt := spanner.Statement{ + SQL: `SELECT Version, Dirty FROM ` + s.config.MigrationsTable + ` LIMIT 1`, + } + iter := s.db.data.Single().Query(ctx, stmt) + defer iter.Stop() + + row, err := iter.Next() + switch err { + case iterator.Done: + return database.NilVersion, false, nil + case nil: + var v int64 + if err = row.Columns(&v, &dirty); err != nil { + return 0, false, &database.Error{OrigErr: err, Query: []byte(stmt.SQL)} + } + version = int(v) + default: + return 0, false, &database.Error{OrigErr: err, Query: []byte(stmt.SQL)} + } + + return version, dirty, nil +} + +// Drop implements database.Driver. Retrieves the database schema first and +// creates statements to drop the indexes and tables accordingly. +// Note: The drop statements are created in reverse order to how they're +// provided in the schema. Assuming the schema describes how the database can +// be "build up", it seems logical to "unbuild" the database simply by going the +// opposite direction. 
More testing of this assumption is still needed. +func (s *Spanner) Drop() error { + ctx := context.Background() + res, err := s.db.admin.GetDatabaseDdl(ctx, &adminpb.GetDatabaseDdlRequest{ + Database: s.config.DatabaseName, + }) + if err != nil { + return &database.Error{OrigErr: err, Err: "drop failed"} + } + if len(res.Statements) == 0 { + return nil + } + + r := regexp.MustCompile(`(CREATE TABLE\s(\S+)\s)|(CREATE.+INDEX\s(\S+)\s)`) + stmts := make([]string, 0) + for i := len(res.Statements) - 1; i >= 0; i-- { + s := res.Statements[i] + m := r.FindSubmatch([]byte(s)) + + if len(m) == 0 { + continue + } else if tbl := m[2]; len(tbl) > 0 { + stmts = append(stmts, fmt.Sprintf(`DROP TABLE %s`, tbl)) + } else if idx := m[4]; len(idx) > 0 { + stmts = append(stmts, fmt.Sprintf(`DROP INDEX %s`, idx)) + } + } + + op, err := s.db.admin.UpdateDatabaseDdl(ctx, &adminpb.UpdateDatabaseDdlRequest{ + Database: s.config.DatabaseName, + Statements: stmts, + }) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(strings.Join(stmts, "; "))} + } + if err := op.Wait(ctx); err != nil { + return &database.Error{OrigErr: err, Query: []byte(strings.Join(stmts, "; "))} + } + + if err := s.ensureVersionTable(); err != nil { + return err + } + + return nil +} + +func (s *Spanner) ensureVersionTable() error { + ctx := context.Background() + tbl := s.config.MigrationsTable + iter := s.db.data.Single().Read(ctx, tbl, spanner.AllKeys(), []string{"Version"}) + if err := iter.Do(func(r *spanner.Row) error { return nil }); err == nil { + return nil + } + + stmt := fmt.Sprintf(`CREATE TABLE %s ( + Version INT64 NOT NULL, + Dirty BOOL NOT NULL + ) PRIMARY KEY(Version)`, tbl) + + op, err := s.db.admin.UpdateDatabaseDdl(ctx, &adminpb.UpdateDatabaseDdlRequest{ + Database: s.config.DatabaseName, + Statements: []string{stmt}, + }) + + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(stmt)} + } + if err := op.Wait(ctx); err != nil { + return &database.Error{OrigErr: err, Query: []byte(stmt)} + } + + return nil +} + +func migrationStatements(migration []byte) []string { + regex := regexp.MustCompile(";$") + migrationString := string(migration[:]) + migrationString = strings.TrimSpace(migrationString) + migrationString = regex.ReplaceAllString(migrationString, "") + + statements := strings.Split(migrationString, ";") + return statements +} diff --git a/vendor/github.com/rdallman/migrate/database/spanner/spanner_test.go b/vendor/github.com/rdallman/migrate/database/spanner/spanner_test.go new file mode 100644 index 000000000..43d475ca4 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/spanner/spanner_test.go @@ -0,0 +1,28 @@ +package spanner + +import ( + "fmt" + "os" + "testing" + + dt "github.com/mattes/migrate/database/testing" +) + +func Test(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + db, ok := os.LookupEnv("SPANNER_DATABASE") + if !ok { + t.Skip("SPANNER_DATABASE not set, skipping test.") + } + + s := &Spanner{} + addr := fmt.Sprintf("spanner://%v", db) + d, err := s.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + dt.Test(t, d, []byte("SELECT 1")) +} diff --git a/vendor/github.com/rdallman/migrate/database/sqlite3/README.md b/vendor/github.com/rdallman/migrate/database/sqlite3/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/rdallman/migrate/database/sqlite3/migration/33_create_table.down.sql b/vendor/github.com/rdallman/migrate/database/sqlite3/migration/33_create_table.down.sql new file mode 100644 index
000000000..72d18c554 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/sqlite3/migration/33_create_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS pets; \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/sqlite3/migration/33_create_table.up.sql b/vendor/github.com/rdallman/migrate/database/sqlite3/migration/33_create_table.up.sql new file mode 100644 index 000000000..5ad3404d1 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/sqlite3/migration/33_create_table.up.sql @@ -0,0 +1,3 @@ +CREATE TABLE pets ( + name string +); \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/sqlite3/migration/44_alter_table.down.sql b/vendor/github.com/rdallman/migrate/database/sqlite3/migration/44_alter_table.down.sql new file mode 100644 index 000000000..72d18c554 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/sqlite3/migration/44_alter_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS pets; \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/sqlite3/migration/44_alter_table.up.sql b/vendor/github.com/rdallman/migrate/database/sqlite3/migration/44_alter_table.up.sql new file mode 100644 index 000000000..f0682fcca --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/sqlite3/migration/44_alter_table.up.sql @@ -0,0 +1 @@ +ALTER TABLE pets ADD predator bool; diff --git a/vendor/github.com/rdallman/migrate/database/sqlite3/sqlite3.go b/vendor/github.com/rdallman/migrate/database/sqlite3/sqlite3.go new file mode 100644 index 000000000..bfd1a5b81 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/sqlite3/sqlite3.go @@ -0,0 +1,215 @@ +package sqlite3 + +import ( + "database/sql" + "fmt" + "github.com/mattes/migrate" + "github.com/mattes/migrate/database" + _ "github.com/mattn/go-sqlite3" + "io" + "io/ioutil" + nurl "net/url" + "strings" +) + +func init() { + database.Register("sqlite3", &Sqlite{}) +} + +var DefaultMigrationsTable = "schema_migrations" +var ( + ErrDatabaseDirty = fmt.Errorf("database is dirty") + ErrNilConfig = fmt.Errorf("no config") + ErrNoDatabaseName = fmt.Errorf("no database name") +) + +type Config struct { + MigrationsTable string + DatabaseName string +} + +type Sqlite struct { + db *sql.DB + isLocked bool + + config *Config +} + +func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) { + if config == nil { + return nil, ErrNilConfig + } + + if err := instance.Ping(); err != nil { + return nil, err + } + if len(config.MigrationsTable) == 0 { + config.MigrationsTable = DefaultMigrationsTable + } + + mx := &Sqlite{ + db: instance, + config: config, + } + if err := mx.ensureVersionTable(); err != nil { + return nil, err + } + return mx, nil +} + +func (m *Sqlite) ensureVersionTable() error { + + query := fmt.Sprintf(` + CREATE TABLE IF NOT EXISTS %s (version uint64,dirty bool); + CREATE UNIQUE INDEX IF NOT EXISTS version_unique ON %s (version); + `, m.config.MigrationsTable, m.config.MigrationsTable) + + if _, err := m.db.Exec(query); err != nil { + return err + } + return nil +} + +func (m *Sqlite) Open(url string) (database.Driver, error) { + purl, err := nurl.Parse(url) + if err != nil { + return nil, err + } + dbfile := strings.Replace(migrate.FilterCustomQuery(purl).String(), "sqlite3://", "", 1) + db, err := sql.Open("sqlite3", dbfile) + if err != nil { + return nil, err + } + + migrationsTable := purl.Query().Get("x-migrations-table") + if len(migrationsTable) == 0 { + migrationsTable =
DefaultMigrationsTable + } + mx, err := WithInstance(db, &Config{ + DatabaseName: purl.Path, + MigrationsTable: migrationsTable, + }) + if err != nil { + return nil, err + } + return mx, nil +} + +func (m *Sqlite) Close() error { + return m.db.Close() +} + +func (m *Sqlite) Drop() error { + query := `SELECT name FROM sqlite_master WHERE type = 'table';` + tables, err := m.db.Query(query) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + defer tables.Close() + tableNames := make([]string, 0) + for tables.Next() { + var tableName string + if err := tables.Scan(&tableName); err != nil { + return err + } + if len(tableName) > 0 { + tableNames = append(tableNames, tableName) + } + } + if len(tableNames) > 0 { + for _, t := range tableNames { + query := "DROP TABLE " + t + err = m.executeQuery(query) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + if err := m.ensureVersionTable(); err != nil { + return err + } + query := "VACUUM" + _, err = m.db.Exec(query) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + + return nil +} + +func (m *Sqlite) Lock() error { + if m.isLocked { + return database.ErrLocked + } + m.isLocked = true + return nil +} + +func (m *Sqlite) Unlock() error { + if !m.isLocked { + return nil + } + m.isLocked = false + return nil +} + +func (m *Sqlite) Run(migration io.Reader) error { + migr, err := ioutil.ReadAll(migration) + if err != nil { + return err + } + query := string(migr[:]) + + return m.executeQuery(query) +} + +func (m *Sqlite) executeQuery(query string) error { + tx, err := m.db.Begin() + if err != nil { + return &database.Error{OrigErr: err, Err: "transaction start failed"} + } + if _, err := tx.Exec(query); err != nil { + tx.Rollback() + return &database.Error{OrigErr: err, Query: []byte(query)} + } + if err := tx.Commit(); err != nil { + return &database.Error{OrigErr: err, Err: "transaction commit failed"} + } + return nil +} + +func (m *Sqlite) SetVersion(version int, dirty bool) error { + tx, err := m.db.Begin() + if err != nil { + return &database.Error{OrigErr: err, Err: "transaction start failed"} + } + + query := "DELETE FROM " + m.config.MigrationsTable + if _, err := tx.Exec(query); err != nil { + tx.Rollback() + return &database.Error{OrigErr: err, Query: []byte(query)} + } + + if version >= 0 { + query := fmt.Sprintf(`INSERT INTO %s (version, dirty) VALUES (%d, '%t')`, m.config.MigrationsTable, version, dirty) + if _, err := tx.Exec(query); err != nil { + tx.Rollback() + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + + if err := tx.Commit(); err != nil { + return &database.Error{OrigErr: err, Err: "transaction commit failed"} + } + + return nil +} + +func (m *Sqlite) Version() (version int, dirty bool, err error) { + query := "SELECT version, dirty FROM " + m.config.MigrationsTable + " LIMIT 1" + err = m.db.QueryRow(query).Scan(&version, &dirty) + if err != nil { + return database.NilVersion, false, nil + } + return version, dirty, nil +} diff --git a/vendor/github.com/rdallman/migrate/database/sqlite3/sqlite3_test.go b/vendor/github.com/rdallman/migrate/database/sqlite3/sqlite3_test.go new file mode 100644 index 000000000..6a5c5c864 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/sqlite3/sqlite3_test.go @@ -0,0 +1,61 @@ +package sqlite3 + +import ( + "database/sql" + "fmt" + "github.com/mattes/migrate" + dt "github.com/mattes/migrate/database/testing" + _ "github.com/mattes/migrate/source/file" + _
"github.com/mattn/go-sqlite3" + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func Test(t *testing.T) { + dir, err := ioutil.TempDir("", "sqlite3-driver-test") + if err != nil { + return + } + defer func() { + os.RemoveAll(dir) + }() + fmt.Printf("DB path : %s\n", filepath.Join(dir, "sqlite3.db")) + p := &Sqlite{} + addr := fmt.Sprintf("sqlite3://%s", filepath.Join(dir, "sqlite3.db")) + d, err := p.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + + db, err := sql.Open("sqlite3", filepath.Join(dir, "sqlite3.db")) + if err != nil { + return + } + defer func() { + if err := db.Close(); err != nil { + return + } + }() + dt.Test(t, d, []byte("CREATE TABLE t (Qty int, Name string);")) + driver, err := WithInstance(db, &Config{}) + if err != nil { + t.Fatalf("%v", err) + } + if err := d.Drop(); err != nil { + t.Fatal(err) + } + + m, err := migrate.NewWithDatabaseInstance( + "file://./migration", + "ql", driver) + if err != nil { + t.Fatalf("%v", err) + } + fmt.Println("UP") + err = m.Up() + if err != nil { + t.Fatalf("%v", err) + } +} diff --git a/vendor/github.com/rdallman/migrate/database/stub/stub.go b/vendor/github.com/rdallman/migrate/database/stub/stub.go new file mode 100644 index 000000000..172bcd37b --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/stub/stub.go @@ -0,0 +1,95 @@ +package stub + +import ( + "io" + "io/ioutil" + "reflect" + + "github.com/mattes/migrate/database" +) + +func init() { + database.Register("stub", &Stub{}) +} + +type Stub struct { + Url string + Instance interface{} + CurrentVersion int + MigrationSequence []string + LastRunMigration []byte // todo: make []string + IsDirty bool + IsLocked bool + + Config *Config +} + +func (s *Stub) Open(url string) (database.Driver, error) { + return &Stub{ + Url: url, + CurrentVersion: -1, + MigrationSequence: make([]string, 0), + Config: &Config{}, + }, nil +} + +type Config struct{} + +func WithInstance(instance interface{}, config *Config) (database.Driver, error) { + return &Stub{ + Instance: instance, + CurrentVersion: -1, + MigrationSequence: make([]string, 0), + Config: config, + }, nil +} + +func (s *Stub) Close() error { + return nil +} + +func (s *Stub) Lock() error { + if s.IsLocked { + return database.ErrLocked + } + s.IsLocked = true + return nil +} + +func (s *Stub) Unlock() error { + s.IsLocked = false + return nil +} + +func (s *Stub) Run(migration io.Reader) error { + m, err := ioutil.ReadAll(migration) + if err != nil { + return err + } + s.LastRunMigration = m + s.MigrationSequence = append(s.MigrationSequence, string(m[:])) + return nil +} + +func (s *Stub) SetVersion(version int, state bool) error { + s.CurrentVersion = version + s.IsDirty = state + return nil +} + +func (s *Stub) Version() (version int, dirty bool, err error) { + return s.CurrentVersion, s.IsDirty, nil +} + +const DROP = "DROP" + +func (s *Stub) Drop() error { + s.CurrentVersion = -1 + s.LastRunMigration = nil + s.MigrationSequence = append(s.MigrationSequence, DROP) + return nil +} + +func (s *Stub) EqualSequence(seq []string) bool { + return reflect.DeepEqual(seq, s.MigrationSequence) +} diff --git a/vendor/github.com/rdallman/migrate/database/stub/stub_test.go b/vendor/github.com/rdallman/migrate/database/stub/stub_test.go new file mode 100644 index 000000000..3d8b8926c --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/stub/stub_test.go @@ -0,0 +1,16 @@ +package stub + +import ( + "testing" + + dt "github.com/mattes/migrate/database/testing" +) + +func Test(t *testing.T) { + s := &Stub{} + 
d, err := s.Open("") + if err != nil { + t.Fatal(err) + } + dt.Test(t, d, []byte("/* foobar migration */")) +} diff --git a/vendor/github.com/rdallman/migrate/database/testing/testing.go b/vendor/github.com/rdallman/migrate/database/testing/testing.go new file mode 100644 index 000000000..4ab090d1a --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/testing/testing.go @@ -0,0 +1,138 @@ +// Package testing has the database tests. +// All database drivers must pass the Test function. +// This lives in its own package so it stays a test dependency. +package testing + +import ( + "bytes" + "fmt" + "io" + "testing" + "time" + + "github.com/mattes/migrate/database" +) + +// Test runs tests against database implementations. +func Test(t *testing.T, d database.Driver, migration []byte) { + if migration == nil { + panic("test must provide migration reader") + } + + TestNilVersion(t, d) // test first + TestLockAndUnlock(t, d) + TestRun(t, d, bytes.NewReader(migration)) + TestDrop(t, d) + TestSetVersion(t, d) // also tests Version() +} + +func TestNilVersion(t *testing.T, d database.Driver) { + v, _, err := d.Version() + if err != nil { + t.Fatal(err) + } + if v != database.NilVersion { + t.Fatalf("Version: expected version to be NilVersion (-1), got %v", v) + } +} + +func TestLockAndUnlock(t *testing.T, d database.Driver) { + // add a timeout, in case there is a deadlock + done := make(chan bool, 1) + go func() { + timeout := time.After(15 * time.Second) + for { + select { + case <-done: + return + case <-timeout: + panic(fmt.Sprintf("Timeout after 15 seconds. Looks like a deadlock in Lock/Unlock.\n%#v", d)) + } + } + }() + defer func() { + done <- true + }() + + // run the locking test ... + + if err := d.Lock(); err != nil { + t.Fatal(err) + } + + // try to acquire lock again + if err := d.Lock(); err == nil { + t.Fatal("Lock: expected err not to be nil") + } + + // unlock + if err := d.Unlock(); err != nil { + t.Fatal(err) + } + + // try to lock + if err := d.Lock(); err != nil { + t.Fatal(err) + } + if err := d.Unlock(); err != nil { + t.Fatal(err) + } +} + +func TestRun(t *testing.T, d database.Driver, migration io.Reader) { + if migration == nil { + panic("migration can't be nil") + } + + if err := d.Run(migration); err != nil { + t.Fatal(err) + } +} + +func TestDrop(t *testing.T, d database.Driver) { + if err := d.Drop(); err != nil { + t.Fatal(err) + } +} + +func TestSetVersion(t *testing.T, d database.Driver) { + if err := d.SetVersion(1, true); err != nil { + t.Fatal(err) + } + + // call again + if err := d.SetVersion(1, true); err != nil { + t.Fatal(err) + } + + v, dirty, err := d.Version() + if err != nil { + t.Fatal(err) + } + if !dirty { + t.Fatal("expected dirty") + } + if v != 1 { + t.Fatal("expected version to be 1") + } + + if err := d.SetVersion(2, false); err != nil { + t.Fatal(err) + } + + // call again + if err := d.SetVersion(2, false); err != nil { + t.Fatal(err) + } + + v, dirty, err = d.Version() + if err != nil { + t.Fatal(err) + } + if dirty { + t.Fatal("expected not dirty") + } + if v != 2 { + t.Fatal("expected version to be 2") + } +} diff --git a/vendor/github.com/rdallman/migrate/database/util.go b/vendor/github.com/rdallman/migrate/database/util.go new file mode 100644 index 000000000..c636a7abe --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/util.go @@ -0,0 +1,15 @@ +package database + +import ( + "fmt" + "hash/crc32" +) + +const advisoryLockIdSalt uint = 1486364155 + +// inspired by rails migrations, see https://goo.gl/8o9bCT +func
GenerateAdvisoryLockId(databaseName string) (string, error) { + sum := crc32.ChecksumIEEE([]byte(databaseName)) + sum = sum * uint32(advisoryLockIdSalt) + return fmt.Sprintf("%v", sum), nil +} diff --git a/vendor/github.com/rdallman/migrate/database/util_test.go b/vendor/github.com/rdallman/migrate/database/util_test.go new file mode 100644 index 000000000..905c840b9 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/database/util_test.go @@ -0,0 +1,14 @@ +package database + +import "testing" + +func TestGenerateAdvisoryLockId(t *testing.T) { + id, err := GenerateAdvisoryLockId("database_name") + if err != nil { + t.Errorf("expected err to be nil, got %v", err) + } + if len(id) == 0 { + t.Errorf("expected generated id not to be empty") + } + t.Logf("generated id: %v", id) +} diff --git a/vendor/github.com/rdallman/migrate/log.go b/vendor/github.com/rdallman/migrate/log.go new file mode 100644 index 000000000..cb00b7798 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/log.go @@ -0,0 +1,12 @@ +package migrate + +// Logger is an interface so you can pass in your own +// logging implementation. +type Logger interface { + + // Printf is like fmt.Printf + Printf(format string, v ...interface{}) + + // Verbose should return true when verbose logging output is wanted + Verbose() bool +} diff --git a/vendor/github.com/rdallman/migrate/migrate.go b/vendor/github.com/rdallman/migrate/migrate.go new file mode 100644 index 000000000..58414e8fc --- /dev/null +++ b/vendor/github.com/rdallman/migrate/migrate.go @@ -0,0 +1,920 @@ +// Package migrate reads migrations from sources and runs them against databases. +// Sources are defined by the `source.Driver` and databases by the `database.Driver` +// interface. The driver interfaces are kept "dumb"; all migration logic is kept +// in this package. +package migrate + +import ( + "fmt" + "os" + "sync" + "time" + + "github.com/mattes/migrate/database" + "github.com/mattes/migrate/source" +) + +// DefaultPrefetchMigrations sets the number of migrations to pre-read +// from the source. This is helpful if the source is remote, but has little +// effect for a local source (i.e. file system). +// Please note that this setting has a major impact on the memory usage, +// since each pre-read migration is buffered in memory. See DefaultBufferSize. +var DefaultPrefetchMigrations = uint(10) + +// DefaultLockTimeout sets the max time a database driver has to acquire a lock. +var DefaultLockTimeout = 15 * time.Second + +var ( + ErrNoChange = fmt.Errorf("no change") + ErrNilVersion = fmt.Errorf("no migration") + ErrLocked = fmt.Errorf("database locked") + ErrLockTimeout = fmt.Errorf("timeout: can't acquire database lock") +) + +// ErrShortLimit is an error returned when not enough migrations +// can be returned by a source for a given limit. +type ErrShortLimit struct { + Short uint +} + +// Error implements the error interface. +func (e ErrShortLimit) Error() string { + return fmt.Sprintf("limit %v short", e.Short) +} + +type ErrDirty struct { + Version int +} + +func (e ErrDirty) Error() string { + return fmt.Sprintf("Dirty database version %v. Fix and force version.", e.Version) +} + +type Migrate struct { + sourceName string + sourceDrv source.Driver + databaseName string + databaseDrv database.Driver + + // Log accepts a Logger interface + Log Logger + + // GracefulStop accepts `true` and will stop executing migrations + // as soon as possible at a safe break point, so that the database + // is not corrupted.
+ GracefulStop chan bool + isGracefulStop bool + + isLockedMu *sync.Mutex + isLocked bool + + // PrefetchMigrations defaults to DefaultPrefetchMigrations, + // but can be set per Migrate instance. + PrefetchMigrations uint + + // LockTimeout defaults to DefaultLockTimeout, + // but can be set per Migrate instance. + LockTimeout time.Duration +} + +// New returns a new Migrate instance from a source URL and a database URL. +// The URL scheme is defined by each driver. +func New(sourceUrl, databaseUrl string) (*Migrate, error) { + m := newCommon() + + sourceName, err := schemeFromUrl(sourceUrl) + if err != nil { + return nil, err + } + m.sourceName = sourceName + + databaseName, err := schemeFromUrl(databaseUrl) + if err != nil { + return nil, err + } + m.databaseName = databaseName + + sourceDrv, err := source.Open(sourceUrl) + if err != nil { + return nil, err + } + m.sourceDrv = sourceDrv + + databaseDrv, err := database.Open(databaseUrl) + if err != nil { + return nil, err + } + m.databaseDrv = databaseDrv + + return m, nil +} + +// NewWithDatabaseInstance returns a new Migrate instance from a source URL +// and an existing database instance. The source URL scheme is defined by each driver. +// Use any string that can serve as an identifier during logging as databaseName. +// You are responsible for closing the underlying database client if necessary. +func NewWithDatabaseInstance(sourceUrl string, databaseName string, databaseInstance database.Driver) (*Migrate, error) { + m := newCommon() + + sourceName, err := schemeFromUrl(sourceUrl) + if err != nil { + return nil, err + } + m.sourceName = sourceName + + m.databaseName = databaseName + + sourceDrv, err := source.Open(sourceUrl) + if err != nil { + return nil, err + } + m.sourceDrv = sourceDrv + + m.databaseDrv = databaseInstance + + return m, nil +} + +// NewWithSourceInstance returns a new Migrate instance from an existing source instance +// and a database URL. The database URL scheme is defined by each driver. +// Use any string that can serve as an identifier during logging as sourceName. +// You are responsible for closing the underlying source client if necessary. +func NewWithSourceInstance(sourceName string, sourceInstance source.Driver, databaseUrl string) (*Migrate, error) { + m := newCommon() + + databaseName, err := schemeFromUrl(databaseUrl) + if err != nil { + return nil, err + } + m.databaseName = databaseName + + m.sourceName = sourceName + + databaseDrv, err := database.Open(databaseUrl) + if err != nil { + return nil, err + } + m.databaseDrv = databaseDrv + + m.sourceDrv = sourceInstance + + return m, nil +} + +// NewWithInstance returns a new Migrate instance from an existing source and +// database instance. Use any string that can serve as an identifier during logging +// as sourceName and databaseName. You are responsible for closing down +// the underlying source and database client if necessary. +func NewWithInstance(sourceName string, sourceInstance source.Driver, databaseName string, databaseInstance database.Driver) (*Migrate, error) { + m := newCommon() + + m.sourceName = sourceName + m.databaseName = databaseName + + m.sourceDrv = sourceInstance + m.databaseDrv = databaseInstance + + return m, nil +} + +func newCommon() *Migrate { + return &Migrate{ + GracefulStop: make(chan bool, 1), + PrefetchMigrations: DefaultPrefetchMigrations, + LockTimeout: DefaultLockTimeout, + isLockedMu: &sync.Mutex{}, + } +} + +// Close closes the source and the database.
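+//
+// A shutdown sketch (illustrative; sigCh is a hypothetical channel delivering
+// OS signals to the process):
+//
+//	go func() {
+//		<-sigCh                // e.g. SIGTERM from the host
+//		m.GracefulStop <- true // finish the current migration, then stop
+//	}()
+//	// once no more migrations are running:
+//	sourceErr, databaseErr := m.Close()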
+func (m *Migrate) Close() (source error, database error) { + databaseSrvClose := make(chan error) + sourceSrvClose := make(chan error) + + m.logVerbosePrintf("Closing source and database\n") + + go func() { + databaseSrvClose <- m.databaseDrv.Close() + }() + + go func() { + sourceSrvClose <- m.sourceDrv.Close() + }() + + return <-sourceSrvClose, <-databaseSrvClose +} + +// Migrate looks at the currently active migration version, +// then migrates either up or down to the specified version. +func (m *Migrate) Migrate(version uint) error { + if err := m.lock(); err != nil { + return err + } + + curVersion, dirty, err := m.databaseDrv.Version() + if err != nil { + return m.unlockErr(err) + } + + if dirty { + return m.unlockErr(ErrDirty{curVersion}) + } + + ret := make(chan interface{}, m.PrefetchMigrations) + go m.read(curVersion, int(version), ret) + + return m.unlockErr(m.runMigrations(ret)) +} + +// Steps looks at the currently active migration version. +// It will migrate up if n > 0, and down if n < 0. +func (m *Migrate) Steps(n int) error { + if n == 0 { + return ErrNoChange + } + + if err := m.lock(); err != nil { + return err + } + + curVersion, dirty, err := m.databaseDrv.Version() + if err != nil { + return m.unlockErr(err) + } + + if dirty { + return m.unlockErr(ErrDirty{curVersion}) + } + + ret := make(chan interface{}, m.PrefetchMigrations) + + if n > 0 { + go m.readUp(curVersion, n, ret) + } else { + go m.readDown(curVersion, -n, ret) + } + + return m.unlockErr(m.runMigrations(ret)) +} + +// Up looks at the currently active migration version +// and will migrate all the way up (applying all up migrations). +func (m *Migrate) Up() error { + if err := m.lock(); err != nil { + return err + } + + curVersion, dirty, err := m.databaseDrv.Version() + if err != nil { + return m.unlockErr(err) + } + + if dirty { + return m.unlockErr(ErrDirty{curVersion}) + } + + ret := make(chan interface{}, m.PrefetchMigrations) + + go m.readUp(curVersion, -1, ret) + return m.unlockErr(m.runMigrations(ret)) +} + +// Down looks at the currently active migration version +// and will migrate all the way down (applying all down migrations). +func (m *Migrate) Down() error { + if err := m.lock(); err != nil { + return err + } + + curVersion, dirty, err := m.databaseDrv.Version() + if err != nil { + return m.unlockErr(err) + } + + if dirty { + return m.unlockErr(ErrDirty{curVersion}) + } + + ret := make(chan interface{}, m.PrefetchMigrations) + go m.readDown(curVersion, -1, ret) + return m.unlockErr(m.runMigrations(ret)) +} + +// Drop deletes everything in the database. +func (m *Migrate) Drop() error { + if err := m.lock(); err != nil { + return err + } + if err := m.databaseDrv.Drop(); err != nil { + return m.unlockErr(err) + } + return m.unlock() +} + +// Run runs any migration provided by you against the database. +// It does not check any currently active version in database. +// Usually you don't need this function at all. Use Migrate, +// Steps, Up or Down instead. 
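+//
+// A sketch (assumption: a hand-built migration rather than one read from a
+// source driver):
+//
+//	body := ioutil.NopCloser(strings.NewReader("CREATE TABLE t (x int);"))
+//	migr, err := NewMigration(body, "manual", 1, 1)
+//	if err == nil {
+//		err = m.Run(migr)
+//	}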
+func (m *Migrate) Run(migration ...*Migration) error { + if len(migration) == 0 { + return ErrNoChange + } + + if err := m.lock(); err != nil { + return err + } + + curVersion, dirty, err := m.databaseDrv.Version() + if err != nil { + return m.unlockErr(err) + } + + if dirty { + return m.unlockErr(ErrDirty{curVersion}) + } + + ret := make(chan interface{}, m.PrefetchMigrations) + + go func() { + defer close(ret) + for _, migr := range migration { + if m.PrefetchMigrations > 0 && migr.Body != nil { + m.logVerbosePrintf("Start buffering %v\n", migr.LogString()) + } else { + m.logVerbosePrintf("Scheduled %v\n", migr.LogString()) + } + + ret <- migr + go migr.Buffer() + } + }() + + return m.unlockErr(m.runMigrations(ret)) +} + +// Force sets a migration version. +// It does not check any currently active version in database. +// It resets the dirty state to false. +func (m *Migrate) Force(version int) error { + if version < -1 { + panic("version must be >= -1") + } + + if err := m.lock(); err != nil { + return err + } + + if err := m.databaseDrv.SetVersion(version, false); err != nil { + return m.unlockErr(err) + } + + return m.unlock() +} + +// Version returns the currently active migration version. +// If no migration has been applied, yet, it will return ErrNilVersion. +func (m *Migrate) Version() (version uint, dirty bool, err error) { + v, d, err := m.databaseDrv.Version() + if err != nil { + return 0, false, err + } + + if v == database.NilVersion { + return 0, false, ErrNilVersion + } + + return suint(v), d, nil +} + +// read reads either up or down migrations from source `from` to `to`. +// Each migration is then written to the ret channel. +// If an error occurs during reading, that error is written to the ret channel, too. +// Once read is done reading it will close the ret channel. +func (m *Migrate) read(from int, to int, ret chan<- interface{}) { + defer close(ret) + + // check if from version exists + if from >= 0 { + if m.versionExists(suint(from)) != nil { + ret <- os.ErrNotExist + return + } + } + + // check if to version exists + if to >= 0 { + if m.versionExists(suint(to)) != nil { + ret <- os.ErrNotExist + return + } + } + + // no change? + if from == to { + ret <- ErrNoChange + return + } + + if from < to { + // it's going up + // apply first migration if from is nil version + if from == -1 { + firstVersion, err := m.sourceDrv.First() + if err != nil { + ret <- err + return + } + + migr, err := m.newMigration(firstVersion, int(firstVersion)) + if err != nil { + ret <- err + return + } + + ret <- migr + go migr.Buffer() + from = int(firstVersion) + } + + // run until we reach target ... + for from < to { + if m.stop() { + return + } + + next, err := m.sourceDrv.Next(suint(from)) + if err != nil { + ret <- err + return + } + + migr, err := m.newMigration(next, int(next)) + if err != nil { + ret <- err + return + } + + ret <- migr + go migr.Buffer() + from = int(next) + } + + } else { + // it's going down + // run until we reach target ... 
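+		// Worked example (illustration only): with source versions {1, 3, 4},
+		// from=4 and to=-1, Prev walks 4 -> 3 -> 1 and finally emits the nil
+		// migration 1 -> -1 once Prev(1) reports os.ErrNotExist.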
+ for from > to && from >= 0 { + if m.stop() { + return + } + + prev, err := m.sourceDrv.Prev(suint(from)) + if os.IsNotExist(err) && to == -1 { + // apply nil migration + migr, err := m.newMigration(suint(from), -1) + if err != nil { + ret <- err + return + } + ret <- migr + go migr.Buffer() + return + + } else if err != nil { + ret <- err + return + } + + migr, err := m.newMigration(suint(from), int(prev)) + if err != nil { + ret <- err + return + } + + ret <- migr + go migr.Buffer() + from = int(prev) + } + } +} + +// readUp reads up migrations from `from` limited by `limit`. +// limit can be -1, implying no limit and reading until there are no more migrations. +// Each migration is then written to the ret channel. +// If an error occurs during reading, that error is written to the ret channel, too. +// Once readUp is done reading it will close the ret channel. +func (m *Migrate) readUp(from int, limit int, ret chan<- interface{}) { + defer close(ret) + + // check if from version exists + if from >= 0 { + if m.versionExists(suint(from)) != nil { + ret <- os.ErrNotExist + return + } + } + + if limit == 0 { + ret <- ErrNoChange + return + } + + count := 0 + for count < limit || limit == -1 { + if m.stop() { + return + } + + // apply first migration if from is nil version + if from == -1 { + firstVersion, err := m.sourceDrv.First() + if err != nil { + ret <- err + return + } + + migr, err := m.newMigration(firstVersion, int(firstVersion)) + if err != nil { + ret <- err + return + } + + ret <- migr + go migr.Buffer() + from = int(firstVersion) + count++ + continue + } + + // apply next migration + next, err := m.sourceDrv.Next(suint(from)) + if os.IsNotExist(err) { + // no limit, but no migrations applied? + if limit == -1 && count == 0 { + ret <- ErrNoChange + return + } + + // no limit, reached end + if limit == -1 { + return + } + + // reached end, and didn't apply any migrations + if limit > 0 && count == 0 { + ret <- os.ErrNotExist + return + } + + // applied fewer migrations than limit? + if count < limit { + ret <- ErrShortLimit{suint(limit - count)} + return + } + } + if err != nil { + ret <- err + return + } + + migr, err := m.newMigration(next, int(next)) + if err != nil { + ret <- err + return + } + + ret <- migr + go migr.Buffer() + from = int(next) + count++ + } +} + +// readDown reads down migrations from `from` limited by `limit`. +// limit can be -1, implying no limit and reading until there are no more migrations. +// Each migration is then written to the ret channel. +// If an error occurs during reading, that error is written to the ret channel, too. +// Once readDown is done reading it will close the ret channel.
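+//
+// For example (illustrative): with source versions {1, 3, 4}, readDown(4, 2, ret)
+// emits the migrations 4->3 and 3->1, while readDown(4, -1, ret) additionally
+// emits 1->-1, the nil migration that returns the database to NilVersion.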
+func (m *Migrate) readDown(from int, limit int, ret chan<- interface{}) { + defer close(ret) + + // check if from version exists + if from >= 0 { + if m.versionExists(suint(from)) != nil { + ret <- os.ErrNotExist + return + } + } + + if limit == 0 { + ret <- ErrNoChange + return + } + + // no change if already at nil version + if from == -1 && limit == -1 { + ret <- ErrNoChange + return + } + + // can't go over limit if already at nil version + if from == -1 && limit > 0 { + ret <- os.ErrNotExist + return + } + + count := 0 + for count < limit || limit == -1 { + if m.stop() { + return + } + + prev, err := m.sourceDrv.Prev(suint(from)) + if os.IsNotExist(err) { + // no limit or haven't reached limit, apply "first" migration + if limit == -1 || limit-count > 0 { + firstVersion, err := m.sourceDrv.First() + if err != nil { + ret <- err + return + } + + migr, err := m.newMigration(firstVersion, -1) + if err != nil { + ret <- err + return + } + ret <- migr + go migr.Buffer() + count++ + } + + if count < limit { + ret <- ErrShortLimit{suint(limit - count)} + } + return + } + if err != nil { + ret <- err + return + } + + migr, err := m.newMigration(suint(from), int(prev)) + if err != nil { + ret <- err + return + } + + ret <- migr + go migr.Buffer() + from = int(prev) + count++ + } +} + +// runMigrations reads *Migration and error from a channel. Any other type +// sent on this channel will result in a panic. Each migration is then +// proxied to the database driver and run against the database. +// Before running a newly received migration it will check if it's supposed +// to stop execution because it might have received a stop signal on the +// GracefulStop channel. +func (m *Migrate) runMigrations(ret <-chan interface{}) error { + for r := range ret { + + if m.stop() { + return nil + } + + switch r.(type) { + case error: + return r.(error) + + case *Migration: + migr := r.(*Migration) + + // set version with dirty state + if err := m.databaseDrv.SetVersion(migr.TargetVersion, true); err != nil { + return err + } + + if migr.Body != nil { + m.logVerbosePrintf("Read and execute %v\n", migr.LogString()) + if err := m.databaseDrv.Run(migr.BufferedBody); err != nil { + return err + } + } + + // set clean state + if err := m.databaseDrv.SetVersion(migr.TargetVersion, false); err != nil { + return err + } + + endTime := time.Now() + readTime := migr.FinishedReading.Sub(migr.StartedBuffering) + runTime := endTime.Sub(migr.FinishedReading) + + // log either verbose or normal + if m.Log != nil { + if m.Log.Verbose() { + m.logPrintf("Finished %v (read %v, ran %v)\n", migr.LogString(), readTime, runTime) + } else { + m.logPrintf("%v (%v)\n", migr.LogString(), readTime+runTime) + } + } + + default: + panic("unknown type") + } + } + return nil +} + +// versionExists checks the source if either the up or down migration for +// the specified migration version exists. +func (m *Migrate) versionExists(version uint) error { + // try up migration first + up, _, err := m.sourceDrv.ReadUp(version) + if err == nil { + defer up.Close() + } + if os.IsExist(err) { + return nil + } else if !os.IsNotExist(err) { + return err + } + + // then try down migration + down, _, err := m.sourceDrv.ReadDown(version) + if err == nil { + defer down.Close() + } + if os.IsExist(err) { + return nil + } else if !os.IsNotExist(err) { + return err + } + + return os.ErrNotExist +} + +// stop returns true if no more migrations should be run against the database +// because a stop signal was received on the GracefulStop channel. 
+// Calls are cheap and this function is not blocking. +func (m *Migrate) stop() bool { + if m.isGracefulStop { + return true + } + + select { + case <-m.GracefulStop: + m.isGracefulStop = true + return true + + default: + return false + } +} + +// newMigration is a helper func that returns a *Migration for the +// specified version and targetVersion. +func (m *Migrate) newMigration(version uint, targetVersion int) (*Migration, error) { + var migr *Migration + + if targetVersion >= int(version) { + r, identifier, err := m.sourceDrv.ReadUp(version) + if os.IsNotExist(err) { + // create "empty" migration + migr, err = NewMigration(nil, "", version, targetVersion) + if err != nil { + return nil, err + } + + } else if err != nil { + return nil, err + + } else { + // create migration from up source + migr, err = NewMigration(r, identifier, version, targetVersion) + if err != nil { + return nil, err + } + } + + } else { + r, identifier, err := m.sourceDrv.ReadDown(version) + if os.IsNotExist(err) { + // create "empty" migration + migr, err = NewMigration(nil, "", version, targetVersion) + if err != nil { + return nil, err + } + + } else if err != nil { + return nil, err + + } else { + // create migration from down source + migr, err = NewMigration(r, identifier, version, targetVersion) + if err != nil { + return nil, err + } + } + } + + if m.PrefetchMigrations > 0 && migr.Body != nil { + m.logVerbosePrintf("Start buffering %v\n", migr.LogString()) + } else { + m.logVerbosePrintf("Scheduled %v\n", migr.LogString()) + } + + return migr, nil +} + +// lock is a thread safe helper function to lock the database. +// It should be called as late as possible when running migrations. +func (m *Migrate) lock() error { + m.isLockedMu.Lock() + defer m.isLockedMu.Unlock() + + if m.isLocked { + return ErrLocked + } + + // create done channel, used in the timeout goroutine + done := make(chan bool, 1) + defer func() { + done <- true + }() + + // use errchan to signal error back to this context + errchan := make(chan error, 2) + + // start timeout goroutine + timeout := time.After(m.LockTimeout) + go func() { + for { + select { + case <-done: + return + case <-timeout: + errchan <- ErrLockTimeout + return + } + } + }() + + // now try to acquire the lock + go func() { + if err := m.databaseDrv.Lock(); err != nil { + errchan <- err + } else { + errchan <- nil + } + return + }() + + // wait until we either receive ErrLockTimeout or an error from the Lock operation + err := <-errchan + if err == nil { + m.isLocked = true + } + return err +} + +// unlock is a thread safe helper function to unlock the database. +// It should be called as early as possible when no more migrations are +// expected to be executed. +func (m *Migrate) unlock() error { + m.isLockedMu.Lock() + defer m.isLockedMu.Unlock() + + if err := m.databaseDrv.Unlock(); err != nil { + // BUG: Can potentially create a deadlock. Add a timeout. + return err + } + + m.isLocked = false + return nil +} + +// unlockErr calls unlock and returns a combined error +// if a prevErr is not nil. +func (m *Migrate) unlockErr(prevErr error) error { + if err := m.unlock(); err != nil { + return NewMultiError(prevErr, err) + } + return prevErr +} + +// logPrintf writes to m.Log if not nil +func (m *Migrate) logPrintf(format string, v ...interface{}) { + if m.Log != nil { + m.Log.Printf(format, v...) + } +} + +// logVerbosePrintf writes to m.Log if not nil. Use for verbose logging output.
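+//
+// A minimal Logger implementation sketch (assumption: the standard library
+// log package is an acceptable sink):
+//
+//	type stdLogger struct{ verbose bool }
+//
+//	func (l stdLogger) Printf(format string, v ...interface{}) { log.Printf(format, v...) }
+//	func (l stdLogger) Verbose() bool                           { return l.verbose }
+//
+//	m.Log = stdLogger{verbose: true} // route migrate output through package log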
+func (m *Migrate) logVerbosePrintf(format string, v ...interface{}) { + if m.Log != nil && m.Log.Verbose() { + m.Log.Printf(format, v...) + } +} diff --git a/vendor/github.com/rdallman/migrate/migrate_test.go b/vendor/github.com/rdallman/migrate/migrate_test.go new file mode 100644 index 000000000..0ec4bce21 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/migrate_test.go @@ -0,0 +1,941 @@ +package migrate + +import ( + "bytes" + "database/sql" + "io/ioutil" + "log" + "os" + "testing" + + dStub "github.com/mattes/migrate/database/stub" + "github.com/mattes/migrate/source" + sStub "github.com/mattes/migrate/source/stub" +) + +// sourceStubMigrations hold the following migrations: +// u = up migration, d = down migration, n = version +// | 1 | - | 3 | 4 | 5 | - | 7 | +// | u d | - | u | u d | d | - | u d | +var sourceStubMigrations *source.Migrations + +func init() { + sourceStubMigrations = source.NewMigrations() + sourceStubMigrations.Append(&source.Migration{Version: 1, Direction: source.Up}) + sourceStubMigrations.Append(&source.Migration{Version: 1, Direction: source.Down}) + sourceStubMigrations.Append(&source.Migration{Version: 3, Direction: source.Up}) + sourceStubMigrations.Append(&source.Migration{Version: 4, Direction: source.Up}) + sourceStubMigrations.Append(&source.Migration{Version: 4, Direction: source.Down}) + sourceStubMigrations.Append(&source.Migration{Version: 5, Direction: source.Down}) + sourceStubMigrations.Append(&source.Migration{Version: 7, Direction: source.Up}) + sourceStubMigrations.Append(&source.Migration{Version: 7, Direction: source.Down}) +} + +type DummyInstance struct{ Name string } + +func TestNew(t *testing.T) { + m, err := New("stub://", "stub://") + if err != nil { + t.Fatal(err) + } + + if m.sourceName != "stub" { + t.Errorf("expected stub, got %v", m.sourceName) + } + if m.sourceDrv == nil { + t.Error("expected sourceDrv not to be nil") + } + + if m.databaseName != "stub" { + t.Errorf("expected stub, got %v", m.databaseName) + } + if m.databaseDrv == nil { + t.Error("expected databaseDrv not to be nil") + } +} + +func ExampleNew() { + // Read migrations from /home/mattes/migrations and connect to a local postgres database. + m, err := New("file:///home/mattes/migrations", "postgres://mattes:secret@localhost:5432/database?sslmode=disable") + if err != nil { + log.Fatal(err) + } + + // Migrate all the way up ... + if err := m.Up(); err != nil { + log.Fatal(err) + } +} + +func TestNewWithDatabaseInstance(t *testing.T) { + dummyDb := &DummyInstance{"database"} + dbInst, err := dStub.WithInstance(dummyDb, &dStub.Config{}) + if err != nil { + t.Fatal(err) + } + + m, err := NewWithDatabaseInstance("stub://", "stub", dbInst) + if err != nil { + t.Fatal(err) + } + + if m.sourceName != "stub" { + t.Errorf("expected stub, got %v", m.sourceName) + } + if m.sourceDrv == nil { + t.Error("expected sourceDrv not to be nil") + } + + if m.databaseName != "stub" { + t.Errorf("expected stub, got %v", m.databaseName) + } + if m.databaseDrv == nil { + t.Error("expected databaseDrv not to be nil") + } +} + +func ExampleNewWithDatabaseInstance() { + // Create and use an existing database instance. + db, err := sql.Open("postgres", "postgres://mattes:secret@localhost:5432/database?sslmode=disable") + if err != nil { + log.Fatal(err) + } + defer db.Close() + + // Create driver instance from db. + // Check each driver if it supports the WithInstance function. 
+ // `import "github.com/mattes/migrate/database/postgres"` + instance, err := dStub.WithInstance(db, &dStub.Config{}) + if err != nil { + log.Fatal(err) + } + + // Read migrations from /home/mattes/migrations and connect to a local postgres database. + m, err := NewWithDatabaseInstance("file:///home/mattes/migrations", "postgres", instance) + if err != nil { + log.Fatal(err) + } + + // Migrate all the way up ... + if err := m.Up(); err != nil { + log.Fatal(err) + } +} + +func TestNewWithSourceInstance(t *testing.T) { + dummySource := &DummyInstance{"source"} + sInst, err := sStub.WithInstance(dummySource, &sStub.Config{}) + if err != nil { + t.Fatal(err) + } + + m, err := NewWithSourceInstance("stub", sInst, "stub://") + if err != nil { + t.Fatal(err) + } + + if m.sourceName != "stub" { + t.Errorf("expected stub, got %v", m.sourceName) + } + if m.sourceDrv == nil { + t.Error("expected sourceDrv not to be nil") + } + + if m.databaseName != "stub" { + t.Errorf("expected stub, got %v", m.databaseName) + } + if m.databaseDrv == nil { + t.Error("expected databaseDrv not to be nil") + } +} + +func ExampleNewWithSourceInstance() { + di := &DummyInstance{"think any client required for a source here"} + + // Create driver instance from DummyInstance di. + // Check each driver if it supports the WithInstance function. + // `import "github.com/mattes/migrate/source/stub"` + instance, err := sStub.WithInstance(di, &sStub.Config{}) + if err != nil { + log.Fatal(err) + } + + // Read migrations from Stub and connect to a local postgres database. + m, err := NewWithSourceInstance("stub", instance, "postgres://mattes:secret@localhost:5432/database?sslmode=disable") + if err != nil { + log.Fatal(err) + } + + // Migrate all the way up ... + if err := m.Up(); err != nil { + log.Fatal(err) + } +} + +func TestNewWithInstance(t *testing.T) { + dummyDb := &DummyInstance{"database"} + dbInst, err := dStub.WithInstance(dummyDb, &dStub.Config{}) + if err != nil { + t.Fatal(err) + } + + dummySource := &DummyInstance{"source"} + sInst, err := sStub.WithInstance(dummySource, &sStub.Config{}) + if err != nil { + t.Fatal(err) + } + + m, err := NewWithInstance("stub", sInst, "stub", dbInst) + if err != nil { + t.Fatal(err) + } + + if m.sourceName != "stub" { + t.Errorf("expected stub, got %v", m.sourceName) + } + if m.sourceDrv == nil { + t.Error("expected sourceDrv not to be nil") + } + + if m.databaseName != "stub" { + t.Errorf("expected stub, got %v", m.databaseName) + } + if m.databaseDrv == nil { + t.Error("expected databaseDrv not to be nil") + } +} + +func ExampleNewWithInstance() { + // See NewWithDatabaseInstance and NewWithSourceInstance for an example.
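+	//
+	// A combined sketch (reusing the stub instances created in the tests above):
+	//
+	//	m, err := NewWithInstance("stub", sInst, "stub", dbInst)
+	//	if err != nil {
+	//		log.Fatal(err)
+	//	}
+	//	_ = m.Up()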
+} + +func TestClose(t *testing.T) { + m, _ := New("stub://", "stub://") + sourceErr, databaseErr := m.Close() + if sourceErr != nil { + t.Error(sourceErr) + } + if databaseErr != nil { + t.Error(databaseErr) + } +} + +func TestMigrate(t *testing.T) { + m, _ := New("stub://", "stub://") + m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations + dbDrv := m.databaseDrv.(*dStub.Stub) + seq := newMigSeq() + + tt := []struct { + version uint + expectErr error + expectVersion uint + expectSeq migrationSequence + }{ + // migrate all the way Up in single steps + {version: 0, expectErr: os.ErrNotExist}, + {version: 1, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(1))}, + {version: 2, expectErr: os.ErrNotExist}, + {version: 3, expectErr: nil, expectVersion: 3, expectSeq: seq.add(M(3))}, + {version: 4, expectErr: nil, expectVersion: 4, expectSeq: seq.add(M(4))}, + {version: 5, expectErr: nil, expectVersion: 5, expectSeq: seq.add()}, // 5 has no up migration + {version: 6, expectErr: os.ErrNotExist}, + {version: 7, expectErr: nil, expectVersion: 7, expectSeq: seq.add(M(7))}, + {version: 8, expectErr: os.ErrNotExist}, + + // migrate all the way Down in single steps + {version: 6, expectErr: os.ErrNotExist}, + {version: 5, expectErr: nil, expectVersion: 5, expectSeq: seq.add(M(7, 5))}, + {version: 4, expectErr: nil, expectVersion: 4, expectSeq: seq.add(M(5, 4))}, + {version: 3, expectErr: nil, expectVersion: 3, expectSeq: seq.add(M(4, 3))}, + {version: 2, expectErr: os.ErrNotExist}, + {version: 1, expectErr: nil, expectVersion: 1, expectSeq: seq.add()}, // 3 has no down migration + {version: 0, expectErr: os.ErrNotExist}, + + // migrate all the way Up in one step + {version: 7, expectErr: nil, expectVersion: 7, expectSeq: seq.add(M(3), M(4), M(7))}, + + // migrate all the way Down in one step + {version: 1, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(7, 5), M(5, 4), M(4, 3), M(3, 1))}, + + // can't migrate the same version twice + {version: 1, expectErr: ErrNoChange}, + } + + for i, v := range tt { + err := m.Migrate(v.version) + if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || + (v.expectErr != os.ErrNotExist && err != v.expectErr) { + t.Errorf("expected err %v, got %v, in %v", v.expectErr, err, i) + + } else if err == nil { + version, _, err := m.Version() + if err != nil { + t.Error(err) + } + if version != v.expectVersion { + t.Errorf("expected version %v, got %v, in %v", v.expectVersion, version, i) + } + equalDbSeq(t, i, v.expectSeq, dbDrv) + } + } +} + +func TestMigrateDirty(t *testing.T) { + m, _ := New("stub://", "stub://") + dbDrv := m.databaseDrv.(*dStub.Stub) + if err := dbDrv.SetVersion(0, true); err != nil { + t.Fatal(err) + } + + err := m.Migrate(1) + if _, ok := err.(ErrDirty); !ok { + t.Fatalf("expected ErrDirty, got %v", err) + } +} + +func TestSteps(t *testing.T) { + m, _ := New("stub://", "stub://") + m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations + dbDrv := m.databaseDrv.(*dStub.Stub) + seq := newMigSeq() + + tt := []struct { + n int + expectErr error + expectVersion int + expectSeq migrationSequence + }{ + // step must be != 0 + {n: 0, expectErr: ErrNoChange}, + + // can't go Down if ErrNilVersion + {n: -1, expectErr: os.ErrNotExist}, + + // migrate all the way Up + {n: 1, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(1))}, + {n: 1, expectErr: nil, expectVersion: 3, expectSeq: seq.add(M(3))}, + {n: 1, expectErr: nil, expectVersion: 4, expectSeq: seq.add(M(4))}, + {n: 1, expectErr: nil, expectVersion: 5, expectSeq: 
seq.add()}, + {n: 1, expectErr: nil, expectVersion: 7, expectSeq: seq.add(M(7))}, + {n: 1, expectErr: os.ErrNotExist}, + + // migrate all the way Down + {n: -1, expectErr: nil, expectVersion: 5, expectSeq: seq.add(M(7, 5))}, + {n: -1, expectErr: nil, expectVersion: 4, expectSeq: seq.add(M(5, 4))}, + {n: -1, expectErr: nil, expectVersion: 3, expectSeq: seq.add(M(4, 3))}, + {n: -1, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(3, 1))}, + {n: -1, expectErr: nil, expectVersion: -1, expectSeq: seq.add(M(1, -1))}, + + // migrate Up in bigger step + {n: 4, expectErr: nil, expectVersion: 5, expectSeq: seq.add(M(1), M(3), M(4), M(5))}, + + // apply one migration, then reaches out of boundary + {n: 2, expectErr: ErrShortLimit{1}, expectVersion: 7, expectSeq: seq.add(M(7))}, + + // migrate Down in bigger step + {n: -4, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(7, 5), M(5, 4), M(4, 3), M(3, 1))}, + + // apply one migration, then reaches out of boundary + {n: -2, expectErr: ErrShortLimit{1}, expectVersion: -1, expectSeq: seq.add(M(1, -1))}, + } + + for i, v := range tt { + err := m.Steps(v.n) + if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || + (v.expectErr != os.ErrNotExist && err != v.expectErr) { + t.Errorf("expected err %v, got %v, in %v", v.expectErr, err, i) + + } else if err == nil { + version, _, err := m.Version() + if err != ErrNilVersion && err != nil { + t.Error(err) + } + if v.expectVersion == -1 && err != ErrNilVersion { + t.Errorf("expected ErrNilVersion, got %v, in %v", version, i) + + } else if v.expectVersion >= 0 && version != uint(v.expectVersion) { + t.Errorf("expected version %v, got %v, in %v", v.expectVersion, version, i) + } + equalDbSeq(t, i, v.expectSeq, dbDrv) + } + } +} + +func TestStepsDirty(t *testing.T) { + m, _ := New("stub://", "stub://") + dbDrv := m.databaseDrv.(*dStub.Stub) + if err := dbDrv.SetVersion(0, true); err != nil { + t.Fatal(err) + } + + err := m.Steps(1) + if _, ok := err.(ErrDirty); !ok { + t.Fatalf("expected ErrDirty, got %v", err) + } +} + +func TestUpAndDown(t *testing.T) { + m, _ := New("stub://", "stub://") + m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations + dbDrv := m.databaseDrv.(*dStub.Stub) + seq := newMigSeq() + + // go Up first + if err := m.Up(); err != nil { + t.Fatal(err) + } + equalDbSeq(t, 0, seq.add(M(1), M(3), M(4), M(5), M(7)), dbDrv) + + // go Down + if err := m.Down(); err != nil { + t.Fatal(err) + } + equalDbSeq(t, 1, seq.add(M(7, 5), M(5, 4), M(4, 3), M(3, 1), M(1, -1)), dbDrv) + + // go 1 Up and then all the way Up + if err := m.Steps(1); err != nil { + t.Fatal(err) + } + if err := m.Up(); err != nil { + t.Fatal(err) + } + equalDbSeq(t, 2, seq.add(M(1), M(3), M(4), M(5), M(7)), dbDrv) + + // go 1 Down and then all the way Down + if err := m.Steps(-1); err != nil { + t.Fatal(err) + } + if err := m.Down(); err != nil { + t.Fatal(err) + } + equalDbSeq(t, 0, seq.add(M(7, 5), M(5, 4), M(4, 3), M(3, 1), M(1, -1)), dbDrv) +} + +func TestUpDirty(t *testing.T) { + m, _ := New("stub://", "stub://") + dbDrv := m.databaseDrv.(*dStub.Stub) + if err := dbDrv.SetVersion(0, true); err != nil { + t.Fatal(err) + } + + err := m.Up() + if _, ok := err.(ErrDirty); !ok { + t.Fatalf("expected ErrDirty, got %v", err) + } +} + +func TestDownDirty(t *testing.T) { + m, _ := New("stub://", "stub://") + dbDrv := m.databaseDrv.(*dStub.Stub) + if err := dbDrv.SetVersion(0, true); err != nil { + t.Fatal(err) + } + + err := m.Down() + if _, ok := err.(ErrDirty); !ok { + t.Fatalf("expected ErrDirty, got %v", err) + } 
+} + +func TestDrop(t *testing.T) { + m, _ := New("stub://", "stub://") + m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations + dbDrv := m.databaseDrv.(*dStub.Stub) + + if err := m.Drop(); err != nil { + t.Fatal(err) + } + + if dbDrv.MigrationSequence[len(dbDrv.MigrationSequence)-1] != dStub.DROP { + t.Fatalf("expected database to DROP, got sequence %v", dbDrv.MigrationSequence) + } +} + +func TestVersion(t *testing.T) { + m, _ := New("stub://", "stub://") + dbDrv := m.databaseDrv.(*dStub.Stub) + + _, _, err := m.Version() + if err != ErrNilVersion { + t.Fatalf("expected ErrNilVersion, got %v", err) + } + + if err := dbDrv.Run(bytes.NewBufferString("1_up")); err != nil { + t.Fatal(err) + } + + if err := dbDrv.SetVersion(1, false); err != nil { + t.Fatal(err) + } + + v, _, err := m.Version() + if err != nil { + t.Fatal(err) + } + + if v != 1 { + t.Fatalf("expected version 1, got %v", v) + } +} + +func TestRun(t *testing.T) { + m, _ := New("stub://", "stub://") + + mx, err := NewMigration(nil, "", 1, 2) + if err != nil { + t.Fatal(err) + } + + if err := m.Run(mx); err != nil { + t.Fatal(err) + } + + v, _, err := m.Version() + if err != nil { + t.Fatal(err) + } + + if v != 2 { + t.Errorf("expected version 2, got %v", v) + } +} + +func TestRunDirty(t *testing.T) { + m, _ := New("stub://", "stub://") + dbDrv := m.databaseDrv.(*dStub.Stub) + if err := dbDrv.SetVersion(0, true); err != nil { + t.Fatal(err) + } + + migr, err := NewMigration(nil, "", 1, 2) + if err != nil { + t.Fatal(err) + } + + err = m.Run(migr) + if _, ok := err.(ErrDirty); !ok { + t.Fatalf("expected ErrDirty, got %v", err) + } +} + +func TestForce(t *testing.T) { + m, _ := New("stub://", "stub://") + m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations + + if err := m.Force(7); err != nil { + t.Fatal(err) + } + + v, dirty, err := m.Version() + if err != nil { + t.Fatal(err) + } + if dirty { + t.Errorf("expected dirty to be false") + } + if v != 7 { + t.Errorf("expected version to be 7") + } +} + +func TestForceDirty(t *testing.T) { + m, _ := New("stub://", "stub://") + dbDrv := m.databaseDrv.(*dStub.Stub) + if err := dbDrv.SetVersion(0, true); err != nil { + t.Fatal(err) + } + + if err := m.Force(1); err != nil { + t.Fatal(err) + } +} + +func TestRead(t *testing.T) { + m, _ := New("stub://", "stub://") + m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations + + tt := []struct { + from int + to int + expectErr error + expectMigrations migrationSequence + }{ + {from: -1, to: -1, expectErr: ErrNoChange}, + {from: -1, to: 0, expectErr: os.ErrNotExist}, + {from: -1, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(1))}, + {from: -1, to: 2, expectErr: os.ErrNotExist}, + {from: -1, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3))}, + {from: -1, to: 4, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3), M(4))}, + {from: -1, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3), M(4), M(5))}, + {from: -1, to: 6, expectErr: os.ErrNotExist}, + {from: -1, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3), M(4), M(5), M(7))}, + {from: -1, to: 8, expectErr: os.ErrNotExist}, + + {from: 0, to: -1, expectErr: os.ErrNotExist}, + {from: 0, to: 0, expectErr: os.ErrNotExist}, + {from: 0, to: 1, expectErr: os.ErrNotExist}, + {from: 0, to: 2, expectErr: os.ErrNotExist}, + {from: 0, to: 3, expectErr: os.ErrNotExist}, + {from: 0, to: 4, expectErr: os.ErrNotExist}, + {from: 0, to: 5, expectErr: os.ErrNotExist}, + {from: 0, to: 6, expectErr: os.ErrNotExist}, + {from: 0, to: 7, expectErr: 
os.ErrNotExist}, + {from: 0, to: 8, expectErr: os.ErrNotExist}, + + {from: 1, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(1, -1))}, + {from: 1, to: 0, expectErr: os.ErrNotExist}, + {from: 1, to: 1, expectErr: ErrNoChange}, + {from: 1, to: 2, expectErr: os.ErrNotExist}, + {from: 1, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(3))}, + {from: 1, to: 4, expectErr: nil, expectMigrations: newMigSeq(M(3), M(4))}, + {from: 1, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(3), M(4), M(5))}, + {from: 1, to: 6, expectErr: os.ErrNotExist}, + {from: 1, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(3), M(4), M(5), M(7))}, + {from: 1, to: 8, expectErr: os.ErrNotExist}, + + {from: 2, to: -1, expectErr: os.ErrNotExist}, + {from: 2, to: 0, expectErr: os.ErrNotExist}, + {from: 2, to: 1, expectErr: os.ErrNotExist}, + {from: 2, to: 2, expectErr: os.ErrNotExist}, + {from: 2, to: 3, expectErr: os.ErrNotExist}, + {from: 2, to: 4, expectErr: os.ErrNotExist}, + {from: 2, to: 5, expectErr: os.ErrNotExist}, + {from: 2, to: 6, expectErr: os.ErrNotExist}, + {from: 2, to: 7, expectErr: os.ErrNotExist}, + {from: 2, to: 8, expectErr: os.ErrNotExist}, + + {from: 3, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(3, 1), M(1, -1))}, + {from: 3, to: 0, expectErr: os.ErrNotExist}, + {from: 3, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(3, 1))}, + {from: 3, to: 2, expectErr: os.ErrNotExist}, + {from: 3, to: 3, expectErr: ErrNoChange}, + {from: 3, to: 4, expectErr: nil, expectMigrations: newMigSeq(M(4))}, + {from: 3, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(4), M(5))}, + {from: 3, to: 6, expectErr: os.ErrNotExist}, + {from: 3, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(4), M(5), M(7))}, + {from: 3, to: 8, expectErr: os.ErrNotExist}, + + {from: 4, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(4, 3), M(3, 1), M(1, -1))}, + {from: 4, to: 0, expectErr: os.ErrNotExist}, + {from: 4, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(4, 3), M(3, 1))}, + {from: 4, to: 2, expectErr: os.ErrNotExist}, + {from: 4, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(4, 3))}, + {from: 4, to: 4, expectErr: ErrNoChange}, + {from: 4, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(5))}, + {from: 4, to: 6, expectErr: os.ErrNotExist}, + {from: 4, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(5), M(7))}, + {from: 4, to: 8, expectErr: os.ErrNotExist}, + + {from: 5, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3), M(3, 1), M(1, -1))}, + {from: 5, to: 0, expectErr: os.ErrNotExist}, + {from: 5, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3), M(3, 1))}, + {from: 5, to: 2, expectErr: os.ErrNotExist}, + {from: 5, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3))}, + {from: 5, to: 4, expectErr: nil, expectMigrations: newMigSeq(M(5, 4))}, + {from: 5, to: 5, expectErr: ErrNoChange}, + {from: 5, to: 6, expectErr: os.ErrNotExist}, + {from: 5, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(7))}, + {from: 5, to: 8, expectErr: os.ErrNotExist}, + + {from: 6, to: -1, expectErr: os.ErrNotExist}, + {from: 6, to: 0, expectErr: os.ErrNotExist}, + {from: 6, to: 1, expectErr: os.ErrNotExist}, + {from: 6, to: 2, expectErr: os.ErrNotExist}, + {from: 6, to: 3, expectErr: os.ErrNotExist}, + {from: 6, to: 4, expectErr: os.ErrNotExist}, + {from: 6, to: 5, expectErr: os.ErrNotExist}, + {from: 6, to: 6, expectErr: os.ErrNotExist}, + {from: 6, to: 7, expectErr: os.ErrNotExist}, + {from: 6, to: 8, expectErr: os.ErrNotExist}, 
+ + {from: 7, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4), M(4, 3), M(3, 1), M(1, -1))}, + {from: 7, to: 0, expectErr: os.ErrNotExist}, + {from: 7, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4), M(4, 3), M(3, 1))}, + {from: 7, to: 2, expectErr: os.ErrNotExist}, + {from: 7, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4), M(4, 3))}, + {from: 7, to: 4, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4))}, + {from: 7, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(7, 5))}, + {from: 7, to: 6, expectErr: os.ErrNotExist}, + {from: 7, to: 7, expectErr: ErrNoChange}, + {from: 7, to: 8, expectErr: os.ErrNotExist}, + + {from: 8, to: -1, expectErr: os.ErrNotExist}, + {from: 8, to: 0, expectErr: os.ErrNotExist}, + {from: 8, to: 1, expectErr: os.ErrNotExist}, + {from: 8, to: 2, expectErr: os.ErrNotExist}, + {from: 8, to: 3, expectErr: os.ErrNotExist}, + {from: 8, to: 4, expectErr: os.ErrNotExist}, + {from: 8, to: 5, expectErr: os.ErrNotExist}, + {from: 8, to: 6, expectErr: os.ErrNotExist}, + {from: 8, to: 7, expectErr: os.ErrNotExist}, + {from: 8, to: 8, expectErr: os.ErrNotExist}, + } + + for i, v := range tt { + ret := make(chan interface{}) + go m.read(v.from, v.to, ret) + migrations, err := migrationsFromChannel(ret) + + if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || + (v.expectErr != os.ErrNotExist && v.expectErr != err) { + t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i) + t.Logf("%v, in %v", migrations, i) + } + if len(v.expectMigrations) > 0 { + equalMigSeq(t, i, v.expectMigrations, migrations) + } + } +} + +func TestReadUp(t *testing.T) { + m, _ := New("stub://", "stub://") + m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations + + tt := []struct { + from int + limit int // -1 means no limit + expectErr error + expectMigrations migrationSequence + }{ + {from: -1, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3), M(4), M(5), M(7))}, + {from: -1, limit: 0, expectErr: ErrNoChange}, + {from: -1, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(1))}, + {from: -1, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3))}, + + {from: 0, limit: -1, expectErr: os.ErrNotExist}, + {from: 0, limit: 0, expectErr: os.ErrNotExist}, + {from: 0, limit: 1, expectErr: os.ErrNotExist}, + {from: 0, limit: 2, expectErr: os.ErrNotExist}, + + {from: 1, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(3), M(4), M(5), M(7))}, + {from: 1, limit: 0, expectErr: ErrNoChange}, + {from: 1, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(3))}, + {from: 1, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(3), M(4))}, + + {from: 2, limit: -1, expectErr: os.ErrNotExist}, + {from: 2, limit: 0, expectErr: os.ErrNotExist}, + {from: 2, limit: 1, expectErr: os.ErrNotExist}, + {from: 2, limit: 2, expectErr: os.ErrNotExist}, + + {from: 3, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(4), M(5), M(7))}, + {from: 3, limit: 0, expectErr: ErrNoChange}, + {from: 3, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(4))}, + {from: 3, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(4), M(5))}, + + {from: 4, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(5), M(7))}, + {from: 4, limit: 0, expectErr: ErrNoChange}, + {from: 4, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(5))}, + {from: 4, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(5), M(7))}, + + {from: 5, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(7))}, + 
{from: 5, limit: 0, expectErr: ErrNoChange}, + {from: 5, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(7))}, + {from: 5, limit: 2, expectErr: ErrShortLimit{1}, expectMigrations: newMigSeq(M(7))}, + + {from: 6, limit: -1, expectErr: os.ErrNotExist}, + {from: 6, limit: 0, expectErr: os.ErrNotExist}, + {from: 6, limit: 1, expectErr: os.ErrNotExist}, + {from: 6, limit: 2, expectErr: os.ErrNotExist}, + + {from: 7, limit: -1, expectErr: ErrNoChange}, + {from: 7, limit: 0, expectErr: ErrNoChange}, + {from: 7, limit: 1, expectErr: os.ErrNotExist}, + {from: 7, limit: 2, expectErr: os.ErrNotExist}, + + {from: 8, limit: -1, expectErr: os.ErrNotExist}, + {from: 8, limit: 0, expectErr: os.ErrNotExist}, + {from: 8, limit: 1, expectErr: os.ErrNotExist}, + {from: 8, limit: 2, expectErr: os.ErrNotExist}, + } + + for i, v := range tt { + ret := make(chan interface{}) + go m.readUp(v.from, v.limit, ret) + migrations, err := migrationsFromChannel(ret) + + if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || + (v.expectErr != os.ErrNotExist && v.expectErr != err) { + t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i) + t.Logf("%v, in %v", migrations, i) + } + if len(v.expectMigrations) > 0 { + equalMigSeq(t, i, v.expectMigrations, migrations) + } + } +} + +func TestReadDown(t *testing.T) { + m, _ := New("stub://", "stub://") + m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations + + tt := []struct { + from int + limit int // -1 means no limit + expectErr error + expectMigrations migrationSequence + }{ + {from: -1, limit: -1, expectErr: ErrNoChange}, + {from: -1, limit: 0, expectErr: ErrNoChange}, + {from: -1, limit: 1, expectErr: os.ErrNotExist}, + {from: -1, limit: 2, expectErr: os.ErrNotExist}, + + {from: 0, limit: -1, expectErr: os.ErrNotExist}, + {from: 0, limit: 0, expectErr: os.ErrNotExist}, + {from: 0, limit: 1, expectErr: os.ErrNotExist}, + {from: 0, limit: 2, expectErr: os.ErrNotExist}, + + {from: 1, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(1, -1))}, + {from: 1, limit: 0, expectErr: ErrNoChange}, + {from: 1, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(1, -1))}, + {from: 1, limit: 2, expectErr: ErrShortLimit{1}, expectMigrations: newMigSeq(M(1, -1))}, + + {from: 2, limit: -1, expectErr: os.ErrNotExist}, + {from: 2, limit: 0, expectErr: os.ErrNotExist}, + {from: 2, limit: 1, expectErr: os.ErrNotExist}, + {from: 2, limit: 2, expectErr: os.ErrNotExist}, + + {from: 3, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(3, 1), M(1, -1))}, + {from: 3, limit: 0, expectErr: ErrNoChange}, + {from: 3, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(3, 1))}, + {from: 3, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(3, 1), M(1, -1))}, + + {from: 4, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(4, 3), M(3, 1), M(1, -1))}, + {from: 4, limit: 0, expectErr: ErrNoChange}, + {from: 4, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(4, 3))}, + {from: 4, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(4, 3), M(3, 1))}, + + {from: 5, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3), M(3, 1), M(1, -1))}, + {from: 5, limit: 0, expectErr: ErrNoChange}, + {from: 5, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(5, 4))}, + {from: 5, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3))}, + + {from: 6, limit: -1, expectErr: os.ErrNotExist}, + {from: 6, limit: 0, expectErr: os.ErrNotExist}, + {from: 6, limit: 1, expectErr: os.ErrNotExist}, + {from: 6, limit: 2, 
expectErr: os.ErrNotExist}, + + {from: 7, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4), M(4, 3), M(3, 1), M(1, -1))}, + {from: 7, limit: 0, expectErr: ErrNoChange}, + {from: 7, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(7, 5))}, + {from: 7, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4))}, + + {from: 8, limit: -1, expectErr: os.ErrNotExist}, + {from: 8, limit: 0, expectErr: os.ErrNotExist}, + {from: 8, limit: 1, expectErr: os.ErrNotExist}, + {from: 8, limit: 2, expectErr: os.ErrNotExist}, + } + + for i, v := range tt { + ret := make(chan interface{}) + go m.readDown(v.from, v.limit, ret) + migrations, err := migrationsFromChannel(ret) + + if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || + (v.expectErr != os.ErrNotExist && v.expectErr != err) { + t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i) + t.Logf("%v, in %v", migrations, i) + } + if len(v.expectMigrations) > 0 { + equalMigSeq(t, i, v.expectMigrations, migrations) + } + } +} + +func TestLock(t *testing.T) { + m, _ := New("stub://", "stub://") + if err := m.lock(); err != nil { + t.Fatal(err) + } + + if err := m.lock(); err == nil { + t.Fatal("should be locked already") + } +} + +func migrationsFromChannel(ret chan interface{}) ([]*Migration, error) { + slice := make([]*Migration, 0) + for r := range ret { + switch r.(type) { + case error: + return slice, r.(error) + + case *Migration: + slice = append(slice, r.(*Migration)) + } + } + return slice, nil +} + +type migrationSequence []*Migration + +func newMigSeq(migr ...*Migration) migrationSequence { + return migr +} + +func (m *migrationSequence) add(migr ...*Migration) migrationSequence { + *m = append(*m, migr...) + return *m +} + +func (m *migrationSequence) bodySequence() []string { + r := make([]string, 0) + for _, v := range *m { + if v.Body != nil { + body, err := ioutil.ReadAll(v.Body) + if err != nil { + panic(err) // that should never happen + } + + // reset body reader + // TODO: is there a better/nicer way? 
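+			// One alternative sketch: keep a bytes.Reader and rewind it with
+			// Seek(0, io.SeekStart) instead of allocating a new reader per
+			// call (would require importing "io").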
+ v.Body = ioutil.NopCloser(bytes.NewReader(body)) + + r = append(r, string(body[:])) + } + } + return r +} + +// M is a convenience func to create a new *Migration +func M(version uint, targetVersion ...int) *Migration { + if len(targetVersion) > 1 { + panic("only one targetVersion allowed") + } + ts := int(version) + if len(targetVersion) == 1 { + ts = targetVersion[0] + } + + m, _ := New("stub://", "stub://") + m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations + migr, err := m.newMigration(version, ts) + if err != nil { + panic(err) + } + return migr +} + +func equalMigSeq(t *testing.T, i int, expected, got migrationSequence) { + if len(expected) != len(got) { + t.Errorf("expected migrations %v, got %v, in %v", expected, got, i) + + } else { + for ii := 0; ii < len(expected); ii++ { + if expected[ii].Version != got[ii].Version { + t.Errorf("expected version %v, got %v, in %v", expected[ii].Version, got[ii].Version, i) + } + + if expected[ii].TargetVersion != got[ii].TargetVersion { + t.Errorf("expected targetVersion %v, got %v, in %v", expected[ii].TargetVersion, got[ii].TargetVersion, i) + } + } + } +} + +func equalDbSeq(t *testing.T, i int, expected migrationSequence, got *dStub.Stub) { + bs := expected.bodySequence() + if !got.EqualSequence(bs) { + t.Fatalf("\nexpected sequence %v,\ngot %v, in %v", bs, got.MigrationSequence, i) + } +} diff --git a/vendor/github.com/rdallman/migrate/migration.go b/vendor/github.com/rdallman/migrate/migration.go new file mode 100644 index 000000000..069e7f038 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/migration.go @@ -0,0 +1,154 @@ +package migrate + +import ( + "bufio" + "fmt" + "io" + "time" +) + +// DefaultBufferSize sets the in-memory buffer size (in Bytes) for every +// pre-read migration (see DefaultPrefetchMigrations). +var DefaultBufferSize = uint(100000) + +// Migration holds information about a migration. +// It is initially created from data coming from the source and then +// used when run against the database. +type Migration struct { + // Identifier can be any string to help identify + // the migration in the source. + Identifier string + + // Version is the version of this migration. + Version uint + + // TargetVersion is the migration version after this migration + // has been applied to the database. + // Can be -1, implying that this is a NilVersion. + TargetVersion int + + // Body holds an io.ReadCloser to the source. + Body io.ReadCloser + + // BufferedBody holds a buffered io.Reader to the underlying Body. + BufferedBody io.Reader + + // BufferSize defaults to DefaultBufferSize + BufferSize uint + + // bufferWriter holds an io.WriteCloser and pipes to BufferedBody. + // It's an io.WriteCloser for flow control. + bufferWriter io.WriteCloser + + // Scheduled is the time when the migration was scheduled/queued. + Scheduled time.Time + + // StartedBuffering is the time when buffering of the migration source started. + StartedBuffering time.Time + + // FinishedBuffering is the time when buffering of the migration source finished. + FinishedBuffering time.Time + + // FinishedReading is the time when the migration source is fully read. + FinishedReading time.Time + + // BytesRead holds the number of Bytes read from the migration source. + BytesRead int64 +} + +// NewMigration returns a new Migration and sets the body, identifier, +// version and targetVersion. Body can be nil, which turns this migration +// into a "NilMigration". If no identifier is provided, it will default to "".
+// targetVersion can be -1, implying it is a NilVersion. +// +// What is a NilMigration? +// Usually each migration version coming from the source is expected to have +// an Up and a Down migration. This is not a hard requirement though, so only +// the Up or the Down migration may be present. If the user wants to migrate +// up to a version that doesn't have an actual Up migration, we still want to +// apply the version, just with an empty body. We call that a NilMigration: +// a migration with an empty body. +// +// What is a NilVersion? +// NilVersion is a const (-1). When running down migrations and we are at the +// last down migration, there is no next down migration, so the targetVersion +// should be nil. Since the type is int, nil is represented by -1. +func NewMigration(body io.ReadCloser, identifier string, + version uint, targetVersion int) (*Migration, error) { + tnow := time.Now() + m := &Migration{ + Identifier: identifier, + Version: version, + TargetVersion: targetVersion, + Scheduled: tnow, + } + + if body == nil { + if len(identifier) == 0 { + m.Identifier = "" + } + + m.StartedBuffering = tnow + m.FinishedBuffering = tnow + m.FinishedReading = tnow + return m, nil + } + + br, bw := io.Pipe() + m.Body = body // want to simulate low latency? newSlowReader(body) + m.BufferSize = DefaultBufferSize + m.BufferedBody = br + m.bufferWriter = bw + return m, nil +} + +// String implements fmt.Stringer and is used in tests. +func (m *Migration) String() string { + return fmt.Sprintf("%v [%v=>%v]", m.Identifier, m.Version, m.TargetVersion) +} + +// LogString returns a string describing this migration to humans. +func (m *Migration) LogString() string { + directionStr := "u" + if m.TargetVersion < int(m.Version) { + directionStr = "d" + } + return fmt.Sprintf("%v/%v %v", m.Version, directionStr, m.Identifier) +} + +// Buffer buffers Body up to BufferSize. +// Calling this function blocks; call it from a goroutine. +func (m *Migration) Buffer() error { + if m.Body == nil { + return nil + } + + m.StartedBuffering = time.Now() + + b := bufio.NewReaderSize(m.Body, int(m.BufferSize)) + + // start reading from body; Peek fills the internal buffer + // without moving the read pointer + b.Peek(int(m.BufferSize)) + + m.FinishedBuffering = time.Now() + + // write to bufferWriter, this will block until + // something starts reading from m.BufferedBody + n, err := b.WriteTo(m.bufferWriter) + if err != nil { + return err + } + + m.FinishedReading = time.Now() + m.BytesRead = n + + // close bufferWriter so readers of BufferedBody know + // that there is no more data coming + m.bufferWriter.Close() + + // it's safe to close the Body too + m.Body.Close() + + return nil +} diff --git a/vendor/github.com/rdallman/migrate/migration_test.go b/vendor/github.com/rdallman/migrate/migration_test.go new file mode 100644 index 000000000..b6589f938 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/migration_test.go @@ -0,0 +1,56 @@ +package migrate + +import ( + "fmt" + "io/ioutil" + "log" + "strings" +) + +func ExampleNewMigration() { + // Create a dummy migration body; usually this comes from the source. + body := ioutil.NopCloser(strings.NewReader("dummy migration that creates users table")) + + // Create a new Migration that represents version 1486686016. + // Once this migration has been applied to the database, the new + // migration version will be 1486689359.
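+ // NewMigration only wires up an internal pipe; the blocking Buffer() is meant to run in a separate goroutine to pre-read the body.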
+ migr, err := NewMigration(body, "create_users_table", 1486686016, 1486689359) + if err != nil { + log.Fatal(err) + } + + fmt.Print(migr.LogString()) + // Output: + // 1486686016/u create_users_table +} + +func ExampleNewMigration_nilMigration() { + // Create a new Migration that represents a NilMigration. + // Once this migration has been applied to the database, the new + // migration version will be 1486689359. + migr, err := NewMigration(nil, "", 1486686016, 1486689359) + if err != nil { + log.Fatal(err) + } + + fmt.Print(migr.LogString()) + // Output: + // 1486686016/u +} + +func ExampleNewMigration_nilVersion() { + // Create a dummy migration body; usually this comes from the source. + body := ioutil.NopCloser(strings.NewReader("dummy migration that deletes users table")) + + // Create a new Migration that represents version 1486686016. + // This is the last available down migration, so the migration version + // will be -1, meaning NilVersion, once this migration has run. + migr, err := NewMigration(body, "drop_users_table", 1486686016, -1) + if err != nil { + log.Fatal(err) + } + + fmt.Print(migr.LogString()) + // Output: + // 1486686016/d drop_users_table +} diff --git a/vendor/github.com/rdallman/migrate/source/aws-s3/README.md b/vendor/github.com/rdallman/migrate/source/aws-s3/README.md new file mode 100644 index 000000000..3a59cfec9 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/aws-s3/README.md @@ -0,0 +1,3 @@ +# aws-s3 + +`s3://<bucket>/<folder>` diff --git a/vendor/github.com/rdallman/migrate/source/aws-s3/s3.go b/vendor/github.com/rdallman/migrate/source/aws-s3/s3.go new file mode 100644 index 000000000..8b581402c --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/aws-s3/s3.go @@ -0,0 +1,125 @@ +package awss3 + +import ( + "fmt" + "io" + "net/url" + "os" + "path" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3iface" + "github.com/mattes/migrate/source" +) + +func init() { + source.Register("s3", &s3Driver{}) +} + +type s3Driver struct { + s3client s3iface.S3API + bucket string + prefix string + migrations *source.Migrations +} + +func (s *s3Driver) Open(folder string) (source.Driver, error) { + u, err := url.Parse(folder) + if err != nil { + return nil, err + } + sess, err := session.NewSession() + if err != nil { + return nil, err + } + driver := s3Driver{ + bucket: u.Host, + prefix: strings.Trim(u.Path, "/") + "/", + s3client: s3.New(sess), + migrations: source.NewMigrations(), + } + err = driver.loadMigrations() + if err != nil { + return nil, err + } + return &driver, nil +} + +func (s *s3Driver) loadMigrations() error { + output, err := s.s3client.ListObjects(&s3.ListObjectsInput{ + Bucket: aws.String(s.bucket), + Prefix: aws.String(s.prefix), + Delimiter: aws.String("/"), + }) + if err != nil { + return err + } + for _, object := range output.Contents { + _, fileName := path.Split(aws.StringValue(object.Key)) + m, err := source.DefaultParse(fileName) + if err != nil { + continue + } + if !s.migrations.Append(m) { + return fmt.Errorf("unable to parse file %v", aws.StringValue(object.Key)) + } + } + return nil +} + +func (s *s3Driver) Close() error { + return nil +} + +func (s *s3Driver) First() (uint, error) { + v, ok := s.migrations.First() + if !ok { + return 0, os.ErrNotExist + } + return v, nil +} + +func (s *s3Driver) Prev(version uint) (uint, error) { + v, ok := s.migrations.Prev(version) + if !ok { + return 0, os.ErrNotExist + }
+ return v, nil +} + +func (s *s3Driver) Next(version uint) (uint, error) { + v, ok := s.migrations.Next(version) + if !ok { + return 0, os.ErrNotExist + } + return v, nil +} + +func (s *s3Driver) ReadUp(version uint) (io.ReadCloser, string, error) { + if m, ok := s.migrations.Up(version); ok { + return s.open(m) + } + return nil, "", os.ErrNotExist +} + +func (s *s3Driver) ReadDown(version uint) (io.ReadCloser, string, error) { + if m, ok := s.migrations.Down(version); ok { + return s.open(m) + } + return nil, "", os.ErrNotExist +} + +func (s *s3Driver) open(m *source.Migration) (io.ReadCloser, string, error) { + key := path.Join(s.prefix, m.Raw) + object, err := s.s3client.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(s.bucket), + Key: aws.String(key), + }) + if err != nil { + return nil, "", err + } + return object.Body, m.Identifier, nil +} diff --git a/vendor/github.com/rdallman/migrate/source/aws-s3/s3_test.go b/vendor/github.com/rdallman/migrate/source/aws-s3/s3_test.go new file mode 100644 index 000000000..f07d7ff2c --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/aws-s3/s3_test.go @@ -0,0 +1,82 @@ +package awss3 + +import ( + "errors" + "io/ioutil" + "strings" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/mattes/migrate/source" + st "github.com/mattes/migrate/source/testing" +) + +func Test(t *testing.T) { + s3Client := fakeS3{ + bucket: "some-bucket", + objects: map[string]string{ + "staging/migrations/1_foobar.up.sql": "1 up", + "staging/migrations/1_foobar.down.sql": "1 down", + "prod/migrations/1_foobar.up.sql": "1 up", + "prod/migrations/1_foobar.down.sql": "1 down", + "prod/migrations/3_foobar.up.sql": "3 up", + "prod/migrations/4_foobar.up.sql": "4 up", + "prod/migrations/4_foobar.down.sql": "4 down", + "prod/migrations/5_foobar.down.sql": "5 down", + "prod/migrations/7_foobar.up.sql": "7 up", + "prod/migrations/7_foobar.down.sql": "7 down", + "prod/migrations/not-a-migration.txt": "", + "prod/migrations/0-random-stuff/whatever.txt": "", + }, + } + driver := s3Driver{ + bucket: "some-bucket", + prefix: "prod/migrations/", + migrations: source.NewMigrations(), + s3client: &s3Client, + } + err := driver.loadMigrations() + if err != nil { + t.Fatal(err) + } + st.Test(t, &driver) +} + +type fakeS3 struct { + s3.S3 + bucket string + objects map[string]string +} + +func (s *fakeS3) ListObjects(input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) { + bucket := aws.StringValue(input.Bucket) + if bucket != s.bucket { + return nil, errors.New("bucket not found") + } + prefix := aws.StringValue(input.Prefix) + delimiter := aws.StringValue(input.Delimiter) + var output s3.ListObjectsOutput + for name := range s.objects { + if strings.HasPrefix(name, prefix) { + if delimiter == "" || !strings.Contains(strings.Replace(name, prefix, "", 1), delimiter) { + output.Contents = append(output.Contents, &s3.Object{ + Key: aws.String(name), + }) + } + } + } + return &output, nil +} + +func (s *fakeS3) GetObject(input *s3.GetObjectInput) (*s3.GetObjectOutput, error) { + bucket := aws.StringValue(input.Bucket) + if bucket != s.bucket { + return nil, errors.New("bucket not found") + } + if data, ok := s.objects[aws.StringValue(input.Key)]; ok { + body := ioutil.NopCloser(strings.NewReader(data)) + return &s3.GetObjectOutput{Body: body}, nil + } + return nil, errors.New("object not found") +} diff --git a/vendor/github.com/rdallman/migrate/source/driver.go b/vendor/github.com/rdallman/migrate/source/driver.go new file 
mode 100644 index 000000000..b9c052c16 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/driver.go @@ -0,0 +1,107 @@ +// Package source provides the Driver interface. +// All source drivers must implement this interface, register themselves, +// optionally provide a `WithInstance` function and pass the tests +// in package source/testing. +package source + +import ( + "fmt" + "io" + nurl "net/url" + "sync" +) + +var driversMu sync.RWMutex +var drivers = make(map[string]Driver) + +// Driver is the interface every source driver must implement. +// +// How to implement a source driver? +// 1. Implement this interface. +// 2. Optionally, add a function named `WithInstance`. +// This function should accept an existing source instance and a Config{} struct +// and return a driver instance. +// 3. Add a test that calls source/testing.go:Test(). +// 4. Add your own tests for Open(), WithInstance() (when provided) and Close(). +// All other functions are tested by the tests in source/testing. +// This saves you some time and makes sure all source drivers behave the same way. +// 5. Call Register in init(). +// +// Guidelines: +// * All configuration input must come from the URL string in func Open() +// or the Config{} struct in WithInstance. Don't os.Getenv(). +// * Drivers are supposed to be read-only. +// * Ideally don't load any contents (into memory) in Open or WithInstance. +type Driver interface { + // Open returns a new driver instance configured with parameters + // coming from the URL string. Migrate will call this function + // only once per instance. + Open(url string) (Driver, error) + + // Close closes the underlying source instance managed by the driver. + // Migrate will call this function only once per instance. + Close() error + + // First returns the very first migration version available to the driver. + // Migrate will call this function multiple times. + // If there is no version available, it must return os.ErrNotExist. + First() (version uint, err error) + + // Prev returns the previous version for a given version available to the driver. + // Migrate will call this function multiple times. + // If there is no previous version available, it must return os.ErrNotExist. + Prev(version uint) (prevVersion uint, err error) + + // Next returns the next version for a given version available to the driver. + // Migrate will call this function multiple times. + // If there is no next version available, it must return os.ErrNotExist. + Next(version uint) (nextVersion uint, err error) + + // ReadUp returns the UP migration body and an identifier that helps + // locate this migration in the source for a given version. + // If there is no up migration available for this version, + // it must return os.ErrNotExist. + // Do not start reading, just return the ReadCloser! + ReadUp(version uint) (r io.ReadCloser, identifier string, err error) + + // ReadDown returns the DOWN migration body and an identifier that helps + // locate this migration in the source for a given version. + // If there is no down migration available for this version, + // it must return os.ErrNotExist. + // Do not start reading, just return the ReadCloser! + ReadDown(version uint) (r io.ReadCloser, identifier string, err error) +} + +// Open returns a new driver instance.
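+// The URL scheme (e.g. file:// or github://) selects which registered +// driver Open delegates to.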
+func Open(url string) (Driver, error) { + u, err := nurl.Parse(url) + if err != nil { + return nil, err + } + + if u.Scheme == "" { + return nil, fmt.Errorf("source driver: invalid URL scheme") + } + + driversMu.RLock() + d, ok := drivers[u.Scheme] + driversMu.RUnlock() + if !ok { + return nil, fmt.Errorf("source driver: unknown driver %v (forgotten import?)", u.Scheme) + } + + return d.Open(url) +} + +// Register globally registers a driver. +func Register(name string, driver Driver) { + driversMu.Lock() + defer driversMu.Unlock() + if driver == nil { + panic("Register driver is nil") + } + if _, dup := drivers[name]; dup { + panic("Register called twice for driver " + name) + } + drivers[name] = driver +} diff --git a/vendor/github.com/rdallman/migrate/source/driver_test.go b/vendor/github.com/rdallman/migrate/source/driver_test.go new file mode 100644 index 000000000..82284a0b9 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/driver_test.go @@ -0,0 +1,8 @@ +package source + +func ExampleDriver() { + // see source/stub for an example + + // source/stub/stub.go has the driver implementation + // source/stub/stub_test.go runs source/testing/test.go:Test +} diff --git a/vendor/github.com/rdallman/migrate/source/file/README.md b/vendor/github.com/rdallman/migrate/source/file/README.md new file mode 100644 index 000000000..7912eff66 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/file/README.md @@ -0,0 +1,4 @@ +# file + +`file:///absolute/path` +`file://relative/path` diff --git a/vendor/github.com/rdallman/migrate/source/file/file.go b/vendor/github.com/rdallman/migrate/source/file/file.go new file mode 100644 index 000000000..b97d0aa3d --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/file/file.go @@ -0,0 +1,127 @@ +package file + +import ( + "fmt" + "io" + "io/ioutil" + nurl "net/url" + "os" + "path" + "path/filepath" + + "github.com/mattes/migrate/source" +) + +func init() { + source.Register("file", &File{}) +} + +type File struct { + url string + path string + migrations *source.Migrations +} + +func (f *File) Open(url string) (source.Driver, error) { + u, err := nurl.Parse(url) + if err != nil { + return nil, err + } + + // concat host and path to restore full path + // host might be `.` + p := u.Host + u.Path + + if len(p) == 0 { + // default to current directory if no path + wd, err := os.Getwd() + if err != nil { + return nil, err + } + p = wd + + } else if p[0:1] == "." 
|| p[0:1] != "/" { + // make path absolute if relative + abs, err := filepath.Abs(p) + if err != nil { + return nil, err + } + p = abs + } + + // scan directory + files, err := ioutil.ReadDir(p) + if err != nil { + return nil, err + } + + nf := &File{ + url: url, + path: p, + migrations: source.NewMigrations(), + } + + for _, fi := range files { + if !fi.IsDir() { + m, err := source.DefaultParse(fi.Name()) + if err != nil { + continue // ignore files that we can't parse + } + if !nf.migrations.Append(m) { + return nil, fmt.Errorf("unable to parse file %v", fi.Name()) + } + } + } + return nf, nil +} + +func (f *File) Close() error { + // nothing to do here + return nil +} + +func (f *File) First() (version uint, err error) { + if v, ok := f.migrations.First(); !ok { + return 0, &os.PathError{"first", f.path, os.ErrNotExist} + } else { + return v, nil + } +} + +func (f *File) Prev(version uint) (prevVersion uint, err error) { + if v, ok := f.migrations.Prev(version); !ok { + return 0, &os.PathError{fmt.Sprintf("prev for version %v", version), f.path, os.ErrNotExist} + } else { + return v, nil + } +} + +func (f *File) Next(version uint) (nextVersion uint, err error) { + if v, ok := f.migrations.Next(version); !ok { + return 0, &os.PathError{fmt.Sprintf("next for version %v", version), f.path, os.ErrNotExist} + } else { + return v, nil + } +} + +func (f *File) ReadUp(version uint) (r io.ReadCloser, identifier string, err error) { + if m, ok := f.migrations.Up(version); ok { + r, err := os.Open(path.Join(f.path, m.Raw)) + if err != nil { + return nil, "", err + } + return r, m.Identifier, nil + } + return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), f.path, os.ErrNotExist} +} + +func (f *File) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) { + if m, ok := f.migrations.Down(version); ok { + r, err := os.Open(path.Join(f.path, m.Raw)) + if err != nil { + return nil, "", err + } + return r, m.Identifier, nil + } + return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), f.path, os.ErrNotExist} +} diff --git a/vendor/github.com/rdallman/migrate/source/file/file_test.go b/vendor/github.com/rdallman/migrate/source/file/file_test.go new file mode 100644 index 000000000..310131c6f --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/file/file_test.go @@ -0,0 +1,207 @@ +package file + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "testing" + + st "github.com/mattes/migrate/source/testing" +) + +func Test(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + // write files that meet driver test requirements + mustWriteFile(t, tmpDir, "1_foobar.up.sql", "1 up") + mustWriteFile(t, tmpDir, "1_foobar.down.sql", "1 down") + + mustWriteFile(t, tmpDir, "3_foobar.up.sql", "3 up") + + mustWriteFile(t, tmpDir, "4_foobar.up.sql", "4 up") + mustWriteFile(t, tmpDir, "4_foobar.down.sql", "4 down") + + mustWriteFile(t, tmpDir, "5_foobar.down.sql", "5 down") + + mustWriteFile(t, tmpDir, "7_foobar.up.sql", "7 up") + mustWriteFile(t, tmpDir, "7_foobar.down.sql", "7 down") + + f := &File{} + d, err := f.Open("file://" + tmpDir) + if err != nil { + t.Fatal(err) + } + + st.Test(t, d) +} + +func TestOpen(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "TestOpen") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + mustWriteFile(t, tmpDir, "1_foobar.up.sql", "") + mustWriteFile(t, tmpDir, "1_foobar.down.sql", "") + + if !filepath.IsAbs(tmpDir) { +
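// ioutil.TempDir is expected to return an absolute path; fail fast if it does not. +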
t.Fatal("expected tmpDir to be absolute path") + } + + f := &File{} + _, err = f.Open("file://" + tmpDir) // absolute path + if err != nil { + t.Fatal(err) + } +} + +func TestOpenWithRelativePath(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "TestOpen") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + wd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + defer os.Chdir(wd) // rescue working dir after we are done + + if err := os.Chdir(tmpDir); err != nil { + t.Fatal(err) + } + + if err := os.Mkdir(filepath.Join(tmpDir, "foo"), os.ModePerm); err != nil { + t.Fatal(err) + } + + mustWriteFile(t, filepath.Join(tmpDir, "foo"), "1_foobar.up.sql", "") + + f := &File{} + + // dir: foo + d, err := f.Open("file://foo") + if err != nil { + t.Fatal(err) + } + _, err = d.First() + if err != nil { + t.Fatalf("expected first file in working dir %v for foo", tmpDir) + } + + // dir: ./foo + d, err = f.Open("file://./foo") + if err != nil { + t.Fatal(err) + } + _, err = d.First() + if err != nil { + t.Fatalf("expected first file in working dir %v for ./foo", tmpDir) + } +} + +func TestOpenDefaultsToCurrentDirectory(t *testing.T) { + wd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + + f := &File{} + d, err := f.Open("file://") + if err != nil { + t.Fatal(err) + } + + if d.(*File).path != wd { + t.Fatal("expected driver to default to current directory") + } +} + +func TestOpenWithDuplicateVersion(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "TestOpenWithDuplicateVersion") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + mustWriteFile(t, tmpDir, "1_foo.up.sql", "") // 1 up + mustWriteFile(t, tmpDir, "1_bar.up.sql", "") // 1 up + + f := &File{} + _, err = f.Open("file://" + tmpDir) + if err == nil { + t.Fatal("expected err") + } +} + +func TestClose(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "TestOpen") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + f := &File{} + d, err := f.Open("file://" + tmpDir) + if err != nil { + t.Fatal(err) + } + + if d.Close() != nil { + t.Fatal("expected nil") + } +} + +func mustWriteFile(t testing.TB, dir, file string, body string) { + if err := ioutil.WriteFile(path.Join(dir, file), []byte(body), 06444); err != nil { + t.Fatal(err) + } +} + +func mustCreateBenchmarkDir(t *testing.B) (dir string) { + tmpDir, err := ioutil.TempDir("", "Benchmark") + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 1000; i++ { + mustWriteFile(t, tmpDir, fmt.Sprintf("%v_foobar.up.sql", i), "") + mustWriteFile(t, tmpDir, fmt.Sprintf("%v_foobar.down.sql", i), "") + } + + return tmpDir +} + +func BenchmarkOpen(b *testing.B) { + dir := mustCreateBenchmarkDir(b) + defer os.RemoveAll(dir) + b.ResetTimer() + for n := 0; n < b.N; n++ { + f := &File{} + f.Open("file://" + dir) + } + b.StopTimer() +} + +func BenchmarkNext(b *testing.B) { + dir := mustCreateBenchmarkDir(b) + defer os.RemoveAll(dir) + f := &File{} + d, _ := f.Open("file://" + dir) + b.ResetTimer() + v, err := d.First() + for n := 0; n < b.N; n++ { + for !os.IsNotExist(err) { + v, err = d.Next(v) + } + } + b.StopTimer() +} diff --git a/vendor/github.com/rdallman/migrate/source/github/.gitignore b/vendor/github.com/rdallman/migrate/source/github/.gitignore new file mode 100644 index 000000000..3006ad5eb --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/github/.gitignore @@ -0,0 +1 @@ +.github_test_secrets diff --git a/vendor/github.com/rdallman/migrate/source/github/README.md 
b/vendor/github.com/rdallman/migrate/source/github/README.md new file mode 100644 index 000000000..257f575c4 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/github/README.md @@ -0,0 +1,11 @@ +# github + +`github://user:personal-access-token@owner/repo/path` + +| URL Query | WithInstance Config | Description | +|------------|---------------------|-------------| +| user | | The username of the user connecting | +| personal-access-token | | An access token from Github (https://github.com/settings/tokens) | +| owner | | the repo owner | +| repo | | the name of the repository | +| path | | path in repo to migrations | diff --git a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1085649617_create_users_table.down.sql b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1085649617_create_users_table.down.sql new file mode 100644 index 000000000..c99ddcdc8 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1085649617_create_users_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS users; diff --git a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1085649617_create_users_table.up.sql b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1085649617_create_users_table.up.sql new file mode 100644 index 000000000..92897dcab --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1085649617_create_users_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE users ( + user_id integer unique, + name varchar(40), + email varchar(40) +); diff --git a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1185749658_add_city_to_users.down.sql b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1185749658_add_city_to_users.down.sql new file mode 100644 index 000000000..940c60712 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1185749658_add_city_to_users.down.sql @@ -0,0 +1 @@ +ALTER TABLE users DROP COLUMN IF EXISTS city; diff --git a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1185749658_add_city_to_users.up.sql b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1185749658_add_city_to_users.up.sql new file mode 100644 index 000000000..67823edc9 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1185749658_add_city_to_users.up.sql @@ -0,0 +1,3 @@ +ALTER TABLE users ADD COLUMN city varchar(100); + + diff --git a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.down.sql b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.down.sql new file mode 100644 index 000000000..3e87dd229 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.down.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS users_email_index; diff --git a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.up.sql b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.up.sql new file mode 100644 index 000000000..fbeb4ab4e --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.up.sql @@ -0,0 +1,3 @@ +CREATE UNIQUE INDEX CONCURRENTLY users_email_index ON users (email); + +-- Lorem ipsum dolor sit amet, 
consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1385949617_create_books_table.down.sql b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1385949617_create_books_table.down.sql new file mode 100644 index 000000000..1a0b1a214 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1385949617_create_books_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS books; diff --git a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1385949617_create_books_table.up.sql b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1385949617_create_books_table.up.sql new file mode 100644 index 000000000..f1503b518 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1385949617_create_books_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE books ( + user_id integer, + name varchar(40), + author varchar(40) +); diff --git a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1485949617_create_movies_table.down.sql b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1485949617_create_movies_table.down.sql new file mode 100644 index 000000000..3a5187689 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1485949617_create_movies_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS movies; diff --git a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1485949617_create_movies_table.up.sql b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1485949617_create_movies_table.up.sql new file mode 100644 index 000000000..f0ef5943b --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1485949617_create_movies_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE movies ( + user_id integer, + name varchar(40), + director varchar(40) +); diff --git a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1585849751_just_a_comment.up.sql b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1585849751_just_a_comment.up.sql new file mode 100644 index 000000000..9b6b57a61 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1585849751_just_a_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1685849751_another_comment.up.sql b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1685849751_another_comment.up.sql new file mode 100644 index 000000000..9b6b57a61 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1685849751_another_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. 
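

The example migrations above are what the github source driver (implemented next) lists and serves. A minimal usage sketch for wiring that source to a database follows; the `database/postgres` import, the repo path, and both URLs are placeholder assumptions for illustration, not part of this change:

```go
package main

import (
	"log"

	"github.com/mattes/migrate"
	// blank imports register the drivers in their init() functions;
	// the postgres database driver is assumed here for the sketch
	_ "github.com/mattes/migrate/database/postgres"
	_ "github.com/mattes/migrate/source/github"
)

func main() {
	// source URL format: github://user:personal-access-token@owner/repo/path
	m, err := migrate.New(
		"github://user:personal-access-token@owner/repo/migrations",
		"postgres://localhost:5432/database?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	// ErrNoChange just means the database is already up to date
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		log.Fatal(err)
	}
}
```

The same pattern should work for the other sources in this directory; only the source URL scheme changes.
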
diff --git a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1785849751_another_comment.up.sql b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1785849751_another_comment.up.sql new file mode 100644 index 000000000..9b6b57a61 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1785849751_another_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1885849751_another_comment.up.sql b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1885849751_another_comment.up.sql new file mode 100644 index 000000000..9b6b57a61 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1885849751_another_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/rdallman/migrate/source/github/github.go b/vendor/github.com/rdallman/migrate/source/github/github.go new file mode 100644 index 000000000..d534ed37b --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/github/github.go @@ -0,0 +1,180 @@ +package github + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + nurl "net/url" + "os" + "path" + "strings" + + "github.com/google/go-github/github" + "github.com/mattes/migrate/source" +) + +func init() { + source.Register("github", &Github{}) +} + +var ( + ErrNoUserInfo = fmt.Errorf("no username:token provided") + ErrNoAccessToken = fmt.Errorf("no access token") + ErrInvalidRepo = fmt.Errorf("invalid repo") + ErrInvalidGithubClient = fmt.Errorf("expected *github.Client") + ErrNoDir = fmt.Errorf("no directory") +) + +type Github struct { + client *github.Client + url string + + pathOwner string + pathRepo string + path string + migrations *source.Migrations +} + +type Config struct { +} + +func (g *Github) Open(url string) (source.Driver, error) { + u, err := nurl.Parse(url) + if err != nil { + return nil, err + } + + if u.User == nil { + return nil, ErrNoUserInfo + } + + password, ok := u.User.Password() + if !ok { + return nil, ErrNoUserInfo + } + + tr := &github.BasicAuthTransport{ + Username: u.User.Username(), + Password: password, + } + + gn := &Github{ + client: github.NewClient(tr.Client()), + url: url, + migrations: source.NewMigrations(), + } + + // set owner, repo and path in repo + gn.pathOwner = u.Host + pe := strings.Split(strings.Trim(u.Path, "/"), "/") + if len(pe) < 1 { + return nil, ErrInvalidRepo + } + gn.pathRepo = pe[0] + if len(pe) > 1 { + gn.path = strings.Join(pe[1:], "/") + } + + if err := gn.readDirectory(); err != nil { + return nil, err + } + + return gn, nil +} + +func WithInstance(client *github.Client, config *Config) (source.Driver, error) { + gn := &Github{ + client: client, + migrations: source.NewMigrations(), + } + if err := gn.readDirectory(); err != nil { + return nil, err + } + return gn, nil +} + +func (g *Github) readDirectory() error { + fileContent, dirContents, _, err := g.client.Repositories.GetContents(context.Background(), g.pathOwner, g.pathRepo, g.path, &github.RepositoryContentGetOptions{}) + if err != nil { + return err + } + if 
fileContent != nil { + return ErrNoDir + } + + for _, fi := range dirContents { + m, err := source.DefaultParse(*fi.Name) + if err != nil { + continue // ignore files that we can't parse + } + if !g.migrations.Append(m) { + return fmt.Errorf("unable to parse file %v", *fi.Name) + } + } + + return nil +} + +func (g *Github) Close() error { + return nil +} + +func (g *Github) First() (version uint, err error) { + if v, ok := g.migrations.First(); !ok { + return 0, &os.PathError{"first", g.path, os.ErrNotExist} + } else { + return v, nil + } +} + +func (g *Github) Prev(version uint) (prevVersion uint, err error) { + if v, ok := g.migrations.Prev(version); !ok { + return 0, &os.PathError{fmt.Sprintf("prev for version %v", version), g.path, os.ErrNotExist} + } else { + return v, nil + } +} + +func (g *Github) Next(version uint) (nextVersion uint, err error) { + if v, ok := g.migrations.Next(version); !ok { + return 0, &os.PathError{fmt.Sprintf("next for version %v", version), g.path, os.ErrNotExist} + } else { + return v, nil + } +} + +func (g *Github) ReadUp(version uint) (r io.ReadCloser, identifier string, err error) { + if m, ok := g.migrations.Up(version); ok { + file, _, _, err := g.client.Repositories.GetContents(context.Background(), g.pathOwner, g.pathRepo, path.Join(g.path, m.Raw), &github.RepositoryContentGetOptions{}) + if err != nil { + return nil, "", err + } + if file != nil { + r, err := file.GetContent() + if err != nil { + return nil, "", err + } + return ioutil.NopCloser(bytes.NewReader([]byte(r))), m.Identifier, nil + } + } + return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), g.path, os.ErrNotExist} +} + +func (g *Github) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) { + if m, ok := g.migrations.Down(version); ok { + file, _, _, err := g.client.Repositories.GetContents(context.Background(), g.pathOwner, g.pathRepo, path.Join(g.path, m.Raw), &github.RepositoryContentGetOptions{}) + if err != nil { + return nil, "", err + } + if file != nil { + r, err := file.GetContent() + if err != nil { + return nil, "", err + } + return ioutil.NopCloser(bytes.NewReader([]byte(r))), m.Identifier, nil + } + } + return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), g.path, os.ErrNotExist} +} diff --git a/vendor/github.com/rdallman/migrate/source/github/github_test.go b/vendor/github.com/rdallman/migrate/source/github/github_test.go new file mode 100644 index 000000000..83e86618e --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/github/github_test.go @@ -0,0 +1,32 @@ +package github + +import ( + "bytes" + "io/ioutil" + "testing" + + st "github.com/mattes/migrate/source/testing" +) + +var GithubTestSecret = "" // username:token + +func init() { + secrets, err := ioutil.ReadFile(".github_test_secrets") + if err == nil { + GithubTestSecret = string(bytes.TrimSpace(secrets)) + } +} + +func Test(t *testing.T) { + if len(GithubTestSecret) == 0 { + t.Skip("test requires .github_test_secrets") + } + + g := &Github{} + d, err := g.Open("github://" + GithubTestSecret + "@mattes/migrate_test_tmp/test") + if err != nil { + t.Fatal(err) + } + + st.Test(t, d) +} diff --git a/vendor/github.com/rdallman/migrate/source/go-bindata/README.md b/vendor/github.com/rdallman/migrate/source/go-bindata/README.md new file mode 100644 index 000000000..cd9dd4b78 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/go-bindata/README.md @@ -0,0 +1,43 @@ +# go-bindata + +## Usage + + + +### Read bindata with NewWithSourceInstance +
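First generate the bindata package from your migration files, then wrap the generated `AssetNames` and `Asset` functions into a `Resource`: +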
+```shell +go get -u github.com/jteeuwen/go-bindata/... +cd examples/migrations && go-bindata -pkg migrations . +``` + +```go +import ( + "github.com/mattes/migrate" + "github.com/mattes/migrate/source/go-bindata" + "github.com/mattes/migrate/source/go-bindata/examples/migrations" +) + +func main() { + // wrap assets into Resource + s := bindata.Resource(migrations.AssetNames(), + func(name string) ([]byte, error) { + return migrations.Asset(name) + }) + + d, err := bindata.WithInstance(s) + m, err := migrate.NewWithSourceInstance("go-bindata", d, "database://foobar") + m.Up() // run your migrations and handle the errors above of course +} +``` + +### Read bindata with URL (todo) + +This will restore the assets in a tmp directory and then +proxy to source/file. go-bindata must be in your `$PATH`. + +``` +migrate -source go-bindata://examples/migrations/bindata.go +``` + + diff --git a/vendor/github.com/rdallman/migrate/source/go-bindata/examples/migrations/bindata.go b/vendor/github.com/rdallman/migrate/source/go-bindata/examples/migrations/bindata.go new file mode 100644 index 000000000..282d5ef54 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/go-bindata/examples/migrations/bindata.go @@ -0,0 +1,304 @@ +// Code generated by go-bindata. +// sources: +// 1085649617_create_users_table.down.sql +// 1085649617_create_users_table.up.sql +// 1185749658_add_city_to_users.down.sql +// 1185749658_add_city_to_users.up.sql +// DO NOT EDIT! + +package testdata + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var __1085649617_create_users_tableDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x72\x09\xf2\x0f\x50\x08\x71\x74\xf2\x71\x55\xf0\x74\x53\x70\x8d\xf0\x0c\x0e\x09\x56\x28\x2d\x4e\x2d\x2a\xb6\xe6\x02\x04\x00\x00\xff\xff\x2c\x02\x3d\xa7\x1c\x00\x00\x00") + +func _1085649617_create_users_tableDownSqlBytes() ([]byte, error) { + return bindataRead( + __1085649617_create_users_tableDownSql, + "1085649617_create_users_table.down.sql", + ) +} + +func _1085649617_create_users_tableDownSql() (*asset, error) { + bytes, err := _1085649617_create_users_tableDownSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "1085649617_create_users_table.down.sql", size: 28, mode: os.FileMode(420), modTime: time.Unix(1485750305, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __1085649617_create_users_tableUpSql = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x72\x0e\x72\x75\x0c\x71\x55\x08\x71\x74\xf2\x71\x55\x28\x2d\x4e\x2d\x2a\x56\xd0\xe0\x52\x00\xb3\xe2\x33\x53\x14\x32\xf3\x4a\x52\xd3\x53\x8b\x14\x4a\xf3\x32\x0b\x4b\x53\x75\xb8\x14\x14\xf2\x12\x73\x53\x15\x14\x14\x14\xca\x12\x8b\x92\x33\x12\x8b\x34\x4c\x0c\x34\x41\xc2\xa9\xb9\x89\x99\x39\xa8\xc2\x5c\x9a\xd6\x5c\x80\x00\x00\x00\xff\xff\xa3\x57\xbc\x0b\x5f\x00\x00\x00") + +func _1085649617_create_users_tableUpSqlBytes() ([]byte, error) { + return bindataRead( + __1085649617_create_users_tableUpSql, + "1085649617_create_users_table.up.sql", + ) +} + +func _1085649617_create_users_tableUpSql() (*asset, error) { + bytes, err := _1085649617_create_users_tableUpSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "1085649617_create_users_table.up.sql", size: 95, mode: os.FileMode(420), modTime: time.Unix(1485803085, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __1185749658_add_city_to_usersDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\x2d\x4e\x2d\x2a\x56\x70\x09\xf2\x0f\x50\x70\xf6\xf7\x09\xf5\xf5\x53\xf0\x74\x53\x70\x8d\xf0\x0c\x0e\x09\x56\x48\xce\x2c\xa9\xb4\xe6\x02\x04\x00\x00\xff\xff\xb7\x52\x88\xd7\x2e\x00\x00\x00") + +func _1185749658_add_city_to_usersDownSqlBytes() ([]byte, error) { + return bindataRead( + __1185749658_add_city_to_usersDownSql, + "1185749658_add_city_to_users.down.sql", + ) +} + +func _1185749658_add_city_to_usersDownSql() (*asset, error) { + bytes, err := _1185749658_add_city_to_usersDownSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "1185749658_add_city_to_users.down.sql", size: 46, mode: os.FileMode(420), modTime: time.Unix(1485750443, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __1185749658_add_city_to_usersUpSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\x2d\x4e\x2d\x2a\x56\x70\x74\x71\x51\x70\xf6\xf7\x09\xf5\xf5\x53\x48\xce\x2c\xa9\x54\x28\x4b\x2c\x4a\xce\x48\x2c\xd2\x30\x34\x30\xd0\xb4\xe6\xe2\xe2\x02\x04\x00\x00\xff\xff\xa8\x0f\x49\xc6\x32\x00\x00\x00") + +func _1185749658_add_city_to_usersUpSqlBytes() ([]byte, error) { + return bindataRead( + __1185749658_add_city_to_usersUpSql, + "1185749658_add_city_to_users.up.sql", + ) +} + +func _1185749658_add_city_to_usersUpSql() (*asset, error) { + bytes, err := _1185749658_add_city_to_usersUpSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "1185749658_add_city_to_users.up.sql", size: 50, mode: os.FileMode(420), modTime: time.Unix(1485843733, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. 
+func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "1085649617_create_users_table.down.sql": _1085649617_create_users_tableDownSql, + "1085649617_create_users_table.up.sql": _1085649617_create_users_tableUpSql, + "1185749658_add_city_to_users.down.sql": _1185749658_add_city_to_usersDownSql, + "1185749658_add_city_to_users.up.sql": _1185749658_add_city_to_usersUpSql, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} +var _bintree = &bintree{nil, map[string]*bintree{ + "1085649617_create_users_table.down.sql": &bintree{_1085649617_create_users_tableDownSql, map[string]*bintree{}}, + "1085649617_create_users_table.up.sql": &bintree{_1085649617_create_users_tableUpSql, map[string]*bintree{}}, + "1185749658_add_city_to_users.down.sql": &bintree{_1185749658_add_city_to_usersDownSql, map[string]*bintree{}}, + "1185749658_add_city_to_users.up.sql": &bintree{_1185749658_add_city_to_usersUpSql, map[string]*bintree{}}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) 
+} + diff --git a/vendor/github.com/rdallman/migrate/source/go-bindata/go-bindata.go b/vendor/github.com/rdallman/migrate/source/go-bindata/go-bindata.go new file mode 100644 index 000000000..7426db71b --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/go-bindata/go-bindata.go @@ -0,0 +1,119 @@ +package bindata + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/mattes/migrate/source" +) + +type AssetFunc func(name string) ([]byte, error) + +func Resource(names []string, afn AssetFunc) *AssetSource { + return &AssetSource{ + Names: names, + AssetFunc: afn, + } +} + +type AssetSource struct { + Names []string + AssetFunc AssetFunc +} + +func init() { + source.Register("go-bindata", &Bindata{}) +} + +type Bindata struct { + path string + assetSource *AssetSource + migrations *source.Migrations +} + +func (b *Bindata) Open(url string) (source.Driver, error) { + return nil, fmt.Errorf("not yet implemented") +} + +var ( + ErrNoAssetSource = fmt.Errorf("expects *AssetSource") +) + +func WithInstance(instance interface{}) (source.Driver, error) { + if _, ok := instance.(*AssetSource); !ok { + return nil, ErrNoAssetSource + } + as := instance.(*AssetSource) + + bn := &Bindata{ + path: "", + assetSource: as, + migrations: source.NewMigrations(), + } + + for _, fi := range as.Names { + m, err := source.DefaultParse(fi) + if err != nil { + continue // ignore files that we can't parse + } + + if !bn.migrations.Append(m) { + return nil, fmt.Errorf("unable to parse file %v", fi) + } + } + + return bn, nil +} + +func (b *Bindata) Close() error { + return nil +} + +func (b *Bindata) First() (version uint, err error) { + if v, ok := b.migrations.First(); !ok { + return 0, &os.PathError{"first", b.path, os.ErrNotExist} + } else { + return v, nil + } +} + +func (b *Bindata) Prev(version uint) (prevVersion uint, err error) { + if v, ok := b.migrations.Prev(version); !ok { + return 0, &os.PathError{fmt.Sprintf("prev for version %v", version), b.path, os.ErrNotExist} + } else { + return v, nil + } +} + +func (b *Bindata) Next(version uint) (nextVersion uint, err error) { + if v, ok := b.migrations.Next(version); !ok { + return 0, &os.PathError{fmt.Sprintf("next for version %v", version), b.path, os.ErrNotExist} + } else { + return v, nil + } +} + +func (b *Bindata) ReadUp(version uint) (r io.ReadCloser, identifier string, err error) { + if m, ok := b.migrations.Up(version); ok { + body, err := b.assetSource.AssetFunc(m.Raw) + if err != nil { + return nil, "", err + } + return ioutil.NopCloser(bytes.NewReader(body)), m.Identifier, nil + } + return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), b.path, os.ErrNotExist} +} + +func (b *Bindata) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) { + if m, ok := b.migrations.Down(version); ok { + body, err := b.assetSource.AssetFunc(m.Raw) + if err != nil { + return nil, "", err + } + return ioutil.NopCloser(bytes.NewReader(body)), m.Identifier, nil + } + return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), b.path, os.ErrNotExist} +} diff --git a/vendor/github.com/rdallman/migrate/source/go-bindata/go-bindata_test.go b/vendor/github.com/rdallman/migrate/source/go-bindata/go-bindata_test.go new file mode 100644 index 000000000..746a7b91f --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/go-bindata/go-bindata_test.go @@ -0,0 +1,43 @@ +package bindata + +import ( + "testing" + + "github.com/mattes/migrate/source/go-bindata/testdata" + st 
"github.com/mattes/migrate/source/testing" +) + +func Test(t *testing.T) { + // wrap assets into Resource first + s := Resource(testdata.AssetNames(), + func(name string) ([]byte, error) { + return testdata.Asset(name) + }) + + d, err := WithInstance(s) + if err != nil { + t.Fatal(err) + } + st.Test(t, d) +} + +func TestWithInstance(t *testing.T) { + // wrap assets into Resource + s := Resource(testdata.AssetNames(), + func(name string) ([]byte, error) { + return testdata.Asset(name) + }) + + _, err := WithInstance(s) + if err != nil { + t.Fatal(err) + } +} + +func TestOpen(t *testing.T) { + b := &Bindata{} + _, err := b.Open("") + if err == nil { + t.Fatal("expected err, because it's not implemented yet") + } +} diff --git a/vendor/github.com/rdallman/migrate/source/go-bindata/testdata/bindata.go b/vendor/github.com/rdallman/migrate/source/go-bindata/testdata/bindata.go new file mode 100644 index 000000000..304f3d87c --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/go-bindata/testdata/bindata.go @@ -0,0 +1,396 @@ +// Code generated by go-bindata. +// sources: +// 1_test.down.sql +// 1_test.up.sql +// 3_test.up.sql +// 4_test.down.sql +// 4_test.up.sql +// 5_test.down.sql +// 7_test.down.sql +// 7_test.up.sql +// DO NOT EDIT! + +package testdata + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var __1_testDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func _1_testDownSqlBytes() ([]byte, error) { + return bindataRead( + __1_testDownSql, + "1_test.down.sql", + ) +} + +func _1_testDownSql() (*asset, error) { + bytes, err := _1_testDownSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "1_test.down.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440324, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __1_testUpSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func _1_testUpSqlBytes() ([]byte, error) { + return bindataRead( + __1_testUpSql, + "1_test.up.sql", + ) +} + +func _1_testUpSql() (*asset, error) { + bytes, err := _1_testUpSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "1_test.up.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440319, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __3_testUpSql = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func _3_testUpSqlBytes() ([]byte, error) { + return bindataRead( + __3_testUpSql, + "3_test.up.sql", + ) +} + +func _3_testUpSql() (*asset, error) { + bytes, err := _3_testUpSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "3_test.up.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440331, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __4_testDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func _4_testDownSqlBytes() ([]byte, error) { + return bindataRead( + __4_testDownSql, + "4_test.down.sql", + ) +} + +func _4_testDownSql() (*asset, error) { + bytes, err := _4_testDownSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "4_test.down.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440337, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __4_testUpSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func _4_testUpSqlBytes() ([]byte, error) { + return bindataRead( + __4_testUpSql, + "4_test.up.sql", + ) +} + +func _4_testUpSql() (*asset, error) { + bytes, err := _4_testUpSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "4_test.up.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440335, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __5_testDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func _5_testDownSqlBytes() ([]byte, error) { + return bindataRead( + __5_testDownSql, + "5_test.down.sql", + ) +} + +func _5_testDownSql() (*asset, error) { + bytes, err := _5_testDownSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "5_test.down.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440340, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __7_testDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func _7_testDownSqlBytes() ([]byte, error) { + return bindataRead( + __7_testDownSql, + "7_test.down.sql", + ) +} + +func _7_testDownSql() (*asset, error) { + bytes, err := _7_testDownSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "7_test.down.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440343, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __7_testUpSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func _7_testUpSqlBytes() ([]byte, error) { + return bindataRead( + __7_testUpSql, + "7_test.up.sql", + ) +} + +func _7_testUpSql() (*asset, error) { + bytes, err := _7_testUpSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "7_test.up.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440347, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. 
+func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "1_test.down.sql": _1_testDownSql, + "1_test.up.sql": _1_testUpSql, + "3_test.up.sql": _3_testUpSql, + "4_test.down.sql": _4_testDownSql, + "4_test.up.sql": _4_testUpSql, + "5_test.down.sql": _5_testDownSql, + "7_test.down.sql": _7_testDownSql, + "7_test.up.sql": _7_testUpSql, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} +var _bintree = &bintree{nil, map[string]*bintree{ + "1_test.down.sql": &bintree{_1_testDownSql, map[string]*bintree{}}, + "1_test.up.sql": &bintree{_1_testUpSql, map[string]*bintree{}}, + "3_test.up.sql": &bintree{_3_testUpSql, map[string]*bintree{}}, + "4_test.down.sql": &bintree{_4_testDownSql, map[string]*bintree{}}, + "4_test.up.sql": &bintree{_4_testUpSql, map[string]*bintree{}}, + "5_test.down.sql": &bintree{_5_testDownSql, map[string]*bintree{}}, + "7_test.down.sql": &bintree{_7_testDownSql, map[string]*bintree{}}, + "7_test.up.sql": &bintree{_7_testUpSql, map[string]*bintree{}}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) 
+} + diff --git a/vendor/github.com/rdallman/migrate/source/google-cloud-storage/README.md b/vendor/github.com/rdallman/migrate/source/google-cloud-storage/README.md new file mode 100644 index 000000000..e61cb2311 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/google-cloud-storage/README.md @@ -0,0 +1,3 @@ +# google-cloud-storage + +`gcs:///` diff --git a/vendor/github.com/rdallman/migrate/source/google-cloud-storage/storage.go b/vendor/github.com/rdallman/migrate/source/google-cloud-storage/storage.go new file mode 100644 index 000000000..c1a18bc2f --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/google-cloud-storage/storage.go @@ -0,0 +1,119 @@ +package googlecloudstorage + +import ( + "fmt" + "io" + "net/url" + "os" + "path" + "strings" + + "cloud.google.com/go/storage" + "github.com/mattes/migrate/source" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func init() { + source.Register("gcs", &gcs{}) +} + +type gcs struct { + bucket *storage.BucketHandle + prefix string + migrations *source.Migrations +} + +func (g *gcs) Open(folder string) (source.Driver, error) { + u, err := url.Parse(folder) + if err != nil { + return nil, err + } + client, err := storage.NewClient(context.Background()) + if err != nil { + return nil, err + } + driver := gcs{ + bucket: client.Bucket(u.Host), + prefix: strings.Trim(u.Path, "/") + "/", + migrations: source.NewMigrations(), + } + err = driver.loadMigrations() + if err != nil { + return nil, err + } + return &driver, nil +} + +func (g *gcs) loadMigrations() error { + iter := g.bucket.Objects(context.Background(), &storage.Query{ + Prefix: g.prefix, + Delimiter: "/", + }) + object, err := iter.Next() + for ; err == nil; object, err = iter.Next() { + _, fileName := path.Split(object.Name) + m, parseErr := source.DefaultParse(fileName) + if parseErr != nil { + continue + } + if !g.migrations.Append(m) { + return fmt.Errorf("unable to parse file %v", object.Name) + } + } + if err != iterator.Done { + return err + } + return nil +} + +func (g *gcs) Close() error { + return nil +} + +func (g *gcs) First() (uint, error) { + v, ok := g.migrations.First() + if !ok { + return 0, os.ErrNotExist + } + return v, nil +} + +func (g *gcs) Prev(version uint) (uint, error) { + v, ok := g.migrations.Prev(version) + if !ok { + return 0, os.ErrNotExist + } + return v, nil +} + +func (g *gcs) Next(version uint) (uint, error) { + v, ok := g.migrations.Next(version) + if !ok { + return 0, os.ErrNotExist + } + return v, nil +} + +func (g *gcs) ReadUp(version uint) (io.ReadCloser, string, error) { + if m, ok := g.migrations.Up(version); ok { + return g.open(m) + } + return nil, "", os.ErrNotExist +} + +func (g *gcs) ReadDown(version uint) (io.ReadCloser, string, error) { + if m, ok := g.migrations.Down(version); ok { + return g.open(m) + } + return nil, "", os.ErrNotExist +} + +func (g *gcs) open(m *source.Migration) (io.ReadCloser, string, error) { + objectPath := path.Join(g.prefix, m.Raw) + reader, err := g.bucket.Object(objectPath).NewReader(context.Background()) + if err != nil { + return nil, "", err + } + return reader, m.Identifier, nil +} diff --git a/vendor/github.com/rdallman/migrate/source/google-cloud-storage/storage_test.go b/vendor/github.com/rdallman/migrate/source/google-cloud-storage/storage_test.go new file mode 100644 index 000000000..2af4947cc --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/google-cloud-storage/storage_test.go @@ -0,0 +1,37 @@ +package googlecloudstorage + +import ( + "testing" + 
+ "github.com/fsouza/fake-gcs-server/fakestorage" + "github.com/mattes/migrate/source" + st "github.com/mattes/migrate/source/testing" +) + +func Test(t *testing.T) { + server := fakestorage.NewServer([]fakestorage.Object{ + {BucketName: "some-bucket", Name: "staging/migrations/1_foobar.up.sql", Content: []byte("1 up")}, + {BucketName: "some-bucket", Name: "staging/migrations/1_foobar.down.sql", Content: []byte("1 down")}, + {BucketName: "some-bucket", Name: "prod/migrations/1_foobar.up.sql", Content: []byte("1 up")}, + {BucketName: "some-bucket", Name: "prod/migrations/1_foobar.down.sql", Content: []byte("1 down")}, + {BucketName: "some-bucket", Name: "prod/migrations/3_foobar.up.sql", Content: []byte("3 up")}, + {BucketName: "some-bucket", Name: "prod/migrations/4_foobar.up.sql", Content: []byte("4 up")}, + {BucketName: "some-bucket", Name: "prod/migrations/4_foobar.down.sql", Content: []byte("4 down")}, + {BucketName: "some-bucket", Name: "prod/migrations/5_foobar.down.sql", Content: []byte("5 down")}, + {BucketName: "some-bucket", Name: "prod/migrations/7_foobar.up.sql", Content: []byte("7 up")}, + {BucketName: "some-bucket", Name: "prod/migrations/7_foobar.down.sql", Content: []byte("7 down")}, + {BucketName: "some-bucket", Name: "prod/migrations/not-a-migration.txt"}, + {BucketName: "some-bucket", Name: "prod/migrations/0-random-stuff/whatever.txt"}, + }) + defer server.Stop() + driver := gcs{ + bucket: server.Client().Bucket("some-bucket"), + prefix: "prod/migrations/", + migrations: source.NewMigrations(), + } + err := driver.loadMigrations() + if err != nil { + t.Fatal(err) + } + st.Test(t, &driver) +} diff --git a/vendor/github.com/rdallman/migrate/source/migration.go b/vendor/github.com/rdallman/migrate/source/migration.go new file mode 100644 index 000000000..97a4ee226 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/migration.go @@ -0,0 +1,143 @@ +package source + +import ( + "sort" +) + +// Direction is either up or down. +type Direction string + +const ( + Down Direction = "down" + Up = "up" +) + +// Migration is a helper struct for source drivers that need to +// build the full directory tree in memory. +// Migration is fully independent from migrate.Migration. +type Migration struct { + // Version is the version of this migration. + Version uint + + // Identifier can be any string that helps identifying + // this migration in the source. + Identifier string + + // Direction is either Up or Down. + Direction Direction + + // Raw holds the raw location path to this migration in source. + // ReadUp and ReadDown will use this. + Raw string +} + +// Migrations wraps Migration and has an internal index +// to keep track of Migration order. 
+type Migrations struct { + index uintSlice + migrations map[uint]map[Direction]*Migration +} + +func NewMigrations() *Migrations { + return &Migrations{ + index: make(uintSlice, 0), + migrations: make(map[uint]map[Direction]*Migration), + } +} + +func (i *Migrations) Append(m *Migration) (ok bool) { + if m == nil { + return false + } + + if i.migrations[m.Version] == nil { + i.migrations[m.Version] = make(map[Direction]*Migration) + } + + // reject duplicate versions + if _, dup := i.migrations[m.Version][m.Direction]; dup { + return false + } + + i.migrations[m.Version][m.Direction] = m + i.buildIndex() + + return true +} + +func (i *Migrations) buildIndex() { + i.index = make(uintSlice, 0) + for version, _ := range i.migrations { + i.index = append(i.index, version) + } + sort.Sort(i.index) +} + +func (i *Migrations) First() (version uint, ok bool) { + if len(i.index) == 0 { + return 0, false + } + return i.index[0], true +} + +func (i *Migrations) Prev(version uint) (prevVersion uint, ok bool) { + pos := i.findPos(version) + if pos >= 1 && len(i.index) > pos-1 { + return i.index[pos-1], true + } + return 0, false +} + +func (i *Migrations) Next(version uint) (nextVersion uint, ok bool) { + pos := i.findPos(version) + if pos >= 0 && len(i.index) > pos+1 { + return i.index[pos+1], true + } + return 0, false +} + +func (i *Migrations) Up(version uint) (m *Migration, ok bool) { + if _, ok := i.migrations[version]; ok { + if mx, ok := i.migrations[version][Up]; ok { + return mx, true + } + } + return nil, false +} + +func (i *Migrations) Down(version uint) (m *Migration, ok bool) { + if _, ok := i.migrations[version]; ok { + if mx, ok := i.migrations[version][Down]; ok { + return mx, true + } + } + return nil, false +} + +func (i *Migrations) findPos(version uint) int { + if len(i.index) > 0 { + ix := i.index.Search(version) + if ix < len(i.index) && i.index[ix] == version { + return ix + } + } + return -1 +} + +type uintSlice []uint + +func (s uintSlice) Len() int { + return len(s) +} + +func (s uintSlice) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s uintSlice) Less(i, j int) bool { + return s[i] < s[j] +} + +func (s uintSlice) Search(x uint) int { + return sort.Search(len(s), func(i int) bool { return s[i] >= x }) +} diff --git a/vendor/github.com/rdallman/migrate/source/migration_test.go b/vendor/github.com/rdallman/migrate/source/migration_test.go new file mode 100644 index 000000000..857cd26af --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/migration_test.go @@ -0,0 +1,46 @@ +package source + +import ( + "testing" +) + +func TestNewMigrations(t *testing.T) { + // TODO +} + +func TestAppend(t *testing.T) { + // TODO +} + +func TestBuildIndex(t *testing.T) { + // TODO +} + +func TestFirst(t *testing.T) { + // TODO +} + +func TestPrev(t *testing.T) { + // TODO +} + +func TestUp(t *testing.T) { + // TODO +} + +func TestDown(t *testing.T) { + // TODO +} + +func TestFindPos(t *testing.T) { + m := Migrations{index: uintSlice{1, 2, 3}} + if p := m.findPos(0); p != -1 { + t.Errorf("expected -1, got %v", p) + } + if p := m.findPos(1); p != 0 { + t.Errorf("expected 0, got %v", p) + } + if p := m.findPos(3); p != 2 { + t.Errorf("expected 2, got %v", p) + } +} diff --git a/vendor/github.com/rdallman/migrate/source/parse.go b/vendor/github.com/rdallman/migrate/source/parse.go new file mode 100644 index 000000000..2f888fe75 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/parse.go @@ -0,0 +1,39 @@ +package source + +import ( + "fmt" + "regexp" + "strconv" +) + 
+var ( + ErrParse = fmt.Errorf("no match") +) + +var ( + DefaultParse = Parse + DefaultRegex = Regex +) + +// Regex matches the following pattern: +// 123_name.up.ext +// 123_name.down.ext +var Regex = regexp.MustCompile(`^([0-9]+)_(.*)\.(` + string(Down) + `|` + string(Up) + `)\.(.*)$`) + +// Parse returns Migration for matching Regex pattern. +func Parse(raw string) (*Migration, error) { + m := Regex.FindStringSubmatch(raw) + if len(m) == 5 { + versionUint64, err := strconv.ParseUint(m[1], 10, 64) + if err != nil { + return nil, err + } + return &Migration{ + Version: uint(versionUint64), + Identifier: m[2], + Direction: Direction(m[3]), + Raw: raw, + }, nil + } + return nil, ErrParse +} diff --git a/vendor/github.com/rdallman/migrate/source/parse_test.go b/vendor/github.com/rdallman/migrate/source/parse_test.go new file mode 100644 index 000000000..d06356cc8 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/parse_test.go @@ -0,0 +1,106 @@ +package source + +import ( + "testing" +) + +func TestParse(t *testing.T) { + tt := []struct { + name string + expectErr error + expectMigration *Migration + }{ + { + name: "1_foobar.up.sql", + expectErr: nil, + expectMigration: &Migration{ + Version: 1, + Identifier: "foobar", + Direction: Up, + Raw: "1_foobar.up.sql", + }, + }, + { + name: "1_foobar.down.sql", + expectErr: nil, + expectMigration: &Migration{ + Version: 1, + Identifier: "foobar", + Direction: Down, + Raw: "1_foobar.down.sql", + }, + }, + { + name: "1_f-o_ob+ar.up.sql", + expectErr: nil, + expectMigration: &Migration{ + Version: 1, + Identifier: "f-o_ob+ar", + Direction: Up, + Raw: "1_f-o_ob+ar.up.sql", + }, + }, + { + name: "1485385885_foobar.up.sql", + expectErr: nil, + expectMigration: &Migration{ + Version: 1485385885, + Identifier: "foobar", + Direction: Up, + Raw: "1485385885_foobar.up.sql", + }, + }, + { + name: "20170412214116_date_foobar.up.sql", + expectErr: nil, + expectMigration: &Migration{ + Version: 20170412214116, + Identifier: "date_foobar", + Direction: Up, + Raw: "20170412214116_date_foobar.up.sql", + }, + }, + { + name: "-1_foobar.up.sql", + expectErr: ErrParse, + expectMigration: nil, + }, + { + name: "foobar.up.sql", + expectErr: ErrParse, + expectMigration: nil, + }, + { + name: "1.up.sql", + expectErr: ErrParse, + expectMigration: nil, + }, + { + name: "1_foobar.sql", + expectErr: ErrParse, + expectMigration: nil, + }, + { + name: "1_foobar.up", + expectErr: ErrParse, + expectMigration: nil, + }, + { + name: "1_foobar.down", + expectErr: ErrParse, + expectMigration: nil, + }, + } + + for i, v := range tt { + f, err := Parse(v.name) + + if err != v.expectErr { + t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i) + } + + if v.expectMigration != nil && *f != *v.expectMigration { + t.Errorf("expected %+v, got %+v, in %v", *v.expectMigration, *f, i) + } + } +} diff --git a/vendor/github.com/rdallman/migrate/source/stub/stub.go b/vendor/github.com/rdallman/migrate/source/stub/stub.go new file mode 100644 index 000000000..0f4153c54 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/stub/stub.go @@ -0,0 +1,85 @@ +package stub + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/mattes/migrate/source" +) + +func init() { + source.Register("stub", &Stub{}) +} + +type Config struct{} + +// d, _ := source.Open("stub://") +// d.(*stub.Stub).Migrations = + +type Stub struct { + Url string + Instance interface{} + Migrations *source.Migrations + Config *Config +} + +func (s *Stub) Open(url string) (source.Driver, error) { + 
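+	// The url is recorded but not otherwise interpreted; callers usually
+	// type-assert the returned driver and inject an in-memory migration set
+	// afterwards, as stub_test.go below does (editor's sketch of that pattern):
+	//
+	//	d, _ := (&Stub{}).Open("stub://")
+	//	d.(*Stub).Migrations = source.NewMigrations()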
return &Stub{ + Url: url, + Migrations: source.NewMigrations(), + Config: &Config{}, + }, nil +} + +func WithInstance(instance interface{}, config *Config) (source.Driver, error) { + return &Stub{ + Instance: instance, + Migrations: source.NewMigrations(), + Config: config, + }, nil +} + +func (s *Stub) Close() error { + return nil +} + +func (s *Stub) First() (version uint, err error) { + if v, ok := s.Migrations.First(); !ok { + return 0, &os.PathError{"first", s.Url, os.ErrNotExist} // TODO: s.Url can be empty when called with WithInstance + } else { + return v, nil + } +} + +func (s *Stub) Prev(version uint) (prevVersion uint, err error) { + if v, ok := s.Migrations.Prev(version); !ok { + return 0, &os.PathError{fmt.Sprintf("prev for version %v", version), s.Url, os.ErrNotExist} + } else { + return v, nil + } +} + +func (s *Stub) Next(version uint) (nextVersion uint, err error) { + if v, ok := s.Migrations.Next(version); !ok { + return 0, &os.PathError{fmt.Sprintf("next for version %v", version), s.Url, os.ErrNotExist} + } else { + return v, nil + } +} + +func (s *Stub) ReadUp(version uint) (r io.ReadCloser, identifier string, err error) { + if m, ok := s.Migrations.Up(version); ok { + return ioutil.NopCloser(bytes.NewBufferString(m.Identifier)), fmt.Sprintf("%v.up.stub", version), nil + } + return nil, "", &os.PathError{fmt.Sprintf("read up version %v", version), s.Url, os.ErrNotExist} +} + +func (s *Stub) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) { + if m, ok := s.Migrations.Down(version); ok { + return ioutil.NopCloser(bytes.NewBufferString(m.Identifier)), fmt.Sprintf("%v.down.stub", version), nil + } + return nil, "", &os.PathError{fmt.Sprintf("read down version %v", version), s.Url, os.ErrNotExist} +} diff --git a/vendor/github.com/rdallman/migrate/source/stub/stub_test.go b/vendor/github.com/rdallman/migrate/source/stub/stub_test.go new file mode 100644 index 000000000..05ce819d7 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/stub/stub_test.go @@ -0,0 +1,30 @@ +package stub + +import ( + "testing" + + "github.com/mattes/migrate/source" + st "github.com/mattes/migrate/source/testing" +) + +func Test(t *testing.T) { + s := &Stub{} + d, err := s.Open("") + if err != nil { + t.Fatal(err) + } + + m := source.NewMigrations() + m.Append(&source.Migration{Version: 1, Direction: source.Up}) + m.Append(&source.Migration{Version: 1, Direction: source.Down}) + m.Append(&source.Migration{Version: 3, Direction: source.Up}) + m.Append(&source.Migration{Version: 4, Direction: source.Up}) + m.Append(&source.Migration{Version: 4, Direction: source.Down}) + m.Append(&source.Migration{Version: 5, Direction: source.Down}) + m.Append(&source.Migration{Version: 7, Direction: source.Up}) + m.Append(&source.Migration{Version: 7, Direction: source.Down}) + + d.(*Stub).Migrations = m + + st.Test(t, d) +} diff --git a/vendor/github.com/rdallman/migrate/source/testing/testing.go b/vendor/github.com/rdallman/migrate/source/testing/testing.go new file mode 100644 index 000000000..3cc003c59 --- /dev/null +++ b/vendor/github.com/rdallman/migrate/source/testing/testing.go @@ -0,0 +1,169 @@ +// Package testing has the source tests. +// All source drivers must pass the Test function. +// This lives in it's own package so it stays a test dependency. +package testing + +import ( + "os" + "testing" + + "github.com/mattes/migrate/source" +) + +// Test runs tests against source implementations. 
+// It assumes that the driver under test has access to the following
+// migrations:
+//
+// u = up migration, d = down migration, n = version
+// |  1  |  -  |  3  |  4  |  5  |  -  |  7  |
+// | u d |  -  |  u  | u d |  d  |  -  | u d |
+//
+// See source/stub/stub_test.go or source/file/file_test.go for an example.
+func Test(t *testing.T, d source.Driver) {
+	TestFirst(t, d)
+	TestPrev(t, d)
+	TestNext(t, d)
+	TestReadUp(t, d)
+	TestReadDown(t, d)
+}
+
+func TestFirst(t *testing.T, d source.Driver) {
+	version, err := d.First()
+	if err != nil {
+		t.Fatalf("First: expected err to be nil, got %v", err)
+	}
+	if version != 1 {
+		t.Errorf("First: expected 1, got %v", version)
+	}
+}
+
+func TestPrev(t *testing.T, d source.Driver) {
+	tt := []struct {
+		version           uint
+		expectErr         error
+		expectPrevVersion uint
+	}{
+		{version: 0, expectErr: os.ErrNotExist},
+		{version: 1, expectErr: os.ErrNotExist},
+		{version: 2, expectErr: os.ErrNotExist},
+		{version: 3, expectErr: nil, expectPrevVersion: 1},
+		{version: 4, expectErr: nil, expectPrevVersion: 3},
+		{version: 5, expectErr: nil, expectPrevVersion: 4},
+		{version: 6, expectErr: os.ErrNotExist},
+		{version: 7, expectErr: nil, expectPrevVersion: 5},
+		{version: 8, expectErr: os.ErrNotExist},
+		{version: 9, expectErr: os.ErrNotExist},
+	}
+
+	for i, v := range tt {
+		pv, err := d.Prev(v.version)
+		if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) && v.expectErr != err {
+			t.Errorf("Prev: expected %v, got %v, in %v", v.expectErr, err, i)
+		}
+		if err == nil && v.expectPrevVersion != pv {
+			t.Errorf("Prev: expected %v, got %v, in %v", v.expectPrevVersion, pv, i)
+		}
+	}
+}
+
+func TestNext(t *testing.T, d source.Driver) {
+	tt := []struct {
+		version           uint
+		expectErr         error
+		expectNextVersion uint
+	}{
+		{version: 0, expectErr: os.ErrNotExist},
+		{version: 1, expectErr: nil, expectNextVersion: 3},
+		{version: 2, expectErr: os.ErrNotExist},
+		{version: 3, expectErr: nil, expectNextVersion: 4},
+		{version: 4, expectErr: nil, expectNextVersion: 5},
+		{version: 5, expectErr: nil, expectNextVersion: 7},
+		{version: 6, expectErr: os.ErrNotExist},
+		{version: 7, expectErr: os.ErrNotExist},
+		{version: 8, expectErr: os.ErrNotExist},
+		{version: 9, expectErr: os.ErrNotExist},
+	}
+
+	for i, v := range tt {
+		nv, err := d.Next(v.version)
+		if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) && v.expectErr != err {
+			t.Errorf("Next: expected %v, got %v, in %v", v.expectErr, err, i)
+		}
+		if err == nil && v.expectNextVersion != nv {
+			t.Errorf("Next: expected %v, got %v, in %v", v.expectNextVersion, nv, i)
+		}
+	}
+}
+
+func TestReadUp(t *testing.T, d source.Driver) {
+	tt := []struct {
+		version   uint
+		expectErr error
+		expectUp  bool
+	}{
+		{version: 0, expectErr: os.ErrNotExist},
+		{version: 1, expectErr: nil, expectUp: true},
+		{version: 2, expectErr: os.ErrNotExist},
+		{version: 3, expectErr: nil, expectUp: true},
+		{version: 4, expectErr: nil, expectUp: true},
+		{version: 5, expectErr: os.ErrNotExist},
+		{version: 6, expectErr: os.ErrNotExist},
+		{version: 7, expectErr: nil, expectUp: true},
+		{version: 8, expectErr: os.ErrNotExist},
+	}
+
+	for i, v := range tt {
+		up, identifier, err := d.ReadUp(v.version)
+		if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) ||
+			(v.expectErr != os.ErrNotExist && err != v.expectErr) {
+			t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i)
+
+		} else if err == nil {
+			if len(identifier) == 0 {
+				t.Errorf("expected identifier not to be empty, in %v", i)
+			}
+
+			if v.expectUp && up == nil {
+				t.Errorf("expected up not to be nil, in %v", i)
+			} else if !v.expectUp && up != nil {
+				t.Errorf("expected up to be nil, got %v, in %v", up, i)
+			}
+		}
+	}
+}
+
+func TestReadDown(t *testing.T, d source.Driver) {
+	tt := []struct {
+		version    uint
+		expectErr  error
+		expectDown bool
+	}{
+		{version: 0, expectErr: os.ErrNotExist},
+		{version: 1, expectErr: nil, expectDown: true},
+		{version: 2, expectErr: os.ErrNotExist},
+		{version: 3, expectErr: os.ErrNotExist},
+		{version: 4, expectErr: nil, expectDown: true},
+		{version: 5, expectErr: nil, expectDown: true},
+		{version: 6, expectErr: os.ErrNotExist},
+		{version: 7, expectErr: nil, expectDown: true},
+		{version: 8, expectErr: os.ErrNotExist},
+	}
+
+	for i, v := range tt {
+		down, identifier, err := d.ReadDown(v.version)
+		if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) ||
+			(v.expectErr != os.ErrNotExist && err != v.expectErr) {
+			t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i)
+
+		} else if err == nil {
+			if len(identifier) == 0 {
+				t.Errorf("expected identifier not to be empty, in %v", i)
+			}
+
+			if v.expectDown && down == nil {
+				t.Errorf("expected down not to be nil, in %v", i)
+			} else if !v.expectDown && down != nil {
+				t.Errorf("expected down to be nil, got %v, in %v", down, i)
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/rdallman/migrate/testing/docker.go b/vendor/github.com/rdallman/migrate/testing/docker.go
new file mode 100644
index 000000000..f7a7c4152
--- /dev/null
+++ b/vendor/github.com/rdallman/migrate/testing/docker.go
@@ -0,0 +1,254 @@
+// Package testing is used in driver tests.
+package testing
+
+import (
+	"bufio"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"math/rand"
+	"strconv"
+	"strings"
+	"testing"
+	"time"
+
+	dockertypes "github.com/docker/docker/api/types"
+	dockercontainer "github.com/docker/docker/api/types/container"
+	dockernetwork "github.com/docker/docker/api/types/network"
+	dockerclient "github.com/docker/docker/client"
+)
+
+func NewDockerContainer(t testing.TB, image string, env []string, cmd []string) (*DockerContainer, error) {
+	c, err := dockerclient.NewEnvClient()
+	if err != nil {
+		return nil, err
+	}
+
+	if cmd == nil {
+		cmd = make([]string, 0)
+	}
+
+	contr := &DockerContainer{
+		t:         t,
+		client:    c,
+		ImageName: image,
+		ENV:       env,
+		Cmd:       cmd,
+	}
+
+	if err := contr.PullImage(); err != nil {
+		return nil, err
+	}
+
+	if err := contr.Start(); err != nil {
+		return nil, err
+	}
+
+	return contr, nil
+}
+
+// DockerContainer implements Instance interface
+type DockerContainer struct {
+	t                  testing.TB
+	client             *dockerclient.Client
+	ImageName          string
+	ENV                []string
+	Cmd                []string
+	ContainerId        string
+	ContainerName      string
+	ContainerJSON      dockertypes.ContainerJSON
+	containerInspected bool
+	keepForDebugging   bool
+}
+
+func (d *DockerContainer) PullImage() error {
+	d.t.Logf("Docker: Pull image %v", d.ImageName)
+	r, err := d.client.ImagePull(context.Background(), d.ImageName, dockertypes.ImagePullOptions{})
+	if err != nil {
+		return err
+	}
+	defer r.Close()
+
+	// read output and log relevant lines
+	bf := bufio.NewScanner(r)
+	for bf.Scan() {
+		var resp dockerImagePullOutput
+		if err := json.Unmarshal(bf.Bytes(), &resp); err != nil {
+			return err
+		}
+		if strings.HasPrefix(resp.Status, "Status: ") {
+			d.t.Logf("Docker: %v", resp.Status)
+		}
+	}
+	return bf.Err()
+}
+
+func (d *DockerContainer) Start() error {
+	containerName := fmt.Sprintf("migrate_test_%v", pseudoRandStr(10))
+
+	// create container first
+	resp, err :=
d.client.ContainerCreate(context.Background(), + &dockercontainer.Config{ + Image: d.ImageName, + Labels: map[string]string{"migrate_test": "true"}, + Env: d.ENV, + Cmd: d.Cmd, + }, + &dockercontainer.HostConfig{ + PublishAllPorts: true, + }, + &dockernetwork.NetworkingConfig{}, + containerName) + if err != nil { + return err + } + + d.ContainerId = resp.ID + d.ContainerName = containerName + + // then start it + if err := d.client.ContainerStart(context.Background(), resp.ID, dockertypes.ContainerStartOptions{}); err != nil { + return err + } + + d.t.Logf("Docker: Started container %v (%v) for image %v listening at %v:%v", resp.ID[0:12], containerName, d.ImageName, d.Host(), d.Port()) + for _, v := range resp.Warnings { + d.t.Logf("Docker: Warning: %v", v) + } + return nil +} + +func (d *DockerContainer) KeepForDebugging() { + d.keepForDebugging = true +} + +func (d *DockerContainer) Remove() error { + if d.keepForDebugging { + return nil + } + + if len(d.ContainerId) == 0 { + return fmt.Errorf("missing containerId") + } + if err := d.client.ContainerRemove(context.Background(), d.ContainerId, + dockertypes.ContainerRemoveOptions{ + Force: true, + }); err != nil { + d.t.Log(err) + return err + } + d.t.Logf("Docker: Removed %v", d.ContainerName) + return nil +} + +func (d *DockerContainer) Inspect() error { + if len(d.ContainerId) == 0 { + return fmt.Errorf("missing containerId") + } + resp, err := d.client.ContainerInspect(context.Background(), d.ContainerId) + if err != nil { + return err + } + + d.ContainerJSON = resp + d.containerInspected = true + return nil +} + +func (d *DockerContainer) Logs() (io.ReadCloser, error) { + if len(d.ContainerId) == 0 { + return nil, fmt.Errorf("missing containerId") + } + + return d.client.ContainerLogs(context.Background(), d.ContainerId, dockertypes.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + }) +} + +func (d *DockerContainer) portMapping(selectFirst bool, cPort int) (containerPort uint, hostIP string, hostPort uint, err error) { + if !d.containerInspected { + if err := d.Inspect(); err != nil { + d.t.Fatal(err) + } + } + + for port, bindings := range d.ContainerJSON.NetworkSettings.Ports { + if !selectFirst && port.Int() != cPort { + // Skip ahead until we find the port we want + continue + } + for _, binding := range bindings { + + hostPortUint, err := strconv.ParseUint(binding.HostPort, 10, 64) + if err != nil { + return 0, "", 0, err + } + + return uint(port.Int()), binding.HostIP, uint(hostPortUint), nil + } + } + + if selectFirst { + return 0, "", 0, fmt.Errorf("no port binding") + } else { + return 0, "", 0, fmt.Errorf("specified port not bound") + } +} + +func (d *DockerContainer) Host() string { + _, hostIP, _, err := d.portMapping(true, -1) + if err != nil { + d.t.Fatal(err) + } + + if hostIP == "0.0.0.0" { + return "127.0.0.1" + } else { + return hostIP + } +} + +func (d *DockerContainer) Port() uint { + _, _, port, err := d.portMapping(true, -1) + if err != nil { + d.t.Fatal(err) + } + return port +} + +func (d *DockerContainer) PortFor(cPort int) uint { + _, _, port, err := d.portMapping(false, cPort) + if err != nil { + d.t.Fatal(err) + } + return port +} + +func (d *DockerContainer) NetworkSettings() dockertypes.NetworkSettings { + netSettings := d.ContainerJSON.NetworkSettings + return *netSettings +} + +type dockerImagePullOutput struct { + Status string `json:"status"` + ProgressDetails struct { + Current int `json:"current"` + Total int `json:"total"` + } `json:"progressDetail"` + Id string `json:"id"` + 
Progress string `json:"progress"`
+}
+
+func init() {
+	rand.Seed(time.Now().UnixNano())
+}
+
+func pseudoRandStr(n int) string {
+	var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz0123456789")
+	b := make([]rune, n)
+	for i := range b {
+		b[i] = letterRunes[rand.Intn(len(letterRunes))]
+	}
+	return string(b)
+}
diff --git a/vendor/github.com/rdallman/migrate/testing/testing.go b/vendor/github.com/rdallman/migrate/testing/testing.go
new file mode 100644
index 000000000..64e0a6465
--- /dev/null
+++ b/vendor/github.com/rdallman/migrate/testing/testing.go
@@ -0,0 +1,96 @@
+package testing
+
+import (
+	"io/ioutil"
+	"os"
+	"strconv"
+	"testing"
+	"time"
+
+	dockertypes "github.com/docker/docker/api/types"
+)
+
+type IsReadyFunc func(Instance) bool
+
+type TestFunc func(*testing.T, Instance)
+
+type Version struct {
+	Image string
+	ENV   []string
+	Cmd   []string
+}
+
+func ParallelTest(t *testing.T, versions []Version, readyFn IsReadyFunc, testFn TestFunc) {
+	delay, err := strconv.Atoi(os.Getenv("MIGRATE_TEST_CONTAINER_BOOT_DELAY"))
+	if err != nil {
+		delay = 0
+	}
+
+	for i, version := range versions {
+		version := version // capture range variable, see https://goo.gl/60w3p2
+
+		// Only test against one version in short mode
+		// TODO: order is random, maybe always pick first version instead?
+		if i > 0 && testing.Short() {
+			t.Logf("Skipping %v in short mode", version)
+
+		} else {
+			t.Run(version.Image, func(t *testing.T) {
+				t.Parallel()
+
+				// create new container
+				container, err := NewDockerContainer(t, version.Image, version.ENV, version.Cmd)
+				if err != nil {
+					t.Fatalf("%v\n%s", err, containerLogs(t, container))
+				}
+
+				// make sure to remove container once done
+				defer container.Remove()
+
+				// wait until database is ready
+				tick := time.Tick(1000 * time.Millisecond)
+				timeout := time.After(time.Duration(delay+60) * time.Second)
+			outer:
+				for {
+					select {
+					case <-tick:
+						if readyFn(container) {
+							break outer
+						}
+
+					case <-timeout:
+						t.Fatalf("Docker: Container not ready, timeout for %v.\n%s", version, containerLogs(t, container))
+					}
+				}
+
+				time.Sleep(time.Duration(delay) * time.Second)
+
+				// we can now run the tests
+				testFn(t, container)
+			})
+		}
+	}
+}
+
+func containerLogs(t *testing.T, c *DockerContainer) []byte {
+	r, err := c.Logs()
+	if err != nil {
+		t.Errorf("%v", err)
+		return nil
+	}
+	defer r.Close()
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		t.Errorf("%v", err)
+		return nil
+	}
+	return b
+}
+
+type Instance interface {
+	Host() string
+	Port() uint
+	PortFor(int) uint
+	NetworkSettings() dockertypes.NetworkSettings
+	KeepForDebugging()
+}
diff --git a/vendor/github.com/rdallman/migrate/testing/testing_test.go b/vendor/github.com/rdallman/migrate/testing/testing_test.go
new file mode 100644
index 000000000..8217decfa
--- /dev/null
+++ b/vendor/github.com/rdallman/migrate/testing/testing_test.go
@@ -0,0 +1,20 @@
+package testing
+
+import (
+	"testing"
+)
+
+func ExampleParallelTest(t *testing.T) {
+	var isReady = func(i Instance) bool {
+		// Return true if Instance is ready to run tests.
+		// Don't block here though.
+		return true
+	}
+
+	// t is *testing.T coming from parent Test(t *testing.T)
+	ParallelTest(t, []Version{{Image: "docker_image:9.6"}}, isReady,
+		func(t *testing.T, i Instance) {
+			// Run your tests ...
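+			// The Instance argument exposes the container's mapped address;
+			// a typical first step is building a connection string from it
+			// (editor's sketch; 5432 is only an example container port):
+			//
+			//	addr := fmt.Sprintf("%v:%v", i.Host(), i.PortFor(5432))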
+			t.Fatal("...")
+		})
+}
diff --git a/vendor/github.com/rdallman/migrate/util.go b/vendor/github.com/rdallman/migrate/util.go
new file mode 100644
index 000000000..67048ea5c
--- /dev/null
+++ b/vendor/github.com/rdallman/migrate/util.go
@@ -0,0 +1,105 @@
+package migrate
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	nurl "net/url"
+	"strings"
+	"time"
+)
+
+// MultiError holds multiple errors.
+type MultiError struct {
+	Errs []error
+}
+
+// NewMultiError returns an error type holding multiple errors.
+func NewMultiError(errs ...error) MultiError {
+	compactErrs := make([]error, 0)
+	for _, e := range errs {
+		if e != nil {
+			compactErrs = append(compactErrs, e)
+		}
+	}
+	return MultiError{compactErrs}
+}
+
+// Error implements error. Multiple errors are concatenated with 'and's.
+func (m MultiError) Error() string {
+	var strs = make([]string, 0)
+	for _, e := range m.Errs {
+		if len(e.Error()) > 0 {
+			strs = append(strs, e.Error())
+		}
+	}
+	return strings.Join(strs, " and ")
+}
+
+// suint safely converts int to uint
+// see https://goo.gl/wEcqof
+// see https://goo.gl/pai7Dr
+func suint(n int) uint {
+	if n < 0 {
+		panic(fmt.Sprintf("suint(%v) expects input >= 0", n))
+	}
+	return uint(n)
+}
+
+// newSlowReader turns an io.ReadCloser into a slow io.ReadCloser.
+// Use this to simulate a slow internet connection.
+func newSlowReader(r io.ReadCloser) io.ReadCloser {
+	return &slowReader{
+		rx:     r,
+		reader: bufio.NewReader(r),
+	}
+}
+
+type slowReader struct {
+	rx     io.ReadCloser
+	reader *bufio.Reader
+}
+
+func (b *slowReader) Read(p []byte) (n int, err error) {
+	time.Sleep(10 * time.Millisecond)
+	c, err := b.reader.ReadByte()
+	if err != nil {
+		return 0, err
+	} else {
+		copy(p, []byte{c})
+		return 1, nil
+	}
+}
+
+func (b *slowReader) Close() error {
+	return b.rx.Close()
+}
+
+var errNoScheme = fmt.Errorf("no scheme")
+
+// schemeFromUrl returns the scheme from a URL string
+func schemeFromUrl(url string) (string, error) {
+	u, err := nurl.Parse(url)
+	if err != nil {
+		return "", err
+	}
+
+	if len(u.Scheme) == 0 {
+		return "", errNoScheme
+	}
+
+	return u.Scheme, nil
+}
+
+// FilterCustomQuery filters all query values starting with `x-`
+func FilterCustomQuery(u *nurl.URL) *nurl.URL {
+	ux := *u
+	vx := make(nurl.Values)
+	for k, v := range ux.Query() {
+		if len(k) <= 1 || k[0:2] != "x-" {
+			vx[k] = v
+		}
+	}
+	ux.RawQuery = vx.Encode()
+	return &ux
+}
diff --git a/vendor/github.com/rdallman/migrate/util_test.go b/vendor/github.com/rdallman/migrate/util_test.go
new file mode 100644
index 000000000..1ad234473
--- /dev/null
+++ b/vendor/github.com/rdallman/migrate/util_test.go
@@ -0,0 +1,32 @@
+package migrate
+
+import (
+	nurl "net/url"
+	"testing"
+)
+
+func TestSuintPanicsWithNegativeInput(t *testing.T) {
+	defer func() {
+		if r := recover(); r == nil {
+			t.Fatal("expected suint to panic for -1")
+		}
+	}()
+	suint(-1)
+}
+
+func TestSuint(t *testing.T) {
+	if u := suint(0); u != 0 {
+		t.Fatalf("expected 0, got %v", u)
+	}
+}
+
+func TestFilterCustomQuery(t *testing.T) {
+	n, err := nurl.Parse("foo://host?a=b&x-custom=foo&c=d")
+	if err != nil {
+		t.Fatal(err)
+	}
+	nx := FilterCustomQuery(n).Query()
+	if nx.Get("x-custom") != "" {
+		t.Fatalf("didn't expect x-custom")
+	}
+}
diff --git a/vendor/golang.org/x/crypto/acme/acme.go b/vendor/golang.org/x/crypto/acme/acme.go
index e8388b083..fa9c4b39e 100644
--- a/vendor/golang.org/x/crypto/acme/acme.go
+++ b/vendor/golang.org/x/crypto/acme/acme.go
@@ -142,7 +142,7 @@ func (c *Client) Discover(ctx context.Context)
(Directory, error) { // // In the case where CA server does not provide the issued certificate in the response, // CreateCert will poll certURL using c.FetchCert, which will result in additional round-trips. -// In such scenario the caller can cancel the polling with ctx. +// In such a scenario, the caller can cancel the polling with ctx. // // CreateCert returns an error if the CA's response or chain was unreasonably large. // Callers are encouraged to parse the returned value to ensure the certificate is valid and has the expected features. @@ -257,7 +257,7 @@ func (c *Client) RevokeCert(ctx context.Context, key crypto.Signer, cert []byte, func AcceptTOS(tosURL string) bool { return true } // Register creates a new account registration by following the "new-reg" flow. -// It returns registered account. The account is not modified. +// It returns the registered account. The account is not modified. // // The registration may require the caller to agree to the CA's Terms of Service (TOS). // If so, and the account has not indicated the acceptance of the terms (see Account for details), @@ -995,6 +995,7 @@ func keyAuth(pub crypto.PublicKey, token string) (string, error) { // tlsChallengeCert creates a temporary certificate for TLS-SNI challenges // with the given SANs and auto-generated public/private key pair. +// The Subject Common Name is set to the first SAN to aid debugging. // To create a cert with a custom key pair, specify WithKey option. func tlsChallengeCert(san []string, opt []CertOption) (tls.Certificate, error) { var ( @@ -1033,6 +1034,9 @@ func tlsChallengeCert(san []string, opt []CertOption) (tls.Certificate, error) { } } tmpl.DNSNames = san + if len(san) > 0 { + tmpl.Subject.CommonName = san[0] + } der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) if err != nil { diff --git a/vendor/golang.org/x/crypto/acme/acme_test.go b/vendor/golang.org/x/crypto/acme/acme_test.go index 14832de49..b44af5959 100644 --- a/vendor/golang.org/x/crypto/acme/acme_test.go +++ b/vendor/golang.org/x/crypto/acme/acme_test.go @@ -1186,6 +1186,9 @@ func TestTLSSNI01ChallengeCert(t *testing.T) { if cert.DNSNames[0] != name { t.Errorf("cert.DNSNames[0] != name: %q vs %q", cert.DNSNames[0], name) } + if cn := cert.Subject.CommonName; cn != san { + t.Errorf("cert.Subject.CommonName = %q; want %q", cn, san) + } } func TestTLSSNI02ChallengeCert(t *testing.T) { @@ -1219,6 +1222,9 @@ func TestTLSSNI02ChallengeCert(t *testing.T) { if i >= len(cert.DNSNames) || cert.DNSNames[i] != name { t.Errorf("%v doesn't have %q", cert.DNSNames, name) } + if cn := cert.Subject.CommonName; cn != sanA { + t.Errorf("CommonName = %q; want %q", cn, sanA) + } } func TestTLSChallengeCertOpt(t *testing.T) { diff --git a/vendor/golang.org/x/crypto/acme/autocert/autocert.go b/vendor/golang.org/x/crypto/acme/autocert/autocert.go index a478eff54..94edba986 100644 --- a/vendor/golang.org/x/crypto/acme/autocert/autocert.go +++ b/vendor/golang.org/x/crypto/acme/autocert/autocert.go @@ -83,8 +83,10 @@ func defaultHostPolicy(context.Context, string) error { // It obtains and refreshes certificates automatically, // as well as providing them to a TLS server via tls.Config. // -// To preserve issued certificates and improve overall performance, -// use a cache implementation of Cache. For instance, DirCache. +// You must specify a cache implementation, such as DirCache, +// to reuse obtained certificates across program restarts. 
+// Otherwise your server is very likely to exceed the certificate
+// issuer's request rate limits.
 type Manager struct {
 	// Prompt specifies a callback function to conditionally accept a CA's Terms of Service (TOS).
 	// The registration may require the caller to agree to the CA's TOS.
@@ -369,7 +371,7 @@ func (m *Manager) createCert(ctx context.Context, domain string) (*tls.Certifica
 
 	// We are the first; state is locked.
 	// Unblock the readers when domain ownership is verified
-	// and the we got the cert or the process failed.
+	// and we got the cert or the process failed.
 	defer state.Unlock()
 	state.locked = false
 
@@ -437,7 +439,7 @@ func (m *Manager) certState(domain string) (*certState, error) {
 	return state, nil
 }
 
-// authorizedCert starts domain ownership verification process and requests a new cert upon success.
+// authorizedCert starts the domain ownership verification process and requests a new cert upon success.
 // The key argument is the certificate private key.
 func (m *Manager) authorizedCert(ctx context.Context, key crypto.Signer, domain string) (der [][]byte, leaf *x509.Certificate, err error) {
 	if err := m.verify(ctx, domain); err != nil {
diff --git a/vendor/golang.org/x/crypto/acme/autocert/example_test.go b/vendor/golang.org/x/crypto/acme/autocert/example_test.go
index c6267b8dd..71d61eb1c 100644
--- a/vendor/golang.org/x/crypto/acme/autocert/example_test.go
+++ b/vendor/golang.org/x/crypto/acme/autocert/example_test.go
@@ -23,6 +23,7 @@ func ExampleNewListener() {
 
 func ExampleManager() {
 	m := autocert.Manager{
+		Cache:      autocert.DirCache("secret-dir"),
 		Prompt:     autocert.AcceptTOS,
 		HostPolicy: autocert.HostWhitelist("example.org"),
 	}
diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_test.go b/vendor/golang.org/x/crypto/blake2b/blake2b_test.go
index 4aa49579a..5d68bbf60 100644
--- a/vendor/golang.org/x/crypto/blake2b/blake2b_test.go
+++ b/vendor/golang.org/x/crypto/blake2b/blake2b_test.go
@@ -126,7 +126,7 @@ func testHashes2X(t *testing.T) {
 		t.Fatalf("#%d (single write): error from Read: %v", i, err)
 	}
 	if n, err := h.Read(sum); n != 0 || err != io.EOF {
-		t.Fatalf("#%d (single write): Read did not return (0, os.EOF) after exhaustion, got (%v, %v)", i, n, err)
+		t.Fatalf("#%d (single write): Read did not return (0, io.EOF) after exhaustion, got (%v, %v)", i, n, err)
 	}
 	if gotHex := fmt.Sprintf("%x", sum); gotHex != expectedHex {
 		t.Fatalf("#%d (single write): got %s, wanted %s", i, gotHex, expectedHex)
diff --git a/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/golang.org/x/crypto/ocsp/ocsp.go
index ae8d63e73..5e73146d7 100644
--- a/vendor/golang.org/x/crypto/ocsp/ocsp.go
+++ b/vendor/golang.org/x/crypto/ocsp/ocsp.go
@@ -295,17 +295,17 @@ const (
 
 // The enumerated reasons for revoking a certificate. See RFC 5280.
 const (
-	Unspecified          = iota
-	KeyCompromise        = iota
-	CACompromise         = iota
-	AffiliationChanged   = iota
-	Superseded           = iota
-	CessationOfOperation = iota
-	CertificateHold      = iota
-	_                    = iota
-	RemoveFromCRL        = iota
-	PrivilegeWithdrawn   = iota
-	AACompromise         = iota
+	Unspecified          = 0
+	KeyCompromise        = 1
+	CACompromise         = 2
+	AffiliationChanged   = 3
+	Superseded           = 4
+	CessationOfOperation = 5
+	CertificateHold      = 6
+
+	RemoveFromCRL      = 8
+	PrivilegeWithdrawn = 9
+	AACompromise       = 10
 )
 
 // Request represents an OCSP request. See RFC 6960.
@@ -659,7 +659,7 @@ func CreateRequest(cert, issuer *x509.Certificate, opts *RequestOptions) ([]byte
 //
 // The issuer cert is used to populate the IssuerNameHash and IssuerKeyHash fields.
// -// The template is used to populate the SerialNumber, RevocationStatus, RevokedAt, +// The template is used to populate the SerialNumber, Status, RevokedAt, // RevocationReason, ThisUpdate, and NextUpdate fields. // // If template.IssuerHash is not set, SHA1 will be used. diff --git a/vendor/golang.org/x/crypto/scrypt/example_test.go b/vendor/golang.org/x/crypto/scrypt/example_test.go new file mode 100644 index 000000000..6736479b1 --- /dev/null +++ b/vendor/golang.org/x/crypto/scrypt/example_test.go @@ -0,0 +1,26 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package scrypt_test + +import ( + "encoding/base64" + "fmt" + "log" + + "golang.org/x/crypto/scrypt" +) + +func Example() { + // DO NOT use this salt value; generate your own random salt. 8 bytes is + // a good length. + salt := []byte{0xc8, 0x28, 0xf2, 0x58, 0xa7, 0x6a, 0xad, 0x7b} + + dk, err := scrypt.Key([]byte("some password"), salt, 1<<15, 8, 1, 32) + if err != nil { + log.Fatal(err) + } + fmt.Println(base64.StdEncoding.EncodeToString(dk)) + // Output: lGnMz8io0AUkfzn6Pls1qX20Vs7PGN6sbYQ2TQgY12M= +} diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go index 14375c509..ff28aaef6 100644 --- a/vendor/golang.org/x/crypto/scrypt/scrypt.go +++ b/vendor/golang.org/x/crypto/scrypt/scrypt.go @@ -220,9 +220,10 @@ func smix(b []byte, r, N int, v, xy []uint32) { // // dk, err := scrypt.Key([]byte("some password"), salt, 16384, 8, 1, 32) // -// The recommended parameters for interactive logins as of 2009 are N=16384, -// r=8, p=1. They should be increased as memory latency and CPU parallelism -// increases. Remember to get a good random salt. +// The recommended parameters for interactive logins as of 2017 are N=32768, r=8 +// and p=1. The parameters N, r, and p should be increased as memory latency and +// CPU parallelism increases; consider setting N to the highest power of 2 you +// can derive within 100 milliseconds. Remember to get a good random salt. func Key(password, salt []byte, N, r, p, keyLen int) ([]byte, error) { if N <= 1 || N&(N-1) != 0 { return nil, errors.New("scrypt: N must be > 1 and a power of 2") diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt_test.go b/vendor/golang.org/x/crypto/scrypt/scrypt_test.go index e096c3a31..766ed8d90 100644 --- a/vendor/golang.org/x/crypto/scrypt/scrypt_test.go +++ b/vendor/golang.org/x/crypto/scrypt/scrypt_test.go @@ -153,8 +153,10 @@ func TestKey(t *testing.T) { } } +var sink []byte + func BenchmarkKey(b *testing.B) { for i := 0; i < b.N; i++ { - Key([]byte("password"), []byte("salt"), 16384, 8, 1, 64) + sink, _ = Key([]byte("password"), []byte("salt"), 1<<15, 8, 1, 64) } } diff --git a/vendor/golang.org/x/crypto/ssh/buffer.go b/vendor/golang.org/x/crypto/ssh/buffer.go index 6931b5114..1ab07d078 100644 --- a/vendor/golang.org/x/crypto/ssh/buffer.go +++ b/vendor/golang.org/x/crypto/ssh/buffer.go @@ -51,13 +51,12 @@ func (b *buffer) write(buf []byte) { } // eof closes the buffer. Reads from the buffer once all -// the data has been consumed will receive os.EOF. -func (b *buffer) eof() error { +// the data has been consumed will receive io.EOF. +func (b *buffer) eof() { b.Cond.L.Lock() b.closed = true b.Cond.Signal() b.Cond.L.Unlock() - return nil } // Read reads data from the internal buffer in buf. Reads will block