diff --git a/Gopkg.lock b/Gopkg.lock index 063021901..8af091b2b 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -154,7 +154,7 @@ "pkg/term", "pkg/term/windows" ] - revision = "1346a2c89a11f2d111ff20f46d557f1f9ccbbeb7" + revision = "241c904e6f5fff020890a7641558e83a209c0bbd" [[projects]] name = "github.com/docker/go-connections" @@ -193,8 +193,8 @@ "client/routes", "models" ] - revision = "847fec724330b2741336431502db292fc5a45211" - version = "0.2.3" + revision = "1c5ec475d4536388b366b1f075945cd64d8c1cb4" + version = "0.2.4" [[projects]] name = "github.com/fsouza/go-dockerclient" @@ -207,8 +207,8 @@ "internal", "redis" ] - revision = "d1ed5c67e5794de818ea85e6b522fda02623a484" - version = "v1.5.0" + revision = "a69d19351219b6dd56f274f96d85a7014a2ec34e" + version = "v1.6.0" [[projects]] name = "github.com/gin-contrib/cors" @@ -235,8 +235,8 @@ [[projects]] name = "github.com/go-ini/ini" packages = ["."] - revision = "32e4c1e6bc4e7d0d8451aa6b75200d19e37a536a" - version = "v1.32.0" + revision = "6333e38ac20b8949a8dd68baa3650f4dee8f39f0" + version = "v1.33.0" [[projects]] branch = "master" @@ -287,7 +287,7 @@ branch = "master" name = "github.com/go-openapi/spec" packages = ["."] - revision = "1de3e0542de65ad8d75452a595886fdd0befb363" + revision = "d8000b5bfbd1147255710505a27c735b6b2ae2ac" [[projects]] branch = "master" @@ -299,7 +299,7 @@ branch = "master" name = "github.com/go-openapi/swag" packages = ["."] - revision = "0d03ad0b6405ada874d59d97c416b5cf4234e154" + revision = "ceb469cb0fdf2d792f28d771bc05da6c606f55e5" [[projects]] branch = "master" @@ -361,7 +361,7 @@ ".", "reflectx" ] - revision = "05cef0741ade10ca668982355b3f3f0bcf0ff0a8" + revision = "cf35089a197953c69420c8d0cecda90809764b1d" [[projects]] branch = "master" @@ -380,17 +380,7 @@ "jlexer", "jwriter" ] - revision = "32fa128f234d041f196a9f3e0fea5ac9772c08e1" - -[[projects]] - name = "github.com/mattes/migrate" - packages = [ - ".", - "database", - "source" - ] - revision = "035c07716cd373d88456ec4d701402df52584cb4" - version = "v3.0.1" + revision = "f594efddfa171111dc4349cd6e78e8f61dc7936f" [[projects]] name = "github.com/mattn/go-isatty" @@ -441,7 +431,6 @@ version = "v0.1.1" [[projects]] - branch = "master" name = "github.com/openzipkin/zipkin-go" packages = [ "model", @@ -449,6 +438,7 @@ "reporter/http" ] revision = "f197ec29e729f226d23370ea60f0e49b8f44ccf4" + version = "v0.1.0" [[projects]] name = "github.com/patrickmn/go-cache" @@ -491,7 +481,7 @@ "internal/bitbucket.org/ww/goautoneg", "model" ] - revision = "89604d197083d4781071d3c65855d24ecfb0a563" + revision = "e4aa40a9169a88835b849a6efb71e05dc04b88f0" [[projects]] branch = "master" @@ -502,19 +492,7 @@ "nfs", "xfs" ] - revision = "75f2d6163c7a100bed6e971044ea3de30ee3a678" - -[[projects]] - name = "github.com/rdallman/migrate" - packages = [ - ".", - "database/mysql", - "database/postgres", - "database/sqlite3", - "source", - "source/go-bindata" - ] - revision = "bc72eeb997c7334cb5f05f5aefd2d70bc34d71ef" + revision = "54d17b57dd7d4a3aa092476596b3f8a933bde349" [[projects]] name = "github.com/sirupsen/logrus" @@ -547,13 +525,13 @@ "trace", "trace/propagation" ] - revision = "4566f1f203a083514e9c57fecc0836ecc4a0eb11" + revision = "f1af72ab88d638dcc20ea6ecf83c98b59b092559" [[projects]] branch = "master" name = "golang.org/x/crypto" packages = ["ssh/terminal"] - revision = "8c653846df49742c4c85ec37e5d9f8d3ba657895" + revision = "182114d582623c1caa54f73de9c7224e23a48487" [[projects]] branch = "master" @@ -568,7 +546,7 @@ "lex/httplex", "trace" ] - revision = 
"cbe0f9307d0156177f9dd5dc85da1a31abc5f2fb" + revision = "ae89d30ce0c63142b652837da33d782e2b0a9b25" [[projects]] branch = "master" @@ -577,7 +555,7 @@ "unix", "windows" ] - revision = "f6cff0780e542efa0c8e864dc8fa522808f6a598" + revision = "c28acc882ebcbfbe8ce9f0f14b9ac26ee138dd51" [[projects]] name = "golang.org/x/text" @@ -605,7 +583,7 @@ branch = "master" name = "google.golang.org/genproto" packages = ["googleapis/rpc/status"] - revision = "2b5a72b8730b0b16380010cfe5286c42108d88e7" + revision = "df60624c1e9b9d2973e889c7a1cff73155da81c4" [[projects]] name = "google.golang.org/grpc" @@ -661,6 +639,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "1818456581749a8ec46a6fded1c10e7f706f9e77aa1ac2061d25c3080883376d" + inputs-digest = "4001fea69927fe605d8c5a47d02f119a0eef15b291eff46d6d993309462f5406" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index a68d81256..78952a80b 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -53,10 +53,6 @@ ignored = ["github.com/fnproject/fn/cli"] name = "github.com/docker/distribution" revision = "bc3c7b0525e59d3ecfab3e1568350895fd4a462f" -[[constraint]] - name = "github.com/rdallman/migrate" # TODO change to mattes/migrate w/ https://github.com/mattes/migrate/pull/299 - revision = "bc72eeb997c7334cb5f05f5aefd2d70bc34d71ef" - [[constraint]] # NOTE: locked for a reason - https://github.com/go-sql-driver/mysql/issues/657 name = "github.com/go-sql-driver/mysql" diff --git a/api/datastore/sql/migratex/migrate.go b/api/datastore/sql/migratex/migrate.go index b85b99fff..20661721d 100644 --- a/api/datastore/sql/migratex/migrate.go +++ b/api/datastore/sql/migratex/migrate.go @@ -20,9 +20,8 @@ var ( // TODO doesn't have to be a glob MigrationsTable = "schema_migrations" - ErrLocked = errors.New("database is locked") - ErrDirty = errors.New("database is dirty") - ErrOutOfOrder = errors.New("non-contiguous migration attempted") + ErrLocked = errors.New("database is locked") + ErrDirty = errors.New("database is dirty") ) const ( @@ -30,8 +29,8 @@ const ( ) type Migration interface { - Up(*sqlx.Tx) error - Down(*sqlx.Tx) error + Up(context.Context, *sqlx.Tx) error + Down(context.Context, *sqlx.Tx) error Version() int64 } @@ -45,14 +44,14 @@ var _ Migration = new(MigFields) // MigFields implements Migration and can be used for convenience. type MigFields struct { - UpFunc func(*sqlx.Tx) error - DownFunc func(*sqlx.Tx) error + UpFunc func(context.Context, *sqlx.Tx) error + DownFunc func(context.Context, *sqlx.Tx) error VersionFunc func() int64 } -func (m MigFields) Up(tx *sqlx.Tx) error { return m.UpFunc(tx) } -func (m MigFields) Down(tx *sqlx.Tx) error { return m.DownFunc(tx) } -func (m MigFields) Version() int64 { return m.VersionFunc() } +func (m MigFields) Up(ctx context.Context, tx *sqlx.Tx) error { return m.UpFunc(ctx, tx) } +func (m MigFields) Down(ctx context.Context, tx *sqlx.Tx) error { return m.DownFunc(ctx, tx) } +func (m MigFields) Version() int64 { return m.VersionFunc() } // TODO instance must have `multiStatements` set to true ? 
@@ -65,13 +64,10 @@ func Down(ctx context.Context, db *sqlx.DB, migs []Migration) error { } func migrate(ctx context.Context, db *sqlx.DB, migs []Migration, up bool) error { - var curVersion int64 + var curVersion int64 // could be NilVersion, is ok err := tx(ctx, db, func(tx *sqlx.Tx) error { - err := ensureVersionTable(ctx, tx) - if err != nil { - return err - } var dirty bool + var err error curVersion, dirty, err = Version(ctx, tx) if dirty { return ErrDirty @@ -90,11 +86,11 @@ func migrate(ctx context.Context, db *sqlx.DB, migs []Migration, up bool) error if up { sort.Sort(sorted(migs)) } else { - migs = []Migration(sort.Reverse(sorted(migs)).(sorted)) + sort.Sort(sort.Reverse(sorted(migs))) } for _, m := range migs { // skip over migrations we have run - if (up && curVersion < m.Version()) || (!up && curVersion > m.Version()) { + if (up && curVersion < m.Version()) || (!up && curVersion >= m.Version()) { // do each individually, for large migrations it's better to checkpoint // than to try to do them all in one big go. @@ -182,9 +178,9 @@ func run(ctx context.Context, db *sqlx.DB, m Migration, up bool) error { // enforce monotonicity if up && curVersion != NilVersion && m.Version() != curVersion+1 { - return ErrOutOfOrder + return fmt.Errorf("non-contiguous migration attempted up: %v != %v", m.Version(), curVersion+1) } else if !up && m.Version() != curVersion { // down is always unraveling - return ErrOutOfOrder + return fmt.Errorf("non-contiguous migration attempted down: %v != %v", m.Version(), curVersion) } // TODO is this robust enough? we could check @@ -194,12 +190,12 @@ func run(ctx context.Context, db *sqlx.DB, m Migration, up bool) error { } // TODO we don't need the dirty bit anymore since we're using transactions? - err = SetVersion(ctx, tx, m.Version(), true) + err = SetVersion(ctx, tx, version, true) if up { - err = m.Up(tx) + err = m.Up(ctx, tx) } else { - err = m.Down(tx) + err = m.Down(ctx, tx) } if err != nil { @@ -230,7 +226,7 @@ func lock(ctx context.Context, tx *sqlx.Tx) error { var query string switch tx.DriverName() { case "postgres", "pgx", "pq-timeouts", "cloudsqlpostgres": - query = `SELECT pg_try_advisory_lock($1)` + query = `SELECT pg_try_advisory_lock(?)` case "mysql", "oci8", "ora", "goracle": query = "SELECT GET_LOCK(?, -1)" case "sqlite3": @@ -260,7 +256,7 @@ func unlock(ctx context.Context, tx *sqlx.Tx) error { var query string switch tx.DriverName() { case "postgres", "pgx", "pq-timeouts", "cloudsqlpostgres": - query = `SELECT pg_advisory_unlock($1)` + query = `SELECT pg_advisory_unlock(?)` case "mysql", "oci8", "ora", "goracle": query = `SELECT RELEASE_LOCK(?)` case "sqlite3": @@ -277,6 +273,11 @@ func unlock(ctx context.Context, tx *sqlx.Tx) error { } func SetVersion(ctx context.Context, tx *sqlx.Tx, version int64, dirty bool) error { + err := ensureVersionTable(ctx, tx) + if err != nil { + return err + } + // TODO need to handle down migration better // ideally, we have a record of each up/down migration with a timestamp for auditing, // this just nukes the whole table which is kinda lame.
@@ -286,7 +287,7 @@ func SetVersion(ctx context.Context, tx *sqlx.Tx, version int64, dirty bool) err } if version >= 0 { - query = tx.Rebind("INSERT INTO `" + MigrationsTable + "` (version, dirty) VALUES (?, ?)") + query = tx.Rebind(`INSERT INTO ` + MigrationsTable + ` (version, dirty) VALUES (?, ?)`) if _, err := tx.ExecContext(ctx, query, version, dirty); err != nil { return err } @@ -296,7 +297,7 @@ func SetVersion(ctx context.Context, tx *sqlx.Tx, version int64, dirty bool) err } func Version(ctx context.Context, tx *sqlx.Tx) (version int64, dirty bool, err error) { - query := tx.Rebind("SELECT version, dirty FROM `" + MigrationsTable + "` LIMIT 1") + query := tx.Rebind(`SELECT version, dirty FROM ` + MigrationsTable + ` LIMIT 1`) err = tx.QueryRowContext(ctx, query).Scan(&version, &dirty) switch { case err == sql.ErrNoRows: diff --git a/api/datastore/sql/migratex/migrate_test.go b/api/datastore/sql/migratex/migrate_test.go index 20fb76950..39312ca07 100644 --- a/api/datastore/sql/migratex/migrate_test.go +++ b/api/datastore/sql/migratex/migrate_test.go @@ -14,15 +14,15 @@ const testsqlite3 = "file::memory:?mode=memory&cache=shared" type tm struct{} -func (t *tm) Up(tx *sqlx.Tx) error { - _, err := tx.Exec(`CREATE TABLE IF NOT EXISTS foo ( +func (t *tm) Up(ctx context.Context, tx *sqlx.Tx) error { + _, err := tx.ExecContext(ctx, `CREATE TABLE IF NOT EXISTS foo ( bar bigint NOT NULL PRIMARY KEY )`) return err } -func (t *tm) Down(tx *sqlx.Tx) error { - _, err := tx.Exec("DROP TABLE foo") +func (t *tm) Down(ctx context.Context, tx *sqlx.Tx) error { + _, err := tx.ExecContext(ctx, "DROP TABLE foo") return err } diff --git a/api/datastore/sql/migrations/1_add_route_created_at.down.sql b/api/datastore/sql/migrations/1_add_route_created_at.down.sql deleted file mode 100644 index 836d62f91..000000000 --- a/api/datastore/sql/migrations/1_add_route_created_at.down.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE routes DROP COLUMN created_at; diff --git a/api/datastore/sql/migrations/1_add_route_created_at.go b/api/datastore/sql/migrations/1_add_route_created_at.go new file mode 100644 index 000000000..ea25c17ab --- /dev/null +++ b/api/datastore/sql/migrations/1_add_route_created_at.go @@ -0,0 +1,26 @@ +package migrations + +import ( + "context" + + "github.com/fnproject/fn/api/datastore/sql/migratex" + "github.com/jmoiron/sqlx" +) + +func up1(ctx context.Context, tx *sqlx.Tx) error { + _, err := tx.ExecContext(ctx, "ALTER TABLE routes ADD created_at text;") + return err +} + +func down1(ctx context.Context, tx *sqlx.Tx) error { + _, err := tx.ExecContext(ctx, "ALTER TABLE routes DROP COLUMN created_at;") + return err +} + +func init() { + Migrations = append(Migrations, &migratex.MigFields{ + VersionFunc: vfunc(1), + UpFunc: up1, + DownFunc: down1, + }) +} diff --git a/api/datastore/sql/migrations/1_add_route_created_at.up.sql b/api/datastore/sql/migrations/1_add_route_created_at.up.sql deleted file mode 100644 index df65b916c..000000000 --- a/api/datastore/sql/migrations/1_add_route_created_at.up.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE routes ADD created_at text; diff --git a/api/datastore/sql/migrations/2_add_call_stats.down.sql b/api/datastore/sql/migrations/2_add_call_stats.down.sql deleted file mode 100644 index 04d25653a..000000000 --- a/api/datastore/sql/migrations/2_add_call_stats.down.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE calls DROP COLUMN stats; diff --git a/api/datastore/sql/migrations/2_add_call_stats.go b/api/datastore/sql/migrations/2_add_call_stats.go new file mode 
100644 index 000000000..e536a5a52 --- /dev/null +++ b/api/datastore/sql/migrations/2_add_call_stats.go @@ -0,0 +1,26 @@ +package migrations + +import ( + "context" + + "github.com/fnproject/fn/api/datastore/sql/migratex" + "github.com/jmoiron/sqlx" +) + +func up2(ctx context.Context, tx *sqlx.Tx) error { + _, err := tx.ExecContext(ctx, "ALTER TABLE calls ADD stats text;") + return err +} + +func down2(ctx context.Context, tx *sqlx.Tx) error { + _, err := tx.ExecContext(ctx, "ALTER TABLE calls DROP COLUMN stats;") + return err +} + +func init() { + Migrations = append(Migrations, &migratex.MigFields{ + VersionFunc: vfunc(2), + UpFunc: up2, + DownFunc: down2, + }) +} diff --git a/api/datastore/sql/migrations/2_add_call_stats.up.sql b/api/datastore/sql/migrations/2_add_call_stats.up.sql deleted file mode 100644 index a1aa83e9f..000000000 --- a/api/datastore/sql/migrations/2_add_call_stats.up.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE calls ADD stats text; diff --git a/api/datastore/sql/migrations/3_add_call_error.down.sql b/api/datastore/sql/migrations/3_add_call_error.down.sql deleted file mode 100644 index 5319e0a8b..000000000 --- a/api/datastore/sql/migrations/3_add_call_error.down.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE calls DROP COLUMN error; diff --git a/api/datastore/sql/migrations/3_add_call_error.go b/api/datastore/sql/migrations/3_add_call_error.go new file mode 100644 index 000000000..4f3b16d9a --- /dev/null +++ b/api/datastore/sql/migrations/3_add_call_error.go @@ -0,0 +1,26 @@ +package migrations + +import ( + "context" + + "github.com/fnproject/fn/api/datastore/sql/migratex" + "github.com/jmoiron/sqlx" +) + +func up3(ctx context.Context, tx *sqlx.Tx) error { + _, err := tx.ExecContext(ctx, "ALTER TABLE calls ADD error text;") + return err +} + +func down3(ctx context.Context, tx *sqlx.Tx) error { + _, err := tx.ExecContext(ctx, "ALTER TABLE calls DROP COLUMN error;") + return err +} + +func init() { + Migrations = append(Migrations, &migratex.MigFields{ + VersionFunc: vfunc(3), + UpFunc: up3, + DownFunc: down3, + }) +} diff --git a/api/datastore/sql/migrations/3_add_call_error.up.sql b/api/datastore/sql/migrations/3_add_call_error.up.sql deleted file mode 100644 index f2c4b12a0..000000000 --- a/api/datastore/sql/migrations/3_add_call_error.up.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE calls ADD error text; diff --git a/api/datastore/sql/migrations/4_add_route_updated_at.down.sql b/api/datastore/sql/migrations/4_add_route_updated_at.down.sql deleted file mode 100644 index 3b7031b52..000000000 --- a/api/datastore/sql/migrations/4_add_route_updated_at.down.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE routes DROP COLUMN updated_at; diff --git a/api/datastore/sql/migrations/4_add_route_updated_at.go b/api/datastore/sql/migrations/4_add_route_updated_at.go new file mode 100644 index 000000000..5a2844d3b --- /dev/null +++ b/api/datastore/sql/migrations/4_add_route_updated_at.go @@ -0,0 +1,26 @@ +package migrations + +import ( + "context" + + "github.com/fnproject/fn/api/datastore/sql/migratex" + "github.com/jmoiron/sqlx" +) + +func up4(ctx context.Context, tx *sqlx.Tx) error { + _, err := tx.ExecContext(ctx, "ALTER TABLE routes ADD updated_at VARCHAR(256);") + return err +} + +func down4(ctx context.Context, tx *sqlx.Tx) error { + _, err := tx.ExecContext(ctx, "ALTER TABLE routes DROP COLUMN updated_at;") + return err +} + +func init() { + Migrations = append(Migrations, &migratex.MigFields{ + VersionFunc: vfunc(4), + UpFunc: up4, + DownFunc: down4, + }) +} diff --git 
a/api/datastore/sql/migrations/4_add_route_updated_at.up.sql b/api/datastore/sql/migrations/4_add_route_updated_at.up.sql deleted file mode 100644 index f110016c4..000000000 --- a/api/datastore/sql/migrations/4_add_route_updated_at.up.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE routes ADD updated_at varchar(256); diff --git a/api/datastore/sql/migrations/5_add_app_created_at.down.sql b/api/datastore/sql/migrations/5_add_app_created_at.down.sql deleted file mode 100644 index 0a50d859d..000000000 --- a/api/datastore/sql/migrations/5_add_app_created_at.down.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE apps DROP COLUMN created_at; diff --git a/api/datastore/sql/migrations/5_add_app_created_at.go b/api/datastore/sql/migrations/5_add_app_created_at.go new file mode 100644 index 000000000..03f62c4f2 --- /dev/null +++ b/api/datastore/sql/migrations/5_add_app_created_at.go @@ -0,0 +1,26 @@ +package migrations + +import ( + "context" + + "github.com/fnproject/fn/api/datastore/sql/migratex" + "github.com/jmoiron/sqlx" +) + +func up5(ctx context.Context, tx *sqlx.Tx) error { + _, err := tx.ExecContext(ctx, "ALTER TABLE apps ADD created_at VARCHAR(256);") + return err +} + +func down5(ctx context.Context, tx *sqlx.Tx) error { + _, err := tx.ExecContext(ctx, "ALTER TABLE apps DROP COLUMN created_at;") + return err +} + +func init() { + Migrations = append(Migrations, &migratex.MigFields{ + VersionFunc: vfunc(5), + UpFunc: up5, + DownFunc: down5, + }) +} diff --git a/api/datastore/sql/migrations/5_add_app_created_at.up.sql b/api/datastore/sql/migrations/5_add_app_created_at.up.sql deleted file mode 100644 index 38aea956f..000000000 --- a/api/datastore/sql/migrations/5_add_app_created_at.up.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE apps ADD created_at varchar(256); diff --git a/api/datastore/sql/migrations/6_add_app_updated_at.down.sql b/api/datastore/sql/migrations/6_add_app_updated_at.down.sql deleted file mode 100644 index a59eaee2e..000000000 --- a/api/datastore/sql/migrations/6_add_app_updated_at.down.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE apps DROP COLUMN updated_at; diff --git a/api/datastore/sql/migrations/6_add_app_updated_at.go b/api/datastore/sql/migrations/6_add_app_updated_at.go new file mode 100644 index 000000000..a860b0fc6 --- /dev/null +++ b/api/datastore/sql/migrations/6_add_app_updated_at.go @@ -0,0 +1,26 @@ +package migrations + +import ( + "context" + + "github.com/fnproject/fn/api/datastore/sql/migratex" + "github.com/jmoiron/sqlx" +) + +func up6(ctx context.Context, tx *sqlx.Tx) error { + _, err := tx.ExecContext(ctx, "ALTER TABLE apps ADD updated_at VARCHAR(256);") + return err +} + +func down6(ctx context.Context, tx *sqlx.Tx) error { + _, err := tx.ExecContext(ctx, "ALTER TABLE apps DROP COLUMN updated_at;") + return err +} + +func init() { + Migrations = append(Migrations, &migratex.MigFields{ + VersionFunc: vfunc(6), + UpFunc: up6, + DownFunc: down6, + }) +} diff --git a/api/datastore/sql/migrations/6_add_app_updated_at.up.sql b/api/datastore/sql/migrations/6_add_app_updated_at.up.sql deleted file mode 100644 index 5b8f715ce..000000000 --- a/api/datastore/sql/migrations/6_add_app_updated_at.up.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE apps ADD updated_at varchar(256); diff --git a/api/datastore/sql/migrations/7_add_route_cpus.down.sql b/api/datastore/sql/migrations/7_add_route_cpus.down.sql deleted file mode 100644 index a4fcefbac..000000000 --- a/api/datastore/sql/migrations/7_add_route_cpus.down.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE routes DROP COLUMN 
cpus; diff --git a/api/datastore/sql/migrations/7_add_route_cpus.go b/api/datastore/sql/migrations/7_add_route_cpus.go new file mode 100644 index 000000000..b20b21d72 --- /dev/null +++ b/api/datastore/sql/migrations/7_add_route_cpus.go @@ -0,0 +1,26 @@ +package migrations + +import ( + "context" + + "github.com/fnproject/fn/api/datastore/sql/migratex" + "github.com/jmoiron/sqlx" +) + +func up7(ctx context.Context, tx *sqlx.Tx) error { + _, err := tx.ExecContext(ctx, "ALTER TABLE routes ADD cpus int;") + return err +} + +func down7(ctx context.Context, tx *sqlx.Tx) error { + _, err := tx.ExecContext(ctx, "ALTER TABLE routes DROP COLUMN cpus;") + return err +} + +func init() { + Migrations = append(Migrations, &migratex.MigFields{ + VersionFunc: vfunc(7), + UpFunc: up7, + DownFunc: down7, + }) +} diff --git a/api/datastore/sql/migrations/7_add_route_cpus.up.sql b/api/datastore/sql/migrations/7_add_route_cpus.up.sql deleted file mode 100644 index 177446f97..000000000 --- a/api/datastore/sql/migrations/7_add_route_cpus.up.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE routes ADD cpus int; diff --git a/api/datastore/sql/migrations/README.md b/api/datastore/sql/migrations/README.md index c8d2563a7..b8996c09c 100644 --- a/api/datastore/sql/migrations/README.md +++ b/api/datastore/sql/migrations/README.md @@ -2,39 +2,49 @@ All migration files should be of the format: -`[0-9]+_[add|remove]_model[_field]*.[up|down].sql` +`[0-9]+_[add|remove]_model[_field]*.go` The number at the beginning of the file name should be monotonically increasing, from the last highest file number in this directory. E.g. if there -is `11_add_foo_bar.up.sql`, your new file should be `12_add_bar_baz.up.sql`. +is `11_add_foo_bar.go`, your new file should be `12_add_bar_baz.go`. -All `*.up.sql` files must have an accompanying `*.down.sql` file in order to -pass review. +Each migration file has to contain both an up and a down function: -The contents of each file should contain only 1 ANSI sql query. For help, you -may refer to https://github.com/mattes/migrate/blob/master/MIGRATIONS.md which -illustrates some of the finer points. +```go +package migrations -After creating the file you will need to run, in the same directory as this -README: +import ( + "context" -```sh -$ go generate + "github.com/fnproject/fn/api/datastore/sql/migratex" + "github.com/jmoiron/sqlx" +) + +func up1(ctx context.Context, tx *sqlx.Tx) error { + _, err := tx.ExecContext(ctx, "ALTER TABLE routes ADD created_at text;") + return err +} + +func down1(ctx context.Context, tx *sqlx.Tx) error { + _, err := tx.ExecContext(ctx, "ALTER TABLE routes DROP COLUMN created_at;") + return err +} + +func init() { + Migrations = append(Migrations, &migratex.MigFields{ + VersionFunc: vfunc(1), + UpFunc: up1, + DownFunc: down1, + }) +} ``` -NOTE: You may need to `go get -u github.com/jteeuwen/go-bindata/...` before running `go -generate` in order for it to work. +Each migration must initialize a `migratex.Migration` with the corresponding +version and up/down functions. -After running `go generate`, the `migrations.go` file should be updated. Check -the updated version of this as well as the new `.sql` file into git. +We have elected to expose fn-specific SQL migrations as an exported global +list, `migrations.Migrations`, from this package; simply append your +migration to this list. -After adding the migration, be sure to update the fields in the sql tables in -`sql.go` up one package.
For example, if you added a column `foo` to `routes`, -add this field to the routes `CREATE TABLE` query, as well as any queries -where it should be returned. - -After doing this, run the test suite to make sure the sql queries work as -intended and voila. The test suite will ensure that the up and down migrations -work as well as a fresh db. The down migrations will not be tested against -SQLite3 as it does not support `ALTER TABLE DROP COLUMN`, but will still be -tested against postgres and MySQL. +Please note that every database change should be treated as one individual +migration (new table, new column, column type change, etc.). diff --git a/api/datastore/sql/migrations/index.go b/api/datastore/sql/migrations/index.go deleted file mode 100644 index 1b186aea7..000000000 --- a/api/datastore/sql/migrations/index.go +++ /dev/null @@ -1,12 +0,0 @@ -package migrations - -//go:generate go-bindata -ignore README.md -ignore migrations.go -ignore index.go -o migrations.go -pkg migrations . - -// migrations are generated from this cwd with go generate. -// install https://github.com/jteeuwen/go-bindata for go generate -// command to work properly. - -// this will generate a go file with go-bindata of all the migration -// files in 1 go file, so that migrations can be run remotely without -// having to carry the migration files around (i.e. since they are -// compiled into the go binary) diff --git a/api/datastore/sql/migrations/migrations.go b/api/datastore/sql/migrations/migrations.go deleted file mode 100644 index b9e002586..000000000 --- a/api/datastore/sql/migrations/migrations.go +++ /dev/null @@ -1,534 +0,0 @@ -// Code generated by go-bindata. -// sources: -// 1_add_route_created_at.down.sql -// 1_add_route_created_at.up.sql -// 2_add_call_stats.down.sql -// 2_add_call_stats.up.sql -// 3_add_call_error.down.sql -// 3_add_call_error.up.sql -// 4_add_route_updated_at.down.sql -// 4_add_route_updated_at.up.sql -// 5_add_app_created_at.down.sql -// 5_add_app_created_at.up.sql -// 6_add_app_updated_at.down.sql -// 6_add_app_updated_at.up.sql -// 7_add_route_cpus.down.sql -// 7_add_route_cpus.up.sql -// DO NOT EDIT!
- -package migrations - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" -) - -func bindataRead(data []byte, name string) ([]byte, error) { - gz, err := gzip.NewReader(bytes.NewBuffer(data)) - if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) - } - - var buf bytes.Buffer - _, err = io.Copy(&buf, gz) - clErr := gz.Close() - - if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) - } - if clErr != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -type asset struct { - bytes []byte - info os.FileInfo -} - -type bindataFileInfo struct { - name string - size int64 - mode os.FileMode - modTime time.Time -} - -func (fi bindataFileInfo) Name() string { - return fi.name -} -func (fi bindataFileInfo) Size() int64 { - return fi.size -} -func (fi bindataFileInfo) Mode() os.FileMode { - return fi.mode -} -func (fi bindataFileInfo) ModTime() time.Time { - return fi.modTime -} -func (fi bindataFileInfo) IsDir() bool { - return false -} -func (fi bindataFileInfo) Sys() interface{} { - return nil -} - -var __1_add_route_created_atDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\xca\x2f\x2d\x49\x2d\x56\x70\x09\xf2\x0f\x50\x70\xf6\xf7\x09\xf5\xf5\x53\x48\x2e\x4a\x4d\x2c\x49\x4d\x89\x4f\x2c\xb1\xe6\x02\x04\x00\x00\xff\xff\x47\xfd\x3b\xbe\x2b\x00\x00\x00") - -func _1_add_route_created_atDownSqlBytes() ([]byte, error) { - return bindataRead( - __1_add_route_created_atDownSql, - "1_add_route_created_at.down.sql", - ) -} - -func _1_add_route_created_atDownSql() (*asset, error) { - bytes, err := _1_add_route_created_atDownSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "1_add_route_created_at.down.sql", size: 43, mode: os.FileMode(420), modTime: time.Unix(1510963763, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __1_add_route_created_atUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\xca\x2f\x2d\x49\x2d\x56\x70\x74\x71\x51\x48\x2e\x4a\x4d\x2c\x49\x4d\x89\x4f\x2c\x51\x28\x49\xad\x28\xb1\xe6\x02\x04\x00\x00\xff\xff\x3b\x59\x9c\x54\x28\x00\x00\x00") - -func _1_add_route_created_atUpSqlBytes() ([]byte, error) { - return bindataRead( - __1_add_route_created_atUpSql, - "1_add_route_created_at.up.sql", - ) -} - -func _1_add_route_created_atUpSql() (*asset, error) { - bytes, err := _1_add_route_created_atUpSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "1_add_route_created_at.up.sql", size: 40, mode: os.FileMode(420), modTime: time.Unix(1510963763, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __2_add_call_statsDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x48\x4e\xcc\xc9\x29\x56\x70\x09\xf2\x0f\x50\x70\xf6\xf7\x09\xf5\xf5\x53\x28\x2e\x49\x2c\x29\xb6\xe6\x02\x04\x00\x00\xff\xff\xd3\x09\xeb\x22\x25\x00\x00\x00") - -func _2_add_call_statsDownSqlBytes() ([]byte, error) { - return bindataRead( - __2_add_call_statsDownSql, - "2_add_call_stats.down.sql", - ) -} - -func _2_add_call_statsDownSql() (*asset, error) { - bytes, err := _2_add_call_statsDownSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "2_add_call_stats.down.sql", size: 37, mode: os.FileMode(420), modTime: time.Unix(1511917353, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __2_add_call_statsUpSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x48\x4e\xcc\xc9\x29\x56\x70\x74\x71\x51\x28\x2e\x49\x2c\x29\x56\x28\x49\xad\x28\xb1\xe6\x02\x04\x00\x00\xff\xff\x29\xde\x11\xe8\x22\x00\x00\x00") - -func _2_add_call_statsUpSqlBytes() ([]byte, error) { - return bindataRead( - __2_add_call_statsUpSql, - "2_add_call_stats.up.sql", - ) -} - -func _2_add_call_statsUpSql() (*asset, error) { - bytes, err := _2_add_call_statsUpSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "2_add_call_stats.up.sql", size: 34, mode: os.FileMode(420), modTime: time.Unix(1511917353, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __3_add_call_errorDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x48\x4e\xcc\xc9\x29\x56\x70\x09\xf2\x0f\x50\x70\xf6\xf7\x09\xf5\xf5\x53\x48\x2d\x2a\xca\x2f\xb2\xe6\x02\x04\x00\x00\xff\xff\xc1\x14\x26\x51\x25\x00\x00\x00") - -func _3_add_call_errorDownSqlBytes() ([]byte, error) { - return bindataRead( - __3_add_call_errorDownSql, - "3_add_call_error.down.sql", - ) -} - -func _3_add_call_errorDownSql() (*asset, error) { - bytes, err := _3_add_call_errorDownSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "3_add_call_error.down.sql", size: 37, mode: os.FileMode(420), modTime: time.Unix(1511989827, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __3_add_call_errorUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x48\x4e\xcc\xc9\x29\x56\x70\x74\x71\x51\x48\x2d\x2a\xca\x2f\x52\x28\x49\xad\x28\xb1\xe6\x02\x04\x00\x00\xff\xff\xaf\xba\x27\xcd\x22\x00\x00\x00") - -func _3_add_call_errorUpSqlBytes() ([]byte, error) { - return bindataRead( - __3_add_call_errorUpSql, - "3_add_call_error.up.sql", - ) -} - -func _3_add_call_errorUpSql() (*asset, error) { - bytes, err := _3_add_call_errorUpSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "3_add_call_error.up.sql", size: 34, mode: os.FileMode(420), modTime: time.Unix(1511989827, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __4_add_route_updated_atDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\xca\x2f\x2d\x49\x2d\x56\x70\x09\xf2\x0f\x50\x70\xf6\xf7\x09\xf5\xf5\x53\x28\x2d\x48\x49\x2c\x49\x4d\x89\x4f\x2c\xb1\xe6\x02\x04\x00\x00\xff\xff\xa4\x67\xb0\xea\x2b\x00\x00\x00") - -func _4_add_route_updated_atDownSqlBytes() ([]byte, error) { - return bindataRead( - __4_add_route_updated_atDownSql, - "4_add_route_updated_at.down.sql", - ) -} - -func _4_add_route_updated_atDownSql() (*asset, error) { - bytes, err := _4_add_route_updated_atDownSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "4_add_route_updated_at.down.sql", size: 43, mode: os.FileMode(420), modTime: time.Unix(1514060619, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __4_add_route_updated_atUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\xca\x2f\x2d\x49\x2d\x56\x70\x74\x71\x51\x28\x2d\x48\x49\x2c\x49\x4d\x89\x4f\x2c\x51\x28\x4b\x2c\x4a\xce\x48\x2c\xd2\x30\x32\x35\xd3\xb4\xe6\x02\x04\x00\x00\xff\xff\x54\xf7\xac\x11\x30\x00\x00\x00") - -func _4_add_route_updated_atUpSqlBytes() ([]byte, error) { - return bindataRead( - __4_add_route_updated_atUpSql, - "4_add_route_updated_at.up.sql", - ) -} 
- -func _4_add_route_updated_atUpSql() (*asset, error) { - bytes, err := _4_add_route_updated_atUpSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "4_add_route_updated_at.up.sql", size: 48, mode: os.FileMode(420), modTime: time.Unix(1514060619, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __5_add_app_created_atDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x48\x2c\x28\x28\x56\x70\x09\xf2\x0f\x50\x70\xf6\xf7\x09\xf5\xf5\x53\x48\x2e\x4a\x4d\x2c\x49\x4d\x89\x4f\x2c\xb1\xe6\x02\x04\x00\x00\xff\xff\xd2\xde\x5c\x98\x29\x00\x00\x00") - -func _5_add_app_created_atDownSqlBytes() ([]byte, error) { - return bindataRead( - __5_add_app_created_atDownSql, - "5_add_app_created_at.down.sql", - ) -} - -func _5_add_app_created_atDownSql() (*asset, error) { - bytes, err := _5_add_app_created_atDownSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "5_add_app_created_at.down.sql", size: 41, mode: os.FileMode(420), modTime: time.Unix(1514060619, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __5_add_app_created_atUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x48\x2c\x28\x28\x56\x70\x74\x71\x51\x48\x2e\x4a\x4d\x2c\x49\x4d\x89\x4f\x2c\x51\x28\x4b\x2c\x4a\xce\x48\x2c\xd2\x30\x32\x35\xd3\xb4\xe6\x02\x04\x00\x00\xff\xff\x76\x6c\x0f\x45\x2e\x00\x00\x00") - -func _5_add_app_created_atUpSqlBytes() ([]byte, error) { - return bindataRead( - __5_add_app_created_atUpSql, - "5_add_app_created_at.up.sql", - ) -} - -func _5_add_app_created_atUpSql() (*asset, error) { - bytes, err := _5_add_app_created_atUpSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "5_add_app_created_at.up.sql", size: 46, mode: os.FileMode(420), modTime: time.Unix(1514060619, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __6_add_app_updated_atDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x48\x2c\x28\x28\x56\x70\x09\xf2\x0f\x50\x70\xf6\xf7\x09\xf5\xf5\x53\x28\x2d\x48\x49\x2c\x49\x4d\x89\x4f\x2c\xb1\xe6\x02\x04\x00\x00\xff\xff\x31\x44\xd7\xcc\x29\x00\x00\x00") - -func _6_add_app_updated_atDownSqlBytes() ([]byte, error) { - return bindataRead( - __6_add_app_updated_atDownSql, - "6_add_app_updated_at.down.sql", - ) -} - -func _6_add_app_updated_atDownSql() (*asset, error) { - bytes, err := _6_add_app_updated_atDownSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "6_add_app_updated_at.down.sql", size: 41, mode: os.FileMode(420), modTime: time.Unix(1514060619, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __6_add_app_updated_atUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x48\x2c\x28\x28\x56\x70\x74\x71\x51\x28\x2d\x48\x49\x2c\x49\x4d\x89\x4f\x2c\x51\x28\x4b\x2c\x4a\xce\x48\x2c\xd2\x30\x32\x35\xd3\xb4\xe6\x02\x04\x00\x00\xff\xff\x65\x01\x8b\x34\x2e\x00\x00\x00") - -func _6_add_app_updated_atUpSqlBytes() ([]byte, error) { - return bindataRead( - __6_add_app_updated_atUpSql, - "6_add_app_updated_at.up.sql", - ) -} - -func _6_add_app_updated_atUpSql() (*asset, error) { - bytes, err := _6_add_app_updated_atUpSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "6_add_app_updated_at.up.sql", size: 46, mode: os.FileMode(420), modTime: time.Unix(1514060619, 0)} - a 
:= &asset{bytes: bytes, info: info} - return a, nil -} - -var __7_add_route_cpusDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\xca\x2f\x2d\x49\x2d\x56\x70\x09\xf2\x0f\x50\x70\xf6\xf7\x09\xf5\xf5\x53\x48\x2e\x28\x2d\xb6\xe6\x02\x04\x00\x00\xff\xff\xec\x60\x24\xd0\x25\x00\x00\x00") - -func _7_add_route_cpusDownSqlBytes() ([]byte, error) { - return bindataRead( - __7_add_route_cpusDownSql, - "7_add_route_cpus.down.sql", - ) -} - -func _7_add_route_cpusDownSql() (*asset, error) { - bytes, err := _7_add_route_cpusDownSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "7_add_route_cpus.down.sql", size: 37, mode: os.FileMode(420), modTime: time.Unix(1515624756, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __7_add_route_cpusUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\xca\x2f\x2d\x49\x2d\x56\x70\x74\x71\x51\x48\x2e\x28\x2d\x56\xc8\xcc\x2b\xb1\xe6\x02\x04\x00\x00\xff\xff\xf1\x18\xf8\xa9\x21\x00\x00\x00") - -func _7_add_route_cpusUpSqlBytes() ([]byte, error) { - return bindataRead( - __7_add_route_cpusUpSql, - "7_add_route_cpus.up.sql", - ) -} - -func _7_add_route_cpusUpSql() (*asset, error) { - bytes, err := _7_add_route_cpusUpSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "7_add_route_cpus.up.sql", size: 33, mode: os.FileMode(420), modTime: time.Unix(1515628068, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -// Asset loads and returns the asset for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func Asset(name string) ([]byte, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) - } - return a.bytes, nil - } - return nil, fmt.Errorf("Asset %s not found", name) -} - -// MustAsset is like Asset but panics when Asset would return an error. -// It simplifies safe initialization of global variables. -func MustAsset(name string) []byte { - a, err := Asset(name) - if err != nil { - panic("asset: Asset(" + name + "): " + err.Error()) - } - - return a -} - -// AssetInfo loads and returns the asset info for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func AssetInfo(name string) (os.FileInfo, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) - } - return a.info, nil - } - return nil, fmt.Errorf("AssetInfo %s not found", name) -} - -// AssetNames returns the names of the assets. -func AssetNames() []string { - names := make([]string, 0, len(_bindata)) - for name := range _bindata { - names = append(names, name) - } - return names -} - -// _bindata is a table, holding each asset generator, mapped to its name. 
-var _bindata = map[string]func() (*asset, error){ - "1_add_route_created_at.down.sql": _1_add_route_created_atDownSql, - "1_add_route_created_at.up.sql": _1_add_route_created_atUpSql, - "2_add_call_stats.down.sql": _2_add_call_statsDownSql, - "2_add_call_stats.up.sql": _2_add_call_statsUpSql, - "3_add_call_error.down.sql": _3_add_call_errorDownSql, - "3_add_call_error.up.sql": _3_add_call_errorUpSql, - "4_add_route_updated_at.down.sql": _4_add_route_updated_atDownSql, - "4_add_route_updated_at.up.sql": _4_add_route_updated_atUpSql, - "5_add_app_created_at.down.sql": _5_add_app_created_atDownSql, - "5_add_app_created_at.up.sql": _5_add_app_created_atUpSql, - "6_add_app_updated_at.down.sql": _6_add_app_updated_atDownSql, - "6_add_app_updated_at.up.sql": _6_add_app_updated_atUpSql, - "7_add_route_cpus.down.sql": _7_add_route_cpusDownSql, - "7_add_route_cpus.up.sql": _7_add_route_cpusUpSql, -} - -// AssetDir returns the file names below a certain -// directory embedded in the file by go-bindata. -// For example if you run go-bindata on data/... and data contains the -// following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png -// then AssetDir("data") would return []string{"foo.txt", "img"} -// AssetDir("data/img") would return []string{"a.png", "b.png"} -// AssetDir("foo.txt") and AssetDir("notexist") would return an error -// AssetDir("") will return []string{"data"}. -func AssetDir(name string) ([]string, error) { - node := _bintree - if len(name) != 0 { - cannonicalName := strings.Replace(name, "\\", "/", -1) - pathList := strings.Split(cannonicalName, "/") - for _, p := range pathList { - node = node.Children[p] - if node == nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - } - } - if node.Func != nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - rv := make([]string, 0, len(node.Children)) - for childName := range node.Children { - rv = append(rv, childName) - } - return rv, nil -} - -type bintree struct { - Func func() (*asset, error) - Children map[string]*bintree -} - -var _bintree = &bintree{nil, map[string]*bintree{ - "1_add_route_created_at.down.sql": &bintree{_1_add_route_created_atDownSql, map[string]*bintree{}}, - "1_add_route_created_at.up.sql": &bintree{_1_add_route_created_atUpSql, map[string]*bintree{}}, - "2_add_call_stats.down.sql": &bintree{_2_add_call_statsDownSql, map[string]*bintree{}}, - "2_add_call_stats.up.sql": &bintree{_2_add_call_statsUpSql, map[string]*bintree{}}, - "3_add_call_error.down.sql": &bintree{_3_add_call_errorDownSql, map[string]*bintree{}}, - "3_add_call_error.up.sql": &bintree{_3_add_call_errorUpSql, map[string]*bintree{}}, - "4_add_route_updated_at.down.sql": &bintree{_4_add_route_updated_atDownSql, map[string]*bintree{}}, - "4_add_route_updated_at.up.sql": &bintree{_4_add_route_updated_atUpSql, map[string]*bintree{}}, - "5_add_app_created_at.down.sql": &bintree{_5_add_app_created_atDownSql, map[string]*bintree{}}, - "5_add_app_created_at.up.sql": &bintree{_5_add_app_created_atUpSql, map[string]*bintree{}}, - "6_add_app_updated_at.down.sql": &bintree{_6_add_app_updated_atDownSql, map[string]*bintree{}}, - "6_add_app_updated_at.up.sql": &bintree{_6_add_app_updated_atUpSql, map[string]*bintree{}}, - "7_add_route_cpus.down.sql": &bintree{_7_add_route_cpusDownSql, map[string]*bintree{}}, - "7_add_route_cpus.up.sql": &bintree{_7_add_route_cpusUpSql, map[string]*bintree{}}, -}} - -// RestoreAsset restores an asset under the given directory -func RestoreAsset(dir, name string) error { - data, err := Asset(name) 
- if err != nil { - return err - } - info, err := AssetInfo(name) - if err != nil { - return err - } - err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) - if err != nil { - return err - } - err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) - if err != nil { - return err - } - err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) - if err != nil { - return err - } - return nil -} - -// RestoreAssets restores an asset under the given directory recursively -func RestoreAssets(dir, name string) error { - children, err := AssetDir(name) - // File - if err != nil { - return RestoreAsset(dir, name) - } - // Dir - for _, child := range children { - err = RestoreAssets(dir, filepath.Join(name, child)) - if err != nil { - return err - } - } - return nil -} - -func _filePath(dir, name string) string { - cannonicalName := strings.Replace(name, "\\", "/", -1) - return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) -} diff --git a/api/datastore/sql/migrations/migs.go b/api/datastore/sql/migrations/migs.go new file mode 100644 index 000000000..c7c8df9fe --- /dev/null +++ b/api/datastore/sql/migrations/migs.go @@ -0,0 +1,10 @@ +package migrations + +import ( + "github.com/fnproject/fn/api/datastore/sql/migratex" +) + +// Migrations is the list of fn specific sql migrations to run +var Migrations []migratex.Migration + +func vfunc(v int64) func() int64 { return func() int64 { return v } } diff --git a/api/datastore/sql/sql.go b/api/datastore/sql/sql.go index b71618400..849e84beb 100644 --- a/api/datastore/sql/sql.go +++ b/api/datastore/sql/sql.go @@ -14,6 +14,7 @@ import ( "time" "github.com/fnproject/fn/api/common" + "github.com/fnproject/fn/api/datastore/sql/migratex" "github.com/fnproject/fn/api/datastore/sql/migrations" "github.com/fnproject/fn/api/models" "github.com/go-sql-driver/mysql" @@ -23,12 +24,6 @@ import ( _ "github.com/lib/pq" "github.com/mattn/go-sqlite3" _ "github.com/mattn/go-sqlite3" - "github.com/rdallman/migrate" - _ "github.com/rdallman/migrate/database/mysql" - _ "github.com/rdallman/migrate/database/postgres" - _ "github.com/rdallman/migrate/database/sqlite3" - "github.com/rdallman/migrate/source" - "github.com/rdallman/migrate/source/go-bindata" "github.com/sirupsen/logrus" ) @@ -147,7 +142,9 @@ func newDS(ctx context.Context, url *url.URL) (*sqlStore, error) { db.SetMaxIdleConns(maxIdleConns) log.WithFields(logrus.Fields{"max_idle_connections": maxIdleConns, "datastore": driver}).Info("datastore dialed") - err = runMigrations(url.String(), checkExistence(db)) // original url string + sdb := &sqlStore{db: db} + + err = sdb.runMigrations(ctx, checkExistence(db), migrations.Migrations) if err != nil { log.WithError(err).Error("error running migrations") return nil, err @@ -164,7 +161,7 @@ func newDS(ctx context.Context, url *url.URL) (*sqlStore, error) { } } - return &sqlStore{db: db}, nil + return sdb, nil } func pingWithRetry(ctx context.Context, attempts int, sleep time.Duration, db *sqlx.DB) (err error) { @@ -201,52 +198,29 @@ func checkExistence(db *sqlx.DB) bool { // check if the db already existed, if the db is brand new then we can skip // over all the migrations BUT we must be sure to set the right migration // number so that only current migrations are skipped, not any future ones. 
-func runMigrations(url string, exists bool) error { - m, err := migrator(url) - if err != nil { - return err - } - defer m.Close() - - if !exists { +func (ds *sqlStore) runMigrations(ctx context.Context, dbExists bool, migrations []migratex.Migration) error { + if !dbExists { // set to highest and bail - return m.Force(latestVersion(migrations.AssetNames())) + return ds.Tx(func(tx *sqlx.Tx) error { + return migratex.SetVersion(ctx, tx, latestVersion(migrations), false) + }) } // run any migrations needed to get to latest, if any - err = m.Up() - if err == migrate.ErrNoChange { // we don't care, but want other errors - err = nil - } - return err -} - -func migrator(url string) (*migrate.Migrate, error) { - s := bindata.Resource(migrations.AssetNames(), - func(name string) ([]byte, error) { - return migrations.Asset(name) - }) - - d, err := bindata.WithInstance(s) - if err != nil { - return nil, err - } - - return migrate.NewWithSourceInstance("go-bindata", d, url) + return migratex.Up(ctx, ds.db, migrations) } -// latest version will find the latest version from a list of migration -// names (not from the db) -func latestVersion(migs []string) int { - var highest uint - for _, m := range migs { - mig, _ := source.Parse(m) - if mig.Version > highest { - highest = mig.Version +// latestVersion will find the latest version from a list of migrations +// (not from the db) +func latestVersion(migs []migratex.Migration) int64 { + var highest int64 + for _, mig := range migs { + if mig.Version() > highest { + highest = mig.Version() } } - return int(highest) + return highest } // clear is for tests only, be careful, it deletes all records. diff --git a/api/datastore/sql/sql_test.go b/api/datastore/sql/sql_test.go index c6add723f..9bd262c63 100644 --- a/api/datastore/sql/sql_test.go +++ b/api/datastore/sql/sql_test.go @@ -1,14 +1,15 @@ package sql import ( + "context" "net/url" "os" "testing" - "context" - "github.com/fnproject/fn/api/datastore/internal/datastoretest" "github.com/fnproject/fn/api/datastore/internal/datastoreutil" + "github.com/fnproject/fn/api/datastore/sql/migratex" + "github.com/fnproject/fn/api/datastore/sql/migrations" "github.com/fnproject/fn/api/models" ) @@ -23,12 +24,7 @@ func newWithMigrations(ctx context.Context, url *url.URL) (*sqlStore, error) { return nil, err } - m, err := migrator(url.String()) - if err != nil { - return nil, err - } - - err = m.Down() + err = migratex.Down(ctx, ds.db, migrations.Migrations) if err != nil { return nil, err } diff --git a/test/fn-api-tests/exec_test.go b/test/fn-api-tests/exec_test.go index 2dd6ae614..04ce1dee1 100644 --- a/test/fn-api-tests/exec_test.go +++ b/test/fn-api-tests/exec_test.go @@ -360,9 +360,10 @@ func TestOversizedLog(t *testing.T) { if err != nil { t.Error(err.Error()) } else { - if len(logObj.Payload) >= size { - t.Errorf("Log entry suppose to be truncated up to expected size %v, got %v", - size/1024, len(logObj.Payload)) + log := logObj.Payload.Log.Log + if len(log) >= size { + t.Errorf("Log entry supposed to be truncated up to expected size %v, got %v", + size/1024, len(log)) } } DeleteApp(t, s.Context, s.Client, s.AppName) diff --git a/vendor/github.com/docker/docker/Dockerfile b/vendor/github.com/docker/docker/Dockerfile index 49a8c5996..63e2dc3f0 100644 --- a/vendor/github.com/docker/docker/Dockerfile +++ b/vendor/github.com/docker/docker/Dockerfile @@ -32,28 +32,21 @@ # the case. Therefore, you don't have to disable it anymore.
# -FROM buildpack-deps:stretch AS base +FROM golang:1.9.4 AS base +# FIXME(vdemeester) this is kept for other script depending on it to not fail right away +# Remove this once the other scripts uses something else to detect the version +ENV GO_VERSION 1.9.4 # allow replacing httpredir or deb mirror ARG APT_MIRROR=deb.debian.org RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list - -FROM base AS golang -# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines -# will need updating, to avoid errors. Ping #docker-maintainers on IRC -# with a heads-up. -# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored -ENV GO_VERSION 1.9.4 -RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \ - | tar -xzC /usr/local -ENV PATH=/usr/local/go/bin:/go/bin:$PATH GOPATH=/go - - FROM base AS criu # Install CRIU for checkpoint/restore support ENV CRIU_VERSION 3.6 # Install dependancy packages specific to criu -RUN apt-get update && apt-get install -y \ +RUN case $(uname -m) in \ + x86_64) \ + apt-get update && apt-get install -y \ libnet-dev \ libprotobuf-c0-dev \ libprotobuf-dev \ @@ -66,10 +59,15 @@ RUN apt-get update && apt-get install -y \ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ && cd /usr/src/criu \ && make \ - && make PREFIX=/opt/criu install-criu + && make PREFIX=/opt/criu install-criu ;\ + ;; \ + armv7l|aarch64|ppc64le|s390x) \ + mkdir -p /opt/criu; \ + ;; \ + esac -FROM golang AS registry +FROM base AS registry # Install two versions of the registry. The first is an older version that # only supports schema1 manifests. The second is a newer version that supports # both. 
This allows integration-cli tests to cover push/pull with both schema1 @@ -82,14 +80,18 @@ RUN set -x \ && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ - && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ - && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ - go build -buildmode=pie -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ + && case $(uname -m) in \ + x86_64|ppc64le|s390x) \ + (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1"); \ + GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ + go build -buildmode=pie -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ + ;; \ + esac \ && rm -rf "$GOPATH" -FROM golang AS notary +FROM base AS notary # Install notary and notary-server ENV NOTARY_VERSION v0.5.0 RUN set -x \ @@ -113,7 +115,7 @@ RUN git clone https://github.com/docker/docker-py.git /docker-py \ -FROM golang AS swagger +FROM base AS swagger # Install go-swagger for validating swagger.yaml ENV GO_SWAGGER_COMMIT c28258affb0b6251755d92489ef685af8d4ff3eb RUN set -x \ @@ -124,56 +126,57 @@ RUN set -x \ && rm -rf "$GOPATH" -FROM golang AS frozen-images +FROM base AS frozen-images RUN apt-get update && apt-get install -y jq ca-certificates --no-install-recommends # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / RUN /download-frozen-image-v2.sh /docker-frozen-images \ buildpack-deps:jessie@sha256:dd86dced7c9cd2a724e779730f0a53f93b7ef42228d4344b25ce9a42a1486251 \ - busybox:1.27-glibc@sha256:8c8f261a462eead45ab8e610d3e8f7a1e4fd1cd9bed5bc0a0c386784ab105d8e \ + busybox:latest@sha256:bbc3a03235220b170ba48a157dd097dd1379299370e1ed99ce976df0355d24f0 \ + busybox:glibc@sha256:0b55a30394294ab23b9afd58fab94e61a923f5834fba7ddbae7f8e0c11ba85e6 \ debian:jessie@sha256:287a20c5f73087ab406e6b364833e3fb7b3ae63ca0eb3486555dc27ed32c6e60 \ hello-world:latest@sha256:be0cd392e45be79ffeffa6b05338b98ebb16c87b255f48e297ec7f98e123905c # See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list) # Just a little hack so we don't have to install these deps twice, once for runc and once for dockerd -FROM golang AS runtime-dev +FROM base AS runtime-dev RUN apt-get update && apt-get install -y \ libapparmor-dev \ libseccomp-dev -FROM golang AS tomlv +FROM base AS tomlv ENV INSTALL_BINARY_NAME=tomlv COPY hack/dockerfile/install/install.sh ./install.sh COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ RUN PREFIX=/opt/$INSTALL_BINARY_NAME ./install.sh $INSTALL_BINARY_NAME -FROM golang AS vndr +FROM base AS vndr ENV INSTALL_BINARY_NAME=vndr COPY hack/dockerfile/install/install.sh ./install.sh COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ RUN PREFIX=/opt/$INSTALL_BINARY_NAME ./install.sh $INSTALL_BINARY_NAME -FROM golang AS containerd +FROM base AS containerd RUN apt-get update && apt-get install -y btrfs-tools ENV INSTALL_BINARY_NAME=containerd COPY hack/dockerfile/install/install.sh ./install.sh COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ RUN 
PREFIX=/opt/$INSTALL_BINARY_NAME ./install.sh $INSTALL_BINARY_NAME -FROM golang AS proxy +FROM base AS proxy ENV INSTALL_BINARY_NAME=proxy COPY hack/dockerfile/install/install.sh ./install.sh COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ RUN PREFIX=/opt/$INSTALL_BINARY_NAME ./install.sh $INSTALL_BINARY_NAME -FROM golang AS gometalinter +FROM base AS gometalinter ENV INSTALL_BINARY_NAME=gometalinter COPY hack/dockerfile/install/install.sh ./install.sh COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ RUN PREFIX=/opt/$INSTALL_BINARY_NAME ./install.sh $INSTALL_BINARY_NAME -FROM golang AS dockercli +FROM base AS dockercli ENV INSTALL_BINARY_NAME=dockercli COPY hack/dockerfile/install/install.sh ./install.sh COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ @@ -215,7 +218,7 @@ RUN apt-get update && apt-get install -y \ libudev-dev \ libsystemd-dev \ binutils-mingw-w64 \ - g++-mingw-w64-x86-64 \ + g++-mingw-w64-x86-64 \ net-tools \ pigz \ python-backports.ssl-match-hostname \ @@ -231,6 +234,8 @@ RUN apt-get update && apt-get install -y \ vim-common \ xfsprogs \ zip \ + bzip2 \ + xz-utils \ --no-install-recommends COPY --from=swagger /usr/local/bin/swagger* /usr/local/bin/ COPY --from=frozen-images /docker-frozen-images /docker-frozen-images @@ -242,7 +247,6 @@ COPY --from=runc /opt/runc/ /usr/local/bin/ COPY --from=containerd /opt/containerd/ /usr/local/bin/ COPY --from=proxy /opt/proxy/ /usr/local/bin/ COPY --from=dockercli /opt/dockercli /usr/local/cli -COPY --from=golang /usr/local/go /usr/local/go COPY --from=registry /usr/local/bin/registry* /usr/local/bin/ COPY --from=notary /usr/local/bin/notary* /usr/local/bin/ COPY --from=criu /opt/criu/ /usr/local/ diff --git a/vendor/github.com/docker/docker/Dockerfile.aarch64 b/vendor/github.com/docker/docker/Dockerfile.aarch64 deleted file mode 100644 index 068d80516..000000000 --- a/vendor/github.com/docker/docker/Dockerfile.aarch64 +++ /dev/null @@ -1,172 +0,0 @@ -# This file describes the standard way to build Docker on aarch64, using docker -# -# Usage: -# -# # Assemble the full dev environment. This is slow the first time. -# docker build -t docker -f Dockerfile.aarch64 . -# -# # Mount your source in an interactive container for quick testing: -# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash -# -# # Run the test suite: -# docker run --privileged docker hack/make.sh test-unit test-integration test-docker-py -# -# Note: AppArmor used to mess with privileged mode, but this is no longer -# the case. Therefore, you don't have to disable it anymore. 
-# - -FROM debian:stretch - -# allow replacing httpredir or deb mirror -ARG APT_MIRROR=deb.debian.org -RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list - -# Packaged dependencies -RUN apt-get update && apt-get install -y \ - apparmor \ - apt-utils \ - aufs-tools \ - automake \ - bash-completion \ - bsdmainutils \ - btrfs-tools \ - build-essential \ - cmake \ - createrepo \ - curl \ - dpkg-sig \ - gcc \ - git \ - iptables \ - jq \ - less \ - libapparmor-dev \ - libcap-dev \ - libdevmapper-dev \ - libnl-3-dev \ - libprotobuf-c0-dev \ - libprotobuf-dev \ - libseccomp-dev \ - libsystemd-dev \ - libtool \ - libudev-dev \ - mercurial \ - net-tools \ - pigz \ - pkg-config \ - protobuf-compiler \ - protobuf-c-compiler \ - python-backports.ssl-match-hostname \ - python-dev \ - python-mock \ - python-pip \ - python-requests \ - python-setuptools \ - python-websocket \ - python-wheel \ - tar \ - thin-provisioning-tools \ - vim \ - vim-common \ - xfsprogs \ - zip \ - --no-install-recommends - -# Install Go -# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored -ENV GO_VERSION 1.9.4 -RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-arm64.tar.gz" \ - | tar -xzC /usr/local - -ENV PATH /go/bin:/usr/local/go/bin:$PATH -ENV GOPATH /go - -# Only install one version of the registry, because old version which support -# schema1 manifests is not working on ARM64, we should skip integration-cli -# tests for schema1 manifests on ARM64. -ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ - && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ - && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ - go build -buildmode=pie -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ - && rm -rf "$GOPATH" - -# Install notary and notary-server -ENV NOTARY_VERSION v0.5.0 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ - && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ - && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ - go build -buildmode=pie -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ - && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ - go build -buildmode=pie -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ - && rm -rf "$GOPATH" - -# Get the "docker-py" source so we can run their integration tests -ENV DOCKER_PY_COMMIT 8b246db271a85d6541dc458838627e89c683e42f -# To run integration tests docker-pycreds is required. 
-RUN git clone https://github.com/docker/docker-py.git /docker-py \ - && cd /docker-py \ - && git checkout -q $DOCKER_PY_COMMIT \ - && pip install docker-pycreds==0.2.1 \ - && pip install -r test-requirements.txt - -# Install yamllint for validating swagger.yaml -RUN pip install yamllint==1.5.0 - -# Install go-swagger for validating swagger.yaml -ENV GO_SWAGGER_COMMIT c28258affb0b6251755d92489ef685af8d4ff3eb -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/go-swagger/go-swagger.git "$GOPATH/src/github.com/go-swagger/go-swagger" \ - && (cd "$GOPATH/src/github.com/go-swagger/go-swagger" && git checkout -q "$GO_SWAGGER_COMMIT") \ - && go build -o /usr/local/bin/swagger github.com/go-swagger/go-swagger/cmd/swagger \ - && rm -rf "$GOPATH" - -# Set user.email so crosbymichael's in-container merge commits go smoothly -RUN git config --global user.email 'docker-dummy@example.com' - -# Add an unprivileged user to be used for tests which need it -RUN groupadd -r docker -RUN useradd --create-home --gid docker unprivilegeduser - -VOLUME /var/lib/docker -WORKDIR /go/src/github.com/docker/docker -ENV DOCKER_BUILDTAGS apparmor seccomp selinux - -# Let us use a .bashrc file -RUN ln -sfv $PWD/.bashrc ~/.bashrc - -# Register Docker's bash completion. -RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker - -# Get useful and necessary Hub images so we can "docker load" locally instead of pulling -COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ -RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ - buildpack-deps:jessie@sha256:dd86dced7c9cd2a724e779730f0a53f93b7ef42228d4344b25ce9a42a1486251 \ - busybox:1.27-glibc@sha256:8c8f261a462eead45ab8e610d3e8f7a1e4fd1cd9bed5bc0a0c386784ab105d8e \ - debian:jessie@sha256:287a20c5f73087ab406e6b364833e3fb7b3ae63ca0eb3486555dc27ed32c6e60 \ - hello-world:latest@sha256:be0cd392e45be79ffeffa6b05338b98ebb16c87b255f48e297ec7f98e123905c -# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list) -# - -# Install tomlv, vndr, runc, containerd, tini, proxy dockercli -# Please edit hack/dockerfile/install/.installer to update them. -COPY hack/dockerfile/install hack/dockerfile/install -RUN for i in tomlv vndr tini gometalinter proxy dockercli runc containerd; \ - do hack/dockerfile/install/install.sh $i; \ - done -ENV PATH=/usr/local/cli:$PATH - - -# Wrap all commands in the "docker-in-docker" script to allow nested containers -ENTRYPOINT ["hack/dind"] - -# Options for hack/validate/gometalinter -ENV GOMETALINTER_OPTS="--deadline=4m -j2" - -# Upload docker source -COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.armhf b/vendor/github.com/docker/docker/Dockerfile.armhf deleted file mode 100644 index 0e03fb0a1..000000000 --- a/vendor/github.com/docker/docker/Dockerfile.armhf +++ /dev/null @@ -1,155 +0,0 @@ -# This file describes the standard way to build Docker on ARMv7, using docker -# -# Usage: -# -# # Assemble the full dev environment. This is slow the first time. -# docker build -t docker -f Dockerfile.armhf . 
-# -# # Mount your source in an interactive container for quick testing: -# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash -# -# # Run the test suite: -# docker run --privileged docker hack/make.sh test-unit test-integration test-docker-py -# -# Note: AppArmor used to mess with privileged mode, but this is no longer -# the case. Therefore, you don't have to disable it anymore. -# - -FROM arm32v7/debian:stretch - -# allow replacing httpredir or deb mirror -ARG APT_MIRROR=deb.debian.org -RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list - -# Packaged dependencies -RUN apt-get update && apt-get install -y \ - apparmor \ - aufs-tools \ - automake \ - bash-completion \ - btrfs-tools \ - build-essential \ - createrepo \ - curl \ - cmake \ - dpkg-sig \ - git \ - iptables \ - jq \ - net-tools \ - libapparmor-dev \ - libcap-dev \ - libdevmapper-dev \ - libseccomp-dev \ - libsystemd-dev \ - libtool \ - libudev-dev \ - mercurial \ - pigz \ - pkg-config \ - python-backports.ssl-match-hostname \ - python-dev \ - python-mock \ - python-pip \ - python-requests \ - python-setuptools \ - python-websocket \ - python-wheel \ - xfsprogs \ - tar \ - thin-provisioning-tools \ - vim-common \ - --no-install-recommends \ - && pip install awscli==1.10.15 - -# Install Go -# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored -ENV GO_VERSION 1.9.4 -RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" \ - | tar -xzC /usr/local -ENV PATH /go/bin:/usr/local/go/bin:$PATH -ENV GOPATH /go - -# We're building for armhf, which is ARMv7, so let's be explicit about that -ENV GOARCH arm -ENV GOARM 7 - -# Install two versions of the registry. The first is an older version that -# only supports schema1 manifests. The second is a newer version that supports -# both. This allows integration-cli tests to cover push/pull with both schema1 -# and schema2 manifests. 
-ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd -ENV REGISTRY_COMMIT cb08de17d74bef86ce6c5abe8b240e282f5750be -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ - && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ - && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ - go build -buildmode=pie -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ - && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ - && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ - go build -buildmode=pie -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ - && rm -rf "$GOPATH" - -# Install notary and notary-server -ENV NOTARY_VERSION v0.5.0 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ - && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ - && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ - go build -buildmode=pie -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ - && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ - go build -buildmode=pie -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ - && rm -rf "$GOPATH" - -# Get the "docker-py" source so we can run their integration tests -ENV DOCKER_PY_COMMIT 8b246db271a85d6541dc458838627e89c683e42f -# To run integration tests docker-pycreds is required. -RUN git clone https://github.com/docker/docker-py.git /docker-py \ - && cd /docker-py \ - && git checkout -q $DOCKER_PY_COMMIT \ - && pip install docker-pycreds==0.2.1 \ - && pip install -r test-requirements.txt - -# Set user.email so crosbymichael's in-container merge commits go smoothly -RUN git config --global user.email 'docker-dummy@example.com' - -# Add an unprivileged user to be used for tests which need it -RUN groupadd -r docker -RUN useradd --create-home --gid docker unprivilegeduser - -VOLUME /var/lib/docker -WORKDIR /go/src/github.com/docker/docker -ENV DOCKER_BUILDTAGS apparmor seccomp selinux - -# Let us use a .bashrc file -RUN ln -sfv $PWD/.bashrc ~/.bashrc - -# Register Docker's bash completion. -RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker - -# Get useful and necessary Hub images so we can "docker load" locally instead of pulling -COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ -RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ - buildpack-deps:jessie@sha256:dd86dced7c9cd2a724e779730f0a53f93b7ef42228d4344b25ce9a42a1486251 \ - busybox:1.27-glibc@sha256:8c8f261a462eead45ab8e610d3e8f7a1e4fd1cd9bed5bc0a0c386784ab105d8e \ - debian:jessie@sha256:287a20c5f73087ab406e6b364833e3fb7b3ae63ca0eb3486555dc27ed32c6e60 \ - hello-world:latest@sha256:be0cd392e45be79ffeffa6b05338b98ebb16c87b255f48e297ec7f98e123905c -# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list) - -# Install tomlv, vndr, runc, containerd, tini, proxy dockercli -# Please edit hack/dockerfile/install/.installer to update them. 
-COPY hack/dockerfile/install hack/dockerfile/install -RUN for i in tomlv vndr tini gometalinter proxy dockercli runc containerd; \ - do hack/dockerfile/install/install.sh $i; \ - done -ENV PATH=/usr/local/cli:$PATH - -ENTRYPOINT ["hack/dind"] - -# Options for hack/validate/gometalinter -ENV GOMETALINTER_OPTS="--deadline=10m -j2" - -# Upload docker source -COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.e2e b/vendor/github.com/docker/docker/Dockerfile.e2e index ce13170e3..bd700c98e 100644 --- a/vendor/github.com/docker/docker/Dockerfile.e2e +++ b/vendor/github.com/docker/docker/Dockerfile.e2e @@ -17,7 +17,8 @@ WORKDIR /go/src/github.com/docker/docker/ COPY contrib/download-frozen-image-v2.sh contrib/download-frozen-image-v2.sh RUN contrib/download-frozen-image-v2.sh /output/docker-frozen-images \ buildpack-deps:jessie@sha256:dd86dced7c9cd2a724e779730f0a53f93b7ef42228d4344b25ce9a42a1486251 \ - busybox:1.27-glibc@sha256:8c8f261a462eead45ab8e610d3e8f7a1e4fd1cd9bed5bc0a0c386784ab105d8e \ + busybox:latest@sha256:bbc3a03235220b170ba48a157dd097dd1379299370e1ed99ce976df0355d24f0 \ + busybox:glibc@sha256:0b55a30394294ab23b9afd58fab94e61a923f5834fba7ddbae7f8e0c11ba85e6 \ debian:jessie@sha256:287a20c5f73087ab406e6b364833e3fb7b3ae63ca0eb3486555dc27ed32c6e60 \ hello-world:latest@sha256:be0cd392e45be79ffeffa6b05338b98ebb16c87b255f48e297ec7f98e123905c diff --git a/vendor/github.com/docker/docker/Dockerfile.ppc64le b/vendor/github.com/docker/docker/Dockerfile.ppc64le deleted file mode 100644 index 07f4b0149..000000000 --- a/vendor/github.com/docker/docker/Dockerfile.ppc64le +++ /dev/null @@ -1,151 +0,0 @@ -# This file describes the standard way to build Docker on ppc64le, using docker -# -# Usage: -# -# # Assemble the full dev environment. This is slow the first time. -# docker build -t docker -f Dockerfile.ppc64le . -# -# # Mount your source in an interactive container for quick testing: -# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash -# -# # Run the test suite: -# docker run --privileged docker hack/make.sh test-unit test-integration test-docker-py -# -# Note: AppArmor used to mess with privileged mode, but this is no longer -# the case. Therefore, you don't have to disable it anymore. 
-# - -FROM ppc64le/debian:stretch - -# allow replacing httpredir or deb mirror -ARG APT_MIRROR=deb.debian.org -RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list - -# Packaged dependencies -RUN apt-get update && apt-get install -y \ - apparmor \ - apt-utils \ - aufs-tools \ - automake \ - bash-completion \ - btrfs-tools \ - build-essential \ - cmake \ - createrepo \ - curl \ - dpkg-sig \ - git \ - iptables \ - jq \ - net-tools \ - libapparmor-dev \ - libcap-dev \ - libdevmapper-dev \ - libseccomp-dev \ - libsystemd-dev \ - libtool \ - libudev-dev \ - mercurial \ - pigz \ - pkg-config \ - python-backports.ssl-match-hostname \ - python-dev \ - python-mock \ - python-pip \ - python-requests \ - python-setuptools \ - python-websocket \ - python-wheel \ - xfsprogs \ - tar \ - thin-provisioning-tools \ - vim-common \ - --no-install-recommends - -# Install Go -# NOTE: official ppc64le go binaries weren't available until go 1.6.4 and 1.7.4 -# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored -ENV GO_VERSION 1.9.4 -RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" \ - | tar -xzC /usr/local - -ENV PATH /go/bin:/usr/local/go/bin:$PATH -ENV GOPATH /go - -# Install two versions of the registry. The first is an older version that -# only supports schema1 manifests. The second is a newer version that supports -# both. This allows integration-cli tests to cover push/pull with both schema1 -# and schema2 manifests. -ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd -ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ - && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ - && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ - go build -buildmode=pie -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ - && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ - && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ - go build -buildmode=pie -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ - && rm -rf "$GOPATH" - -# Install notary and notary-server -ENV NOTARY_VERSION v0.5.0 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ - && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ - && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ - go build -buildmode=pie -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ - && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ - go build -buildmode=pie -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ - && rm -rf "$GOPATH" - -# Get the "docker-py" source so we can run their integration tests -ENV DOCKER_PY_COMMIT 8b246db271a85d6541dc458838627e89c683e42f -# To run integration tests docker-pycreds is required. 
-RUN git clone https://github.com/docker/docker-py.git /docker-py \ - && cd /docker-py \ - && git checkout -q $DOCKER_PY_COMMIT \ - && pip install docker-pycreds==0.2.1 \ - && pip install -r test-requirements.txt - -# Set user.email so crosbymichael's in-container merge commits go smoothly -RUN git config --global user.email 'docker-dummy@example.com' - -# Add an unprivileged user to be used for tests which need it -RUN groupadd -r docker -RUN useradd --create-home --gid docker unprivilegeduser - -VOLUME /var/lib/docker -WORKDIR /go/src/github.com/docker/docker -ENV DOCKER_BUILDTAGS apparmor seccomp selinux - -# Let us use a .bashrc file -RUN ln -sfv $PWD/.bashrc ~/.bashrc - -# Register Docker's bash completion. -RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker - -# Get useful and necessary Hub images so we can "docker load" locally instead of pulling -COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ -RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ - buildpack-deps:jessie@sha256:dd86dced7c9cd2a724e779730f0a53f93b7ef42228d4344b25ce9a42a1486251 \ - busybox:1.27-glibc@sha256:8c8f261a462eead45ab8e610d3e8f7a1e4fd1cd9bed5bc0a0c386784ab105d8e \ - debian:jessie@sha256:287a20c5f73087ab406e6b364833e3fb7b3ae63ca0eb3486555dc27ed32c6e60 \ - hello-world:latest@sha256:be0cd392e45be79ffeffa6b05338b98ebb16c87b255f48e297ec7f98e123905c -# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list) - -# Install tomlv, vndr, runc, containerd, tini, proxy dockercli -# Please edit hack/dockerfile/install/.installer to update them. -COPY hack/dockerfile/install hack/dockerfile/install -RUN for i in tomlv vndr tini gometalinter proxy dockercli runc containerd; \ - do hack/dockerfile/install/install.sh $i; \ - done -ENV PATH=/usr/local/cli:$PATH - -# Wrap all commands in the "docker-in-docker" script to allow nested containers -ENTRYPOINT ["hack/dind"] - -# Upload docker source -COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.s390x b/vendor/github.com/docker/docker/Dockerfile.s390x deleted file mode 100644 index dc55d5723..000000000 --- a/vendor/github.com/docker/docker/Dockerfile.s390x +++ /dev/null @@ -1,145 +0,0 @@ -# This file describes the standard way to build Docker on s390x, using docker -# -# Usage: -# -# # Assemble the full dev environment. This is slow the first time. -# docker build -t docker -f Dockerfile.s390x . -# -# # Mount your source in an interactive container for quick testing: -# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash -# -# # Run the test suite: -# docker run --privileged docker hack/make.sh test-unit test-integration test-docker-py -# -# Note: AppArmor used to mess with privileged mode, but this is no longer -# the case. Therefore, you don't have to disable it anymore. 
-# - -FROM s390x/debian:stretch - -# Packaged dependencies -RUN apt-get update && apt-get install -y \ - apparmor \ - apt-utils \ - aufs-tools \ - automake \ - bash-completion \ - btrfs-tools \ - build-essential \ - cmake \ - createrepo \ - curl \ - dpkg-sig \ - git \ - iptables \ - jq \ - net-tools \ - libapparmor-dev \ - libcap-dev \ - libdevmapper-dev \ - libseccomp-dev \ - libsystemd-dev \ - libtool \ - libudev-dev \ - mercurial \ - pigz \ - pkg-config \ - python-backports.ssl-match-hostname \ - python-dev \ - python-mock \ - python-pip \ - python-requests \ - python-setuptools \ - python-websocket \ - python-wheel \ - xfsprogs \ - tar \ - thin-provisioning-tools \ - vim-common \ - --no-install-recommends - -# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored -ENV GO_VERSION 1.9.4 -RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" \ - | tar -xzC /usr/local - -ENV PATH /go/bin:/usr/local/go/bin:$PATH -ENV GOPATH /go - -# Install two versions of the registry. The first is an older version that -# only supports schema1 manifests. The second is a newer version that supports -# both. This allows integration-cli tests to cover push/pull with both schema1 -# and schema2 manifests. -ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd -ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ - && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ - && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ - go build -buildmode=pie -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ - && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ - && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ - go build -buildmode=pie -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ - && rm -rf "$GOPATH" - -# Install notary and notary-server -ENV NOTARY_VERSION v0.5.0 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ - && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ - && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ - go build -buildmode=pie -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ - && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ - go build -buildmode=pie -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ - && rm -rf "$GOPATH" - -# Get the "docker-py" source so we can run their integration tests -ENV DOCKER_PY_COMMIT 8b246db271a85d6541dc458838627e89c683e42f -# To run integration tests docker-pycreds is required. 
-RUN git clone https://github.com/docker/docker-py.git /docker-py \ - && cd /docker-py \ - && git checkout -q $DOCKER_PY_COMMIT \ - && pip install docker-pycreds==0.2.1 \ - && pip install -r test-requirements.txt - -# Set user.email so crosbymichael's in-container merge commits go smoothly -RUN git config --global user.email 'docker-dummy@example.com' - -# Add an unprivileged user to be used for tests which need it -RUN groupadd -r docker -RUN useradd --create-home --gid docker unprivilegeduser - -VOLUME /var/lib/docker -WORKDIR /go/src/github.com/docker/docker -ENV DOCKER_BUILDTAGS apparmor selinux seccomp - -# Let us use a .bashrc file -RUN ln -sfv $PWD/.bashrc ~/.bashrc - -# Register Docker's bash completion. -RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker - -# Get useful and necessary Hub images so we can "docker load" locally instead of pulling -COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ -RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ - buildpack-deps:jessie@sha256:dd86dced7c9cd2a724e779730f0a53f93b7ef42228d4344b25ce9a42a1486251 \ - busybox:1.27-glibc@sha256:8c8f261a462eead45ab8e610d3e8f7a1e4fd1cd9bed5bc0a0c386784ab105d8e \ - debian:jessie@sha256:287a20c5f73087ab406e6b364833e3fb7b3ae63ca0eb3486555dc27ed32c6e60 \ - hello-world:latest@sha256:be0cd392e45be79ffeffa6b05338b98ebb16c87b255f48e297ec7f98e123905c -# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list) - -# Install tomlv, vndr, runc, containerd, tini, proxy dockercli -# Please edit hack/dockerfile/install/.installer to update them. -COPY hack/dockerfile/install hack/dockerfile/install -RUN for i in tomlv vndr tini gometalinter proxy dockercli runc containerd; \ - do hack/dockerfile/install/install.sh $i; \ - done -ENV PATH=/usr/local/cli:$PATH - -# Wrap all commands in the "docker-in-docker" script to allow nested containers -ENTRYPOINT ["hack/dind"] - -# Upload docker source -COPY . 
/go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/container/container_unit_test.go b/vendor/github.com/docker/docker/container/container_unit_test.go index 858124abd..863a47a1f 100644 --- a/vendor/github.com/docker/docker/container/container_unit_test.go +++ b/vendor/github.com/docker/docker/container/container_unit_test.go @@ -3,6 +3,7 @@ package container // import "github.com/docker/docker/container" import ( "fmt" "io/ioutil" + "os" "path/filepath" "testing" @@ -74,6 +75,7 @@ func TestContainerSecretReferenceDestTarget(t *testing.T) { func TestContainerLogPathSetForJSONFileLogger(t *testing.T) { containerRoot, err := ioutil.TempDir("", "TestContainerLogPathSetForJSONFileLogger") require.NoError(t, err) + defer os.RemoveAll(containerRoot) c := &Container{ Config: &container.Config{}, @@ -86,8 +88,9 @@ func TestContainerLogPathSetForJSONFileLogger(t *testing.T) { Root: containerRoot, } - _, err = c.StartLogger() + logger, err := c.StartLogger() require.NoError(t, err) + defer logger.Close() expectedLogPath, err := filepath.Abs(filepath.Join(containerRoot, fmt.Sprintf("%s-json.log", c.ID))) require.NoError(t, err) @@ -97,6 +100,7 @@ func TestContainerLogPathSetForRingLogger(t *testing.T) { containerRoot, err := ioutil.TempDir("", "TestContainerLogPathSetForRingLogger") require.NoError(t, err) + defer os.RemoveAll(containerRoot) c := &Container{ Config: &container.Config{}, @@ -112,8 +116,9 @@ func TestContainerLogPathSetForRingLogger(t *testing.T) { Root: containerRoot, } - _, err = c.StartLogger() + logger, err := c.StartLogger() require.NoError(t, err) + defer logger.Close() expectedLogPath, err := filepath.Abs(filepath.Join(containerRoot, fmt.Sprintf("%s-json.log", c.ID))) require.NoError(t, err) diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/adapter.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/adapter.go index 7395036cc..4af6e3dd9 100644 --- a/vendor/github.com/docker/docker/daemon/cluster/executor/container/adapter.go +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/adapter.go @@ -18,6 +18,7 @@ import ( containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/events" containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/cluster/convert" executorpkg "github.com/docker/docker/daemon/cluster/executor" "github.com/docker/libnetwork" @@ -155,7 +156,11 @@ func (c *containerAdapter) createNetworks(ctx context.Context) error { if _, ok := err.(libnetwork.NetworkNameError); ok { continue } - + // Continue if CreateManagedNetwork returns a PredefinedNetworkError; + // other callers can still treat it as an error.
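+ // (Because PredefinedNetworkError implements Forbidden(), callers that do + // not special-case it here can still classify it generically, for example + // with errdefs.IsForbidden(err); illustrative only, based on the errdefs + // helpers exercised elsewhere in this patch.)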
+ if _, ok := err.(daemon.PredefinedNetworkError); ok { + continue + } return err } } diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go index 3926648e2..ca6251023 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go @@ -2223,6 +2223,38 @@ func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error { return err } +func (devices *DeviceSet) unmountAndDeactivateAll(dir string) { + files, err := ioutil.ReadDir(dir) + if err != nil { + logrus.Warnf("devmapper: unmountAndDeactivate: %s", err) + return + } + + for _, d := range files { + if !d.IsDir() { + continue + } + + name := d.Name() + fullname := path.Join(dir, name) + + // We use MNT_DETACH here in case it is still busy in some running + // container. This means it'll go away from the global scope directly, + // and the device will be released when that container dies. + if err := unix.Unmount(fullname, unix.MNT_DETACH); err != nil && err != unix.EINVAL { + logrus.Warnf("devmapper: Shutdown unmounting %s, error: %s", fullname, err) + } + + if devInfo, err := devices.lookupDevice(name); err != nil { + logrus.Debugf("devmapper: Shutdown lookup device %s, error: %s", name, err) + } else { + if err := devices.deactivateDevice(devInfo); err != nil { + logrus.Debugf("devmapper: Shutdown deactivate %s, error: %s", devInfo.Hash, err) + } + } + } +} + // Shutdown shuts down the device by unmounting the root. func (devices *DeviceSet) Shutdown(home string) error { logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix) @@ -2244,45 +2276,7 @@ func (devices *DeviceSet) Shutdown(home string) error { // will be killed and we will not get a chance to save deviceset // metadata. Hence save this early before trying to deactivate devices. devices.saveDeviceSetMetaData() - - // ignore the error since it's just a best effort to not try to unmount something that's mounted - mounts, _ := mount.GetMounts() - mounted := make(map[string]bool, len(mounts)) - for _, mnt := range mounts { - mounted[mnt.Mountpoint] = true - } - - if err := filepath.Walk(path.Join(home, "mnt"), func(p string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() { - return nil - } - - if mounted[p] { - // We use MNT_DETACH here in case it is still busy in some running - // container. This means it'll go away from the global scope directly, - // and the device will be released when that container dies. 
- if err := unix.Unmount(p, unix.MNT_DETACH); err != nil { - logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", p, err) - } - } - - if devInfo, err := devices.lookupDevice(path.Base(p)); err != nil { - logrus.Debugf("devmapper: Shutdown lookup device %s, error: %s", path.Base(p), err) - } else { - if err := devices.deactivateDevice(devInfo); err != nil { - logrus.Debugf("devmapper: Shutdown deactivate %s , error: %s", devInfo.Hash, err) - } - } - - return nil - }); err != nil && !os.IsNotExist(err) { - devices.Unlock() - return err - } - + devices.unmountAndDeactivateAll(path.Join(home, "mnt")) devices.Unlock() info, _ := devices.lookupDeviceWithLock("") diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go index 53e05c3b7..6bed6634c 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go @@ -9,16 +9,16 @@ import ( "path" "strconv" - "github.com/sirupsen/logrus" - "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/devicemapper" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/locker" "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/system" units "github.com/docker/go-units" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) func init() { @@ -122,12 +122,18 @@ func (d *Driver) GetMetadata(id string) (map[string]string, error) { // Cleanup unmounts a device. func (d *Driver) Cleanup() error { err := d.DeviceSet.Shutdown(d.home) + umountErr := mount.RecursiveUnmount(d.home) - if err2 := mount.RecursiveUnmount(d.home); err == nil { - err = err2 + // in case we have two errors, prefer the one from Shutdown() + if err != nil { + return err } - return err + if umountErr != nil { + return errors.Wrapf(umountErr, "error unmounting %s", d.home) + } + + return nil } // CreateReadWrite creates a layer that is writable for use as a container @@ -145,7 +151,7 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { return d.DeviceSet.AddDevice(id, parent, storageOpt) } -// Remove removes a device with a given id, unmounts the filesystem. +// Remove removes a device with a given id, unmounts the filesystem, and removes the mount point. func (d *Driver) Remove(id string) error { d.locker.Lock(id) defer d.locker.Unlock(id) @@ -160,7 +166,21 @@ func (d *Driver) Remove(id string) error { if err := d.DeviceSet.DeleteDevice(id, false); err != nil { return fmt.Errorf("failed to remove device %s: %v", id, err) } - return system.EnsureRemoveAll(path.Join(d.home, "mnt", id)) + + // Most probably the mount point is already removed on Put() + // (see DeviceSet.UnmountDevice()), but just in case it was not + // let's try to remove it here as well, ignoring errors as + // an older kernel can return EBUSY if e.g. the mount was leaked + // to other mount namespaces. A failure to remove the container's + // mount point is not important and should not be treated + // as a failure to remove the container. 
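+ // (Only "does not exist" counts as success below; any other rmdir failure, + // for example EBUSY from a mount leaked into another mount namespace, is + // logged as a warning and otherwise ignored.)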
+ mp := path.Join(d.home, "mnt", id) + err := unix.Rmdir(mp) + if err != nil && !os.IsNotExist(err) { + logrus.WithField("storage-driver", "devicemapper").Warnf("unable to remove mount point %q: %s", mp, err) + } + + return nil } // Get mounts a device with given id into the root filesystem diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/mount.go b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/mount.go index 8058eafa0..78d05b079 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/mount.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/mount.go @@ -6,30 +6,8 @@ import ( "bytes" "fmt" "os" - "path/filepath" - - "golang.org/x/sys/unix" ) -// FIXME: this is copy-pasted from the aufs driver. -// It should be moved into the core. - -// Mounted returns true if a mount point exists. -func Mounted(mountpoint string) (bool, error) { - var mntpointSt unix.Stat_t - if err := unix.Stat(mountpoint, &mntpointSt); err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - var parentSt unix.Stat_t - if err := unix.Stat(filepath.Join(mountpoint, ".."), &parentSt); err != nil { - return false, err - } - return mntpointSt.Dev != parentSt.Dev, nil -} - type probeData struct { fsName string magic string diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go index 7d637f857..5a8045088 100644 --- a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go @@ -95,7 +95,7 @@ func New(info logger.Info) (logger.Logger, error) { return b, nil } - writer, err := loggerutils.NewLogFile(info.LogPath, capval, maxFiles, marshalFunc, decodeFunc) + writer, err := loggerutils.NewLogFile(info.LogPath, capval, maxFiles, marshalFunc, decodeFunc, 0640) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go index b533726c4..e646afc23 100644 --- a/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go @@ -31,13 +31,14 @@ type LogFile struct { notifyRotate *pubsub.Publisher marshal logger.MarshalFunc createDecoder makeDecoderFunc + perms os.FileMode } type makeDecoderFunc func(rdr io.Reader) func() (*logger.Message, error) //NewLogFile creates new LogFile -func NewLogFile(logPath string, capacity int64, maxFiles int, marshaller logger.MarshalFunc, decodeFunc makeDecoderFunc) (*LogFile, error) { - log, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640) +func NewLogFile(logPath string, capacity int64, maxFiles int, marshaller logger.MarshalFunc, decodeFunc makeDecoderFunc, perms os.FileMode) (*LogFile, error) { + log, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, perms) if err != nil { return nil, err } @@ -55,6 +56,7 @@ func NewLogFile(logPath string, capacity int64, maxFiles int, marshaller logger. 
notifyRotate: pubsub.NewPublisher(0, 1), marshal: marshaller, createDecoder: decodeFunc, + perms: perms, }, nil } @@ -100,7 +102,7 @@ func (w *LogFile) checkCapacityAndRotate() error { if err := rotate(name, w.maxFiles); err != nil { return err } - file, err := os.OpenFile(name, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0640) + file, err := os.OpenFile(name, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, w.perms) if err != nil { return err } diff --git a/vendor/github.com/docker/docker/daemon/logger/ring.go b/vendor/github.com/docker/docker/daemon/logger/ring.go index e940c62da..887f2ea18 100644 --- a/vendor/github.com/docker/docker/daemon/logger/ring.go +++ b/vendor/github.com/docker/docker/daemon/logger/ring.go @@ -93,7 +93,10 @@ func (r *RingLogger) Close() error { } if err := r.l.Log(msg); err != nil { - logrus.WithField("driver", r.l.Name()).WithField("container", r.logInfo.ContainerID).Errorf("Error writing log message: %v", r.l) + logrus.WithField("driver", r.l.Name()). + WithField("container", r.logInfo.ContainerID). + WithError(err). + Errorf("Error writing log message") logErr = true } } @@ -114,7 +117,10 @@ func (r *RingLogger) run() { return } if err := r.l.Log(msg); err != nil { - logrus.WithField("driver", r.l.Name()).WithField("container", r.logInfo.ContainerID).Errorf("Error writing log message: %v", r.l) + logrus.WithField("driver", r.l.Name()). + WithField("container", r.logInfo.ContainerID). + WithError(err). + Errorf("Error writing log message") } } } diff --git a/vendor/github.com/docker/docker/daemon/network.go b/vendor/github.com/docker/docker/daemon/network.go index 2c2a96b34..e5dcd0686 100644 --- a/vendor/github.com/docker/docker/daemon/network.go +++ b/vendor/github.com/docker/docker/daemon/network.go @@ -24,6 +24,16 @@ import ( "golang.org/x/net/context" ) +// PredefinedNetworkError is returned when a user tries to create a predefined network that already exists. +type PredefinedNetworkError string + +func (pnr PredefinedNetworkError) Error() string { + return fmt.Sprintf("operation is not permitted on predefined %s network ", string(pnr)) +} + +// Forbidden denotes the type of this error +func (pnr PredefinedNetworkError) Forbidden() {} + // NetworkControllerEnabled checks if the networking stack is enabled. // This feature depends on OS primitives and it's disabled in systems like Windows.
func (daemon *Daemon) NetworkControllerEnabled() bool { @@ -267,9 +277,8 @@ func (daemon *Daemon) CreateNetwork(create types.NetworkCreateRequest) (*types.N } func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string, agent bool) (*types.NetworkCreateResponse, error) { - if runconfig.IsPreDefinedNetwork(create.Name) && !agent { - err := fmt.Errorf("%s is a pre-defined network and cannot be created", create.Name) - return nil, errdefs.Forbidden(err) + if runconfig.IsPreDefinedNetwork(create.Name) { + return nil, PredefinedNetworkError(create.Name) } var warning string diff --git a/vendor/github.com/docker/docker/daemon/oci_linux.go b/vendor/github.com/docker/docker/daemon/oci_linux.go index 15bcb705b..8d5eebb88 100644 --- a/vendor/github.com/docker/docker/daemon/oci_linux.go +++ b/vendor/github.com/docker/docker/daemon/oci_linux.go @@ -667,7 +667,7 @@ func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []c if s.Root.Readonly { for i, m := range s.Mounts { switch m.Destination { - case "/proc", "/dev/pts", "/dev/mqueue", "/dev": + case "/proc", "/dev/pts", "/dev/shm", "/dev/mqueue", "/dev": continue } if _, ok := userMounts[m.Destination]; !ok { diff --git a/vendor/github.com/docker/docker/daemon/oci_linux_test.go b/vendor/github.com/docker/docker/daemon/oci_linux_test.go index 4af0ba96d..f6bda7974 100644 --- a/vendor/github.com/docker/docker/daemon/oci_linux_test.go +++ b/vendor/github.com/docker/docker/daemon/oci_linux_test.go @@ -48,3 +48,41 @@ func TestTmpfsDevShmNoDupMount(t *testing.T) { err = setMounts(&d, &s, c, ms) assert.NoError(t, err) } + +// TestIpcPrivateVsReadonly checks that in case of IpcMode: private +// and ReadonlyRootfs: true (as in "docker run --ipc private --read-only") +// the resulting /dev/shm mount is NOT made read-only. +// https://github.com/moby/moby/issues/36503 +func TestIpcPrivateVsReadonly(t *testing.T) { + d := Daemon{ + // some empty structs to avoid getting a panic + // caused by a null pointer dereference + idMappings: &idtools.IDMappings{}, + configStore: &config.Config{}, + } + c := &container.Container{ + HostConfig: &containertypes.HostConfig{ + IpcMode: containertypes.IpcMode("private"), + ReadonlyRootfs: true, + }, + } + + // We can't call createSpec() so mimic the minimal part + // of its code flow, just enough to reproduce the issue.
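+ // (Per the code below: build the mount list with daemon.setupMounts(), + // take oci.DefaultSpec(), mark the root read-only, then apply setMounts(), + // which contains the /dev/shm handling under test.)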
+ ms, err := d.setupMounts(c) + assert.NoError(t, err) + + s := oci.DefaultSpec() + s.Root.Readonly = c.HostConfig.ReadonlyRootfs + + err = setMounts(&d, &s, c, ms) + assert.NoError(t, err) + + // Find the /dev/shm mount in ms, check it does not have ro + for _, m := range s.Mounts { + if m.Destination != "/dev/shm" { + continue + } + assert.Equal(t, false, inSlice(m.Options, "ro")) + } +} diff --git a/vendor/github.com/docker/docker/daemon/stats/collector.go b/vendor/github.com/docker/docker/daemon/stats/collector.go index 39c76128b..6b7479dfd 100644 --- a/vendor/github.com/docker/docker/daemon/stats/collector.go +++ b/vendor/github.com/docker/docker/daemon/stats/collector.go @@ -1,6 +1,8 @@ package stats // import "github.com/docker/docker/daemon/stats" import ( + "bufio" + "sync" "time" "github.com/docker/docker/api/types" @@ -9,6 +11,37 @@ import ( "github.com/sirupsen/logrus" ) +// Collector manages and provides container resource stats +type Collector struct { + m sync.Mutex + supervisor supervisor + interval time.Duration + publishers map[*container.Container]*pubsub.Publisher + bufReader *bufio.Reader + + // The following fields are not set on Windows currently. + clockTicksPerSecond uint64 +} + +// NewCollector creates a stats collector that will poll the supervisor with the specified interval +func NewCollector(supervisor supervisor, interval time.Duration) *Collector { + s := &Collector{ + interval: interval, + supervisor: supervisor, + publishers: make(map[*container.Container]*pubsub.Publisher), + bufReader: bufio.NewReaderSize(nil, 128), + } + + platformNewStatsCollector(s) + + return s +} + +type supervisor interface { + // GetContainerStats collects all the stats related to a container + GetContainerStats(container *container.Container) (*types.StatsJSON, error) +} + // Collect registers the container with the collector and adds it to // the event loop for collection on the specified interval returning // a channel for the subscriber to receive on. @@ -57,7 +90,7 @@ func (s *Collector) Run() { // it will grow enough in first iteration var pairs []publishersPair - for range time.Tick(s.interval) { + for { // it does not make sense in the first iteration, // but saves allocations in further iterations pairs = pairs[:0] @@ -72,12 +105,6 @@ func (s *Collector) Run() { continue } - systemUsage, err := s.getSystemCPUUsage() - if err != nil { - logrus.Errorf("collecting system cpu usage: %v", err) - continue - } - onlineCPUs, err := s.getNumberOnlineCPUs() if err != nil { logrus.Errorf("collecting system online cpu count: %v", err) @@ -89,6 +116,14 @@ func (s *Collector) Run() { switch err.(type) { case nil: + // Sample system CPU usage close to container usage to avoid + // noise in metric calculations. 
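+ // (CPU percentages are computed from the deltas of container and system + // usage between polls; sampling the system counter immediately after the + // container stats keeps the two readings close together.)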
+ systemUsage, err := s.getSystemCPUUsage() + if err != nil { + logrus.WithError(err).WithField("container_id", pair.container.ID).Errorf("collecting system cpu usage") + continue + } + // FIXME: move to containerd on Linux (not Windows) stats.CPUStats.SystemUsage = systemUsage stats.CPUStats.OnlineCPUs = onlineCPUs @@ -106,6 +141,8 @@ func (s *Collector) Run() { logrus.Errorf("collecting stats for %s: %v", pair.container.ID, err) } } + + time.Sleep(s.interval) } } diff --git a/vendor/github.com/docker/docker/daemon/stats/types.go b/vendor/github.com/docker/docker/daemon/stats/types.go deleted file mode 100644 index cdd47d60a..000000000 --- a/vendor/github.com/docker/docker/daemon/stats/types.go +++ /dev/null @@ -1,42 +0,0 @@ -package stats // import "github.com/docker/docker/daemon/stats" - -import ( - "bufio" - "sync" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/pubsub" -) - -type supervisor interface { - // GetContainerStats collects all the stats related to a container - GetContainerStats(container *container.Container) (*types.StatsJSON, error) -} - -// NewCollector creates a stats collector that will poll the supervisor with the specified interval -func NewCollector(supervisor supervisor, interval time.Duration) *Collector { - s := &Collector{ - interval: interval, - supervisor: supervisor, - publishers: make(map[*container.Container]*pubsub.Publisher), - bufReader: bufio.NewReaderSize(nil, 128), - } - - platformNewStatsCollector(s) - - return s -} - -// Collector manages and provides container resource stats -type Collector struct { - m sync.Mutex - supervisor supervisor - interval time.Duration - publishers map[*container.Container]*pubsub.Publisher - bufReader *bufio.Reader - - // The following fields are not set on Windows currently. - clockTicksPerSecond uint64 -} diff --git a/vendor/github.com/docker/docker/distribution/config.go b/vendor/github.com/docker/docker/distribution/config.go index efd0d3fe7..44aacd732 100644 --- a/vendor/github.com/docker/docker/distribution/config.go +++ b/vendor/github.com/docker/docker/distribution/config.go @@ -19,6 +19,7 @@ import ( "github.com/docker/docker/registry" "github.com/docker/libtrust" "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go/v1" "golang.org/x/net/context" ) @@ -86,7 +87,8 @@ type ImagePushConfig struct { type ImageConfigStore interface { Put([]byte) (digest.Digest, error) Get(digest.Digest) ([]byte, error) - RootFSAndOSFromConfig([]byte) (*image.RootFS, string, error) + RootFSFromConfig([]byte) (*image.RootFS, error) + PlatformFromConfig([]byte) (*specs.Platform, error) } // PushLayerProvider provides layers to be pushed by ChainID. @@ -140,18 +142,26 @@ func (s *imageConfigStore) Get(d digest.Digest) ([]byte, error) { return img.RawJSON(), nil } -func (s *imageConfigStore) RootFSAndOSFromConfig(c []byte) (*image.RootFS, string, error) { +func (s *imageConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) { var unmarshalledConfig image.Image if err := json.Unmarshal(c, &unmarshalledConfig); err != nil { - return nil, "", err + return nil, err + } + return unmarshalledConfig.RootFS, nil +} + +func (s *imageConfigStore) PlatformFromConfig(c []byte) (*specs.Platform, error) { + var unmarshalledConfig image.Image + if err := json.Unmarshal(c, &unmarshalledConfig); err != nil { + return nil, err } // fail immediately on Windows when downloading a non-Windows image // and vice versa. 
Exception on Windows if Linux Containers are enabled. if runtime.GOOS == "windows" && unmarshalledConfig.OS == "linux" && !system.LCOWSupported() { - return nil, "", fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS) + return nil, fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS) } else if runtime.GOOS != "windows" && unmarshalledConfig.OS == "windows" { - return nil, "", fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS) + return nil, fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS) } os := unmarshalledConfig.OS @@ -159,9 +169,9 @@ func (s *imageConfigStore) RootFSAndOSFromConfig(c []byte) (*image.RootFS, strin os = runtime.GOOS } if !system.IsOSSupported(os) { - return nil, "", system.ErrNotSupportedOperatingSystem + return nil, system.ErrNotSupportedOperatingSystem } - return unmarshalledConfig.RootFS, os, nil + return &specs.Platform{OS: os, OSVersion: unmarshalledConfig.OSVersion}, nil } type storeLayerProvider struct { diff --git a/vendor/github.com/docker/docker/distribution/pull_v2.go b/vendor/github.com/docker/docker/distribution/pull_v2.go index 2f1dac9a3..a1316920f 100644 --- a/vendor/github.com/docker/docker/distribution/pull_v2.go +++ b/vendor/github.com/docker/docker/distribution/pull_v2.go @@ -30,6 +30,7 @@ import ( refstore "github.com/docker/docker/reference" "github.com/docker/docker/registry" digest "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/net/context" @@ -584,11 +585,11 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s }() var ( - configJSON []byte // raw serialized image config - downloadedRootFS *image.RootFS // rootFS from registered layers - configRootFS *image.RootFS // rootFS from configuration - release func() // release resources from rootFS download - configOS string // for LCOW when registering downloaded layers + configJSON []byte // raw serialized image config + downloadedRootFS *image.RootFS // rootFS from registered layers + configRootFS *image.RootFS // rootFS from configuration + release func() // release resources from rootFS download + configPlatform *specs.Platform // for LCOW when registering downloaded layers ) // https://github.com/docker/docker/issues/24766 - Err on the side of caution, @@ -600,14 +601,16 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s // check to block Windows images being pulled on Linux is implemented, it // may be necessary to perform the same type of serialisation. if runtime.GOOS == "windows" { - configJSON, configRootFS, configOS, err = receiveConfig(p.config.ImageStore, configChan, configErrChan) + configJSON, configRootFS, configPlatform, err = receiveConfig(p.config.ImageStore, configChan, configErrChan) if err != nil { return "", "", err } - if configRootFS == nil { return "", "", errRootFSInvalid } + if err := checkImageCompatibility(configPlatform.OS, configPlatform.OSVersion); err != nil { + return "", "", err + } if len(descriptors) != len(configRootFS.DiffIDs) { return "", "", errRootFSMismatch @@ -615,8 +618,8 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s // Early bath if the requested OS doesn't match that of the configuration. // This avoids doing the download, only to potentially fail later. 
- if !strings.EqualFold(configOS, requestedOS) { - return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configOS, requestedOS) + if !strings.EqualFold(configPlatform.OS, requestedOS) { + return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configPlatform.OS, requestedOS) } // Populate diff ids in descriptors to avoid downloading foreign layers @@ -698,16 +701,20 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s return imageID, manifestDigest, nil } -func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, string, error) { +func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, *specs.Platform, error) { select { case configJSON := <-configChan: - rootfs, os, err := s.RootFSAndOSFromConfig(configJSON) + rootfs, err := s.RootFSFromConfig(configJSON) if err != nil { - return nil, nil, "", err + return nil, nil, nil, err } - return configJSON, rootfs, os, nil + platform, err := s.PlatformFromConfig(configJSON) + if err != nil { + return nil, nil, nil, err + } + return configJSON, rootfs, platform, nil case err := <-errChan: - return nil, nil, "", err + return nil, nil, nil, err // Don't need a case for ctx.Done in the select because cancellation // will trigger an error in p.pullSchema2ImageConfig. } @@ -736,6 +743,10 @@ func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mf } manifestDigest := manifestMatches[0].Digest + if err := checkImageCompatibility(manifestMatches[0].Platform.OS, manifestMatches[0].Platform.OSVersion); err != nil { + return "", "", err + } + manSvc, err := p.repo.Manifests(ctx) if err != nil { return "", "", err diff --git a/vendor/github.com/docker/docker/distribution/pull_v2_unix.go b/vendor/github.com/docker/docker/distribution/pull_v2_unix.go index 9ddf0ac6e..39c166fbc 100644 --- a/vendor/github.com/docker/docker/distribution/pull_v2_unix.go +++ b/vendor/github.com/docker/docker/distribution/pull_v2_unix.go @@ -27,3 +27,8 @@ func filterManifests(manifests []manifestlist.ManifestDescriptor, os string) []m } return matches } + +// checkImageCompatibility is a Windows-specific function. 
No-op on Linux +func checkImageCompatibility(imageOS, imageOSVersion string) error { + return nil +} diff --git a/vendor/github.com/docker/docker/distribution/pull_v2_windows.go b/vendor/github.com/docker/docker/distribution/pull_v2_windows.go index 6102dc5b0..7527f6271 100644 --- a/vendor/github.com/docker/docker/distribution/pull_v2_windows.go +++ b/vendor/github.com/docker/docker/distribution/pull_v2_windows.go @@ -1,11 +1,13 @@ package distribution // import "github.com/docker/docker/distribution" import ( + "errors" "fmt" "net/http" "os" "runtime" "sort" + "strconv" "strings" "github.com/docker/distribution" @@ -63,7 +65,6 @@ func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekClo func filterManifests(manifests []manifestlist.ManifestDescriptor, os string) []manifestlist.ManifestDescriptor { osVersion := "" if os == "windows" { - // TODO: Add UBR (Update Build Release) component after build version := system.GetOSVersion() osVersion = fmt.Sprintf("%d.%d.%d", version.MajorVersion, version.MinorVersion, version.Build) logrus.Debugf("will prefer entries with version %s", osVersion) @@ -71,10 +72,11 @@ func filterManifests(manifests []manifestlist.ManifestDescriptor, os string) []m var matches []manifestlist.ManifestDescriptor for _, manifestDescriptor := range manifests { - // TODO: Consider filtering out greater versions, including only greater UBR if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == os { matches = append(matches, manifestDescriptor) - logrus.Debugf("found match for %s/%s with media type %s, digest %s", os, runtime.GOARCH, manifestDescriptor.MediaType, manifestDescriptor.Digest.String()) + logrus.Debugf("found match for %s/%s %s with media type %s, digest %s", os, runtime.GOARCH, manifestDescriptor.Platform.OSVersion, manifestDescriptor.MediaType, manifestDescriptor.Digest.String()) + } else { + logrus.Debugf("ignoring %s/%s %s with media type %s, digest %s", os, runtime.GOARCH, manifestDescriptor.Platform.OSVersion, manifestDescriptor.MediaType, manifestDescriptor.Digest.String()) } } if os == "windows" { @@ -107,3 +109,22 @@ func (mbv manifestsByVersion) Len() int { func (mbv manifestsByVersion) Swap(i, j int) { mbv.list[i], mbv.list[j] = mbv.list[j], mbv.list[i] } + +// checkImageCompatibility blocks pulling incompatible images based on a later OS build +// Fixes https://github.com/moby/moby/issues/36184. 
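+// (Illustrative example: pulling a 10.0.17763-based image on a 10.0.16299 +// host compares image build 17763 against host build 16299 and rejects the +// pull; images with an equal or older build are allowed through.)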
+func checkImageCompatibility(imageOS, imageOSVersion string) error { + if imageOS == "windows" { + hostOSV := system.GetOSVersion() + splitImageOSVersion := strings.Split(imageOSVersion, ".") // eg 10.0.16299.nnnn + if len(splitImageOSVersion) >= 3 { + if imageOSBuild, err := strconv.Atoi(splitImageOSVersion[2]); err == nil { + if imageOSBuild > int(hostOSV.Build) { + errMsg := fmt.Sprintf("a Windows version %s.%s.%s-based image is incompatible with a %s host", splitImageOSVersion[0], splitImageOSVersion[1], splitImageOSVersion[2], hostOSV.ToString()) + logrus.Debugf(errMsg) + return errors.New(errMsg) + } + } + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/distribution/push_v2.go b/vendor/github.com/docker/docker/distribution/push_v2.go index ca066afc7..7b7155169 100644 --- a/vendor/github.com/docker/docker/distribution/push_v2.go +++ b/vendor/github.com/docker/docker/distribution/push_v2.go @@ -118,12 +118,17 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id return fmt.Errorf("could not find image from tag %s: %v", reference.FamiliarString(ref), err) } - rootfs, os, err := p.config.ImageStore.RootFSAndOSFromConfig(imgConfig) + rootfs, err := p.config.ImageStore.RootFSFromConfig(imgConfig) if err != nil { return fmt.Errorf("unable to get rootfs for image %s: %s", reference.FamiliarString(ref), err) } - l, err := p.config.LayerStores[os].Get(rootfs.ChainID()) + platform, err := p.config.ImageStore.PlatformFromConfig(imgConfig) + if err != nil { + return fmt.Errorf("unable to get platform for image %s: %s", reference.FamiliarString(ref), err) + } + + l, err := p.config.LayerStores[platform.OS].Get(rootfs.ChainID()) if err != nil { return fmt.Errorf("failed to get top layer from image: %v", err) } diff --git a/vendor/github.com/docker/docker/errdefs/defs.go b/vendor/github.com/docker/docker/errdefs/defs.go index 615ff3c79..e6a2275b2 100644 --- a/vendor/github.com/docker/docker/errdefs/defs.go +++ b/vendor/github.com/docker/docker/errdefs/defs.go @@ -35,7 +35,7 @@ type ErrForbidden interface { // ErrSystem signals that some internal error occurred. // An example of this would be a failed mount request. 
type ErrSystem interface { - ErrSystem() + System() } // ErrNotModified signals that an action can't be performed because it's already in the desired state diff --git a/vendor/github.com/docker/docker/errdefs/helpers_test.go b/vendor/github.com/docker/docker/errdefs/helpers_test.go index 42994e365..f1c88704c 100644 --- a/vendor/github.com/docker/docker/errdefs/helpers_test.go +++ b/vendor/github.com/docker/docker/errdefs/helpers_test.go @@ -12,6 +12,9 @@ type causal interface { } func TestNotFound(t *testing.T) { + if IsNotFound(errTest) { + t.Fatalf("did not expect not found error, got %T", errTest) + } e := NotFound(errTest) if !IsNotFound(e) { t.Fatalf("expected not found error, got: %T", e) } @@ -22,6 +25,9 @@ func TestNotFound(t *testing.T) { } func TestConflict(t *testing.T) { + if IsConflict(errTest) { + t.Fatalf("did not expect conflict error, got %T", errTest) + } e := Conflict(errTest) if !IsConflict(e) { t.Fatalf("expected conflcit error, got: %T", e) } @@ -32,6 +38,9 @@ func TestConflict(t *testing.T) { } func TestForbidden(t *testing.T) { + if IsForbidden(errTest) { + t.Fatalf("did not expect forbidden error, got %T", errTest) + } e := Forbidden(errTest) if !IsForbidden(e) { t.Fatalf("expected forbidden error, got: %T", e) } @@ -42,6 +51,9 @@ func TestForbidden(t *testing.T) { } func TestInvalidParameter(t *testing.T) { + if IsInvalidParameter(errTest) { + t.Fatalf("did not expect invalid argument error, got %T", errTest) + } e := InvalidParameter(errTest) if !IsInvalidParameter(e) { t.Fatalf("expected invalid argument error, got %T", e) } @@ -52,6 +64,9 @@ func TestInvalidParameter(t *testing.T) { } func TestNotImplemented(t *testing.T) { + if IsNotImplemented(errTest) { + t.Fatalf("did not expect not implemented error, got %T", errTest) + } e := NotImplemented(errTest) if !IsNotImplemented(e) { t.Fatalf("expected not implemented error, got %T", e) } @@ -62,6 +77,9 @@ func TestNotImplemented(t *testing.T) { } func TestNotModified(t *testing.T) { + if IsNotModified(errTest) { + t.Fatalf("did not expect not modified error, got %T", errTest) + } e := NotModified(errTest) if !IsNotModified(e) { t.Fatalf("expected not modified error, got %T", e) } @@ -72,6 +90,9 @@ func TestNotModified(t *testing.T) { } func TestAlreadyExists(t *testing.T) { + if IsAlreadyExists(errTest) { + t.Fatalf("did not expect already exists error, got %T", errTest) + } e := AlreadyExists(errTest) if !IsAlreadyExists(e) { t.Fatalf("expected already exists error, got %T", e) } @@ -82,6 +103,9 @@ func TestAlreadyExists(t *testing.T) { } func TestUnauthorized(t *testing.T) { + if IsUnauthorized(errTest) { + t.Fatalf("did not expect unauthorized error, got %T", errTest) + } e := Unauthorized(errTest) if !IsUnauthorized(e) { t.Fatalf("expected unauthorized error, got %T", e) } @@ -92,6 +116,9 @@ func TestUnauthorized(t *testing.T) { } func TestUnknown(t *testing.T) { + if IsUnknown(errTest) { + t.Fatalf("did not expect unknown error, got %T", errTest) + } e := Unknown(errTest) if !IsUnknown(e) { t.Fatalf("expected unknown error, got %T", e) } @@ -102,9 +129,12 @@ func TestUnknown(t *testing.T) { } func TestCancelled(t *testing.T) { + if IsCancelled(errTest) { + t.Fatalf("did not expect cancelled error, got %T", errTest) + } e := Cancelled(errTest) if !IsCancelled(e) { - t.Fatalf("expected canclled error, got %T", e) + t.Fatalf("expected cancelled error, got %T", e) } if cause := e.(causal).Cause(); cause != errTest { t.Fatalf("causual should be errTest, got: %v", cause) } @@ -112,6 +142,9 @@ func TestCancelled(t *testing.T) { } func
TestDeadline(t *testing.T) { + if IsDeadline(errTest) { + t.Fatalf("did not expect deadline error, got %T", errTest) + } e := Deadline(errTest) if !IsDeadline(e) { t.Fatalf("expected deadline error, got %T", e) } @@ -121,7 +154,10 @@ func TestDeadline(t *testing.T) { } } -func TestIsDataLoss(t *testing.T) { +func TestDataLoss(t *testing.T) { + if IsDataLoss(errTest) { + t.Fatalf("did not expect data loss error, got %T", errTest) + } e := DataLoss(errTest) if !IsDataLoss(e) { t.Fatalf("expected data loss error, got %T", e) } @@ -130,3 +166,29 @@ func TestIsDataLoss(t *testing.T) { t.Fatalf("causual should be errTest, got: %v", cause) } } + +func TestUnavailable(t *testing.T) { + if IsUnavailable(errTest) { + t.Fatalf("did not expect unavailable error, got %T", errTest) + } + e := Unavailable(errTest) + if !IsUnavailable(e) { + t.Fatalf("expected unavailable error, got %T", e) + } + if cause := e.(causal).Cause(); cause != errTest { + t.Fatalf("causal should be errTest, got: %v", cause) + } +} + +func TestSystem(t *testing.T) { + if IsSystem(errTest) { + t.Fatalf("did not expect system error, got %T", errTest) + } + e := System(errTest) + if !IsSystem(e) { + t.Fatalf("expected system error, got %T", e) + } + if cause := e.(causal).Cause(); cause != errTest { + t.Fatalf("causal should be errTest, got: %v", cause) + } +} diff --git a/vendor/github.com/docker/docker/errdefs/is.go b/vendor/github.com/docker/docker/errdefs/is.go index 36ea6667f..cc26e4b75 100644 --- a/vendor/github.com/docker/docker/errdefs/is.go +++ b/vendor/github.com/docker/docker/errdefs/is.go @@ -21,7 +21,7 @@ func getImplementer(err error) error { ErrDeadline, ErrDataLoss, ErrUnknown: - return e + return err case causer: return getImplementer(e.Cause()) default: diff --git a/vendor/github.com/docker/docker/hack/dockerfile/install/containerd.installer b/vendor/github.com/docker/docker/hack/dockerfile/install/containerd.installer index 5731a6d56..d6ab8371a 100755 --- a/vendor/github.com/docker/docker/hack/dockerfile/install/containerd.installer +++ b/vendor/github.com/docker/docker/hack/dockerfile/install/containerd.installer @@ -14,10 +14,15 @@ install_containerd() { ( - if [ "$1" == "static" ]; then - export BUILDTAGS='static_build netgo' - export EXTRA_FLAGS='-buildmod pie' - export EXTRA_LDFLAGS='-extldflags "-fno-PIC -static"' + export BUILDTAGS='static_build netgo' + export EXTRA_FLAGS='-buildmode=pie' + export EXTRA_LDFLAGS='-extldflags "-fno-PIC -static"' + + # Reset build flags to nothing if we want a dynbinary + if [ "$1" == "dynamic" ]; then + export BUILDTAGS='' + export EXTRA_FLAGS='' + export EXTRA_LDFLAGS='' fi make diff --git a/vendor/github.com/docker/docker/hack/dockerfile/install/proxy.installer b/vendor/github.com/docker/docker/hack/dockerfile/install/proxy.installer index 598f67c3f..ed9ea7cbc 100755 --- a/vendor/github.com/docker/docker/hack/dockerfile/install/proxy.installer +++ b/vendor/github.com/docker/docker/hack/dockerfile/install/proxy.installer @@ -23,6 +23,7 @@ install_proxy() { install_proxy_dynamic() { export PROXY_LDFLAGS="-linkmode=external" install_proxy + export BUILD_MODE="-buildmode=pie" _install_proxy } @@ -31,7 +32,7 @@ _install_proxy() { git clone https://github.com/docker/libnetwork.git "$GOPATH/src/github.com/docker/libnetwork" cd "$GOPATH/src/github.com/docker/libnetwork" git checkout -q "$LIBNETWORK_COMMIT" - go build -buildmode=pie -ldflags="$PROXY_LDFLAGS" -o ${PREFIX}/docker-proxy github.com/docker/libnetwork/cmd/proxy + go build $BUILD_MODE -ldflags="$PROXY_LDFLAGS" -o
${PREFIX}/docker-proxy github.com/docker/libnetwork/cmd/proxy } diff --git a/vendor/github.com/docker/docker/hack/dockerfile/install/runc.installer b/vendor/github.com/docker/docker/hack/dockerfile/install/runc.installer index 923ae706e..054f95d66 100755 --- a/vendor/github.com/docker/docker/hack/dockerfile/install/runc.installer +++ b/vendor/github.com/docker/docker/hack/dockerfile/install/runc.installer @@ -1,7 +1,7 @@ #!/bin/sh # When updating RUNC_COMMIT, also update runc in vendor.conf accordingly -RUNC_COMMIT=6c55f98695e902427906eed2c799e566e3d3dfb5 +RUNC_COMMIT=4fc53a81fb7c994640722ac585fa9ca548971871 install_runc() { # Do not build with ambient capabilities support @@ -11,7 +11,12 @@ install_runc() { git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" cd "$GOPATH/src/github.com/opencontainers/runc" git checkout -q "$RUNC_COMMIT" - make BUILDTAGS="$RUNC_BUILDTAGS" $1 + if [ -z "$1" ]; then + target=static + else + target="$1" + fi + make BUILDTAGS="$RUNC_BUILDTAGS" "$target" mkdir -p ${PREFIX} cp runc ${PREFIX}/docker-runc } diff --git a/vendor/github.com/docker/docker/hack/make/.detect-daemon-osarch b/vendor/github.com/docker/docker/hack/make/.detect-daemon-osarch index c472710d1..91e2c53c7 100644 --- a/vendor/github.com/docker/docker/hack/make/.detect-daemon-osarch +++ b/vendor/github.com/docker/docker/hack/make/.detect-daemon-osarch @@ -34,35 +34,10 @@ export DOCKER_CLIENT_GOOS="${DOCKER_CLIENT_OSARCH%/*}" export DOCKER_CLIENT_GOARCH="${DOCKER_CLIENT_OSARCH##*/}" DOCKER_CLIENT_GOARCH=${DOCKER_CLIENT_GOARCH:=amd64} -# Retrieve the architecture used in contrib/builder/(deb|rpm)/$PACKAGE_ARCH/ -PACKAGE_ARCH='amd64' -case "${DOCKER_ENGINE_GOARCH:-$DOCKER_CLIENT_GOARCH}" in - arm) - PACKAGE_ARCH='armhf' - ;; - arm64) - PACKAGE_ARCH='aarch64' - ;; - amd64|ppc64le|s390x) - PACKAGE_ARCH="${DOCKER_ENGINE_GOARCH:-$DOCKER_CLIENT_GOARCH}" - ;; - *) - echo >&2 "warning: not sure how to convert '$DOCKER_ENGINE_GOARCH' to a 'Docker' arch, assuming '$PACKAGE_ARCH'" - ;; -esac -export PACKAGE_ARCH - DOCKERFILE='Dockerfile' -case "$PACKAGE_ARCH" in - amd64) - case "${DOCKER_ENGINE_GOOS:-$DOCKER_CLIENT_GOOS}" in - windows) - DOCKERFILE='Dockerfile.windows' - ;; - esac - ;; - *) - DOCKERFILE="Dockerfile.$PACKAGE_ARCH" - ;; -esac + +if [ "${DOCKER_ENGINE_GOOS:-$DOCKER_CLIENT_GOOS}" = "windows" ]; then + DOCKERFILE='Dockerfile.windows' +fi + export DOCKERFILE diff --git a/vendor/github.com/docker/docker/integration-cli/check_test.go b/vendor/github.com/docker/docker/integration-cli/check_test.go index dd802fbc1..a9f94fb57 100644 --- a/vendor/github.com/docker/docker/integration-cli/check_test.go +++ b/vendor/github.com/docker/docker/integration-cli/check_test.go @@ -85,7 +85,7 @@ type DockerSuite struct { } func (s *DockerSuite) OnTimeout(c *check.C) { - if !testEnv.IsLocalDaemon() { + if testEnv.IsRemoteDaemon() { return } path := filepath.Join(os.Getenv("DEST"), "docker.pid") diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_containers_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_containers_test.go index a91c91abe..ed1941022 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_containers_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_containers_test.go @@ -1713,7 +1713,7 @@ func (s *DockerSuite) TestContainersAPICreateMountsValidation(c *check.C) { Type: "bind", Source: notExistPath, Target: destPath}}}, - msg: "bind source path does not exist", + msg: "bind mount 
source path does not exist: " + notExistPath, }, { config: containertypes.Config{ diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_events_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_events_test.go deleted file mode 100644 index a95422f58..000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_events_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package main - -import ( - "encoding/json" - "io" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/docker/docker/integration-cli/checker" - "github.com/docker/docker/integration-cli/request" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestEventsAPIEmptyOutput(c *check.C) { - type apiResp struct { - resp *http.Response - err error - } - chResp := make(chan *apiResp) - go func() { - resp, body, err := request.Get("/events") - body.Close() - chResp <- &apiResp{resp, err} - }() - - select { - case r := <-chResp: - c.Assert(r.err, checker.IsNil) - c.Assert(r.resp.StatusCode, checker.Equals, http.StatusOK) - case <-time.After(3 * time.Second): - c.Fatal("timeout waiting for events api to respond, should have responded immediately") - } -} - -func (s *DockerSuite) TestEventsAPIBackwardsCompatible(c *check.C) { - since := daemonTime(c).Unix() - ts := strconv.FormatInt(since, 10) - - out := runSleepingContainer(c, "--name=foo", "-d") - containerID := strings.TrimSpace(out) - c.Assert(waitRun(containerID), checker.IsNil) - - q := url.Values{} - q.Set("since", ts) - - _, body, err := request.Get("/events?" + q.Encode()) - c.Assert(err, checker.IsNil) - defer body.Close() - - dec := json.NewDecoder(body) - var containerCreateEvent *jsonmessage.JSONMessage - for { - var event jsonmessage.JSONMessage - if err := dec.Decode(&event); err != nil { - if err == io.EOF { - break - } - c.Fatal(err) - } - if event.Status == "create" && event.ID == containerID { - containerCreateEvent = &event - break - } - } - - c.Assert(containerCreateEvent, checker.Not(checker.IsNil)) - c.Assert(containerCreateEvent.Status, checker.Equals, "create") - c.Assert(containerCreateEvent.ID, checker.Equals, containerID) - c.Assert(containerCreateEvent.From, checker.Equals, "busybox") -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_test.go index fd0b4e627..b5edfd2c0 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_test.go @@ -910,6 +910,8 @@ func (s *DockerSwarmSuite) TestAPIDuplicateNetworks(c *check.C) { // Test case for 30178 func (s *DockerSwarmSuite) TestAPISwarmHealthcheckNone(c *check.C) { + // Issue #36386 can be an independent one, which is worth further investigation.
+ c.Skip("Root cause of issue #36386 needs to be identified") d := s.AddDaemon(c, true, true) out, err := d.Cmd("network", "create", "-d", "overlay", "lb") diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_build_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_build_test.go index 5dd1d8b35..fc2dafe41 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_build_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_build_test.go @@ -1439,6 +1439,7 @@ func (s *DockerSuite) TestBuildRelativeCopy(c *check.C) { )) } +// FIXME(vdemeester) should be a unit test func (s *DockerSuite) TestBuildBlankName(c *check.C) { name := "testbuildblankname" testCases := []struct { @@ -2066,6 +2067,7 @@ func (s *DockerSuite) TestBuildNoContext(c *check.C) { } } +// FIXME(vdemeester) migrate to docker/cli e2e func (s *DockerSuite) TestBuildDockerfileStdin(c *check.C) { name := "stdindockerfile" tmpDir, err := ioutil.TempDir("", "fake-context") @@ -2085,6 +2087,7 @@ CMD ["cat", "/foo"]`), c.Assert(strings.TrimSpace(string(res)), checker.Equals, `[cat /foo]`) } +// FIXME(vdemeester) migrate to docker/cli tests (unit or e2e) func (s *DockerSuite) TestBuildDockerfileStdinConflict(c *check.C) { name := "stdindockerfiletarcontext" icmd.RunCmd(icmd.Cmd{ @@ -2401,6 +2404,7 @@ func (s *DockerSuite) TestBuildDockerignoringDockerfile(c *check.C) { build.WithFile("Dockerfile", dockerfile), build.WithFile(".dockerignore", "Dockerfile\n"), )) + // FIXME(vdemeester) why twice? buildImageSuccessfully(c, name, build.WithBuildContext(c, build.WithFile("Dockerfile", dockerfile), build.WithFile(".dockerignore", "./Dockerfile\n"), )) @@ -2420,6 +2424,7 @@ func (s *DockerSuite) TestBuildDockerignoringRenamedDockerfile(c *check.C) { build.WithFile("MyDockerfile", dockerfile), build.WithFile(".dockerignore", "MyDockerfile\n"), )) + // FIXME(vdemeester) why twice? buildImageSuccessfully(c, name, cli.WithFlags("-f", "MyDockerfile"), build.WithBuildContext(c, build.WithFile("Dockerfile", "Should not use me"), build.WithFile("MyDockerfile", dockerfile), @@ -3045,6 +3050,7 @@ func (s *DockerSuite) TestBuildAddTarXzGz(c *check.C) { buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) } +// FIXME(vdemeester) most of the from-git tests could be moved to `docker/cli` e2e tests func (s *DockerSuite) TestBuildFromGit(c *check.C) { name := "testbuildfromgit" git := fakegit.New(c, "repo", map[string]string{ @@ -3422,6 +3428,7 @@ func (s *DockerSuite) TestBuildLabelsCache(c *check.C) { } +// FIXME(vdemeester) port to docker/cli e2e tests (api tests should test suppressOutput option though) func (s *DockerSuite) TestBuildNotVerboseSuccess(c *check.C) { // This test makes sure that -q works correctly when build is successful: // stdout has only the image ID (long image ID) and stderr is empty.
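The c.Skip call added to TestAPISwarmHealthcheckNone above uses go-check's standard skip mechanism: the skip is recorded per test, so the suite keeps running and the test is reported as skipped rather than failed. A minimal, self-contained sketch of that pattern, with a hypothetical suite and test name that are not part of this patch:

    package main

    import (
        "testing"

        check "gopkg.in/check.v1"
    )

    // Hook the go-check runner into the standard `go test` binary, as
    // integration-cli's check_test.go does.
    func Test(t *testing.T) { check.TestingT(t) }

    type ExampleSuite struct{}

    var _ = check.Suite(&ExampleSuite{})

    // A known-broken test is skipped up front; go-check reports it as
    // skipped until the underlying issue is resolved and the skip removed.
    func (s *ExampleSuite) TestGuarded(c *check.C) {
        c.Skip("root cause of the failure is not yet understood")
        c.Assert(1+1, check.Equals, 2) // not reached while the skip is in place
    }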
@@ -3472,6 +3479,7 @@ func (s *DockerSuite) TestBuildNotVerboseSuccess(c *check.C) { } +// FIXME(vdemeester) migrate to docker/cli tests func (s *DockerSuite) TestBuildNotVerboseFailureWithNonExistImage(c *check.C) { // This test makes sure that -q works correctly when build fails by // comparing between the stderr output in quiet mode and in stdout @@ -3492,6 +3500,7 @@ func (s *DockerSuite) TestBuildNotVerboseFailureWithNonExistImage(c *check.C) { } } +// FIXME(vdemeester) migrate to docker/cli tests func (s *DockerSuite) TestBuildNotVerboseFailure(c *check.C) { // This test makes sure that -q works correctly when build fails by // comparing between the stderr output in quiet mode and in stdout @@ -3519,6 +3528,7 @@ func (s *DockerSuite) TestBuildNotVerboseFailure(c *check.C) { } } +// FIXME(vdemeester) migrate to docker/cli tests func (s *DockerSuite) TestBuildNotVerboseFailureRemote(c *check.C) { // This test ensures that when given a wrong URL, stderr in quiet mode and // stderr in verbose mode are identical. @@ -3548,6 +3558,7 @@ func (s *DockerSuite) TestBuildNotVerboseFailureRemote(c *check.C) { } } +// FIXME(vdemeester) migrate to docker/cli tests func (s *DockerSuite) TestBuildStderr(c *check.C) { // This test just makes sure that no non-error output goes // to stderr @@ -3688,67 +3699,6 @@ CMD cat /foo/file`), } -// FIXME(vdemeester) part of this should be unit test, other part should be clearer -func (s *DockerSuite) TestBuildRenamedDockerfile(c *check.C) { - ctx := fakecontext.New(c, "", fakecontext.WithFiles(map[string]string{ - "Dockerfile": "FROM busybox\nRUN echo from Dockerfile", - "files/Dockerfile": "FROM busybox\nRUN echo from files/Dockerfile", - "files/dFile": "FROM busybox\nRUN echo from files/dFile", - "dFile": "FROM busybox\nRUN echo from dFile", - "files/dFile2": "FROM busybox\nRUN echo from files/dFile2", - })) - defer ctx.Close() - - cli.Docker(cli.Args("build", "-t", "test1", "."), cli.InDir(ctx.Dir)).Assert(c, icmd.Expected{ - Out: "from Dockerfile", - }) - - cli.Docker(cli.Args("build", "-f", filepath.Join("files", "Dockerfile"), "-t", "test2", "."), cli.InDir(ctx.Dir)).Assert(c, icmd.Expected{ - Out: "from files/Dockerfile", - }) - - cli.Docker(cli.Args("build", fmt.Sprintf("--file=%s", filepath.Join("files", "dFile")), "-t", "test3", "."), cli.InDir(ctx.Dir)).Assert(c, icmd.Expected{ - Out: "from files/dFile", - }) - - cli.Docker(cli.Args("build", "--file=dFile", "-t", "test4", "."), cli.InDir(ctx.Dir)).Assert(c, icmd.Expected{ - Out: "from dFile", - }) - - dirWithNoDockerfile, err := ioutil.TempDir(os.TempDir(), "test5") - c.Assert(err, check.IsNil) - nonDockerfileFile := filepath.Join(dirWithNoDockerfile, "notDockerfile") - if _, err = os.Create(nonDockerfileFile); err != nil { - c.Fatal(err) - } - cli.Docker(cli.Args("build", fmt.Sprintf("--file=%s", nonDockerfileFile), "-t", "test5", "."), cli.InDir(ctx.Dir)).Assert(c, icmd.Expected{ - ExitCode: 1, - Err: fmt.Sprintf("unable to prepare context: the Dockerfile (%s) must be within the build context", nonDockerfileFile), - }) - - cli.Docker(cli.Args("build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test6", ".."), cli.InDir(filepath.Join(ctx.Dir, "files"))).Assert(c, icmd.Expected{ - Out: "from Dockerfile", - }) - - cli.Docker(cli.Args("build", "-f", filepath.Join(ctx.Dir, "files", "Dockerfile"), "-t", "test7", ".."), cli.InDir(filepath.Join(ctx.Dir, "files"))).Assert(c, icmd.Expected{ - Out: "from files/Dockerfile", - }) - - cli.Docker(cli.Args("build", "-f", filepath.Join("..", "Dockerfile"), 
"-t", "test8", "."), cli.InDir(filepath.Join(ctx.Dir, "files"))).Assert(c, icmd.Expected{ - ExitCode: 1, - Err: "must be within the build context", - }) - - tmpDir := os.TempDir() - cli.Docker(cli.Args("build", "-t", "test9", ctx.Dir), cli.InDir(tmpDir)).Assert(c, icmd.Expected{ - Out: "from Dockerfile", - }) - - cli.Docker(cli.Args("build", "-f", "dFile2", "-t", "test10", "."), cli.InDir(filepath.Join(ctx.Dir, "files"))).Assert(c, icmd.Expected{ - Out: "from files/dFile2", - }) -} - func (s *DockerSuite) TestBuildFromMixedcaseDockerfile(c *check.C) { testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows testRequires(c, DaemonIsLinux) @@ -3772,6 +3722,7 @@ func (s *DockerSuite) TestBuildFromMixedcaseDockerfile(c *check.C) { }) } +// FIXME(vdemeester) should migrate to docker/cli tests func (s *DockerSuite) TestBuildFromURLWithF(c *check.C) { server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{"baz": `FROM busybox RUN echo from baz @@ -3798,6 +3749,7 @@ RUN find /tmp/`})) } +// FIXME(vdemeester) should migrate to docker/cli tests func (s *DockerSuite) TestBuildFromStdinWithF(c *check.C) { testRequires(c, DaemonIsLinux) // TODO Windows: This test is flaky; no idea why ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox @@ -3840,61 +3792,6 @@ func (s *DockerSuite) TestBuildFromOfficialNames(c *check.C) { } } -func (s *DockerSuite) TestBuildDockerfileOutsideContext(c *check.C) { - testRequires(c, UnixCli, DaemonIsLinux) // uses os.Symlink: not implemented in windows at the time of writing (go-1.4.2) - - name := "testbuilddockerfileoutsidecontext" - tmpdir, err := ioutil.TempDir("", name) - c.Assert(err, check.IsNil) - defer os.RemoveAll(tmpdir) - ctx := filepath.Join(tmpdir, "context") - if err := os.MkdirAll(ctx, 0755); err != nil { - c.Fatal(err) - } - if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte("FROM scratch\nENV X Y"), 0644); err != nil { - c.Fatal(err) - } - wd, err := os.Getwd() - if err != nil { - c.Fatal(err) - } - defer os.Chdir(wd) - if err := os.Chdir(ctx); err != nil { - c.Fatal(err) - } - if err := ioutil.WriteFile(filepath.Join(tmpdir, "outsideDockerfile"), []byte("FROM scratch\nENV x y"), 0644); err != nil { - c.Fatal(err) - } - if err := os.Symlink(filepath.Join("..", "outsideDockerfile"), filepath.Join(ctx, "dockerfile1")); err != nil { - c.Fatal(err) - } - if err := os.Symlink(filepath.Join(tmpdir, "outsideDockerfile"), filepath.Join(ctx, "dockerfile2")); err != nil { - c.Fatal(err) - } - - for _, dockerfilePath := range []string{ - filepath.Join("..", "outsideDockerfile"), - filepath.Join(ctx, "dockerfile1"), - filepath.Join(ctx, "dockerfile2"), - } { - result := dockerCmdWithResult("build", "-t", name, "--no-cache", "-f", dockerfilePath, ".") - result.Assert(c, icmd.Expected{ - Err: "must be within the build context", - ExitCode: 1, - }) - deleteImages(name) - } - - os.Chdir(tmpdir) - - // Path to Dockerfile should be resolved relative to working directory, not relative to context. - // There is a Dockerfile in the context, but since there is no Dockerfile in the current directory, the following should fail - out, _, err := dockerCmdWithError("build", "-t", name, "--no-cache", "-f", "Dockerfile", ctx) - if err == nil { - c.Fatalf("Expected error. 
Out: %s", out) - } -} - // FIXME(vdemeester) should be a unit test func (s *DockerSuite) TestBuildSpaces(c *check.C) { // Test to make sure that leading/trailing spaces on a command @@ -4186,6 +4083,7 @@ func (s *DockerTrustSuite) TestTrustedBuildUntrustedTag(c *check.C) { }) } +// FIXME(vdemeester) should migrate to docker/cli e2e tests func (s *DockerTrustSuite) TestBuildContextDirIsSymlink(c *check.C) { testRequires(c, DaemonIsLinux) tempDir, err := ioutil.TempDir("", "test-build-dir-is-symlink-") @@ -5130,6 +5028,7 @@ func (s *DockerSuite) TestBuildCacheRootSource(c *check.C) { } // #19375 +// FIXME(vdemeester) should migrate to docker/cli tests func (s *DockerSuite) TestBuildFailsGitNotCallable(c *check.C) { buildImage("gitnotcallable", cli.WithEnvironmentVariables("PATH="), build.WithContextPath("github.com/docker/v1.10-migrator.git")).Assert(c, icmd.Expected{ @@ -6447,6 +6346,7 @@ CMD echo foo c.Assert(strings.TrimSpace(out), checker.Equals, `["/bin/sh","-c","echo foo"]`) } +// FIXME(vdemeester) should migrate to docker/cli tests func (s *DockerSuite) TestBuildIidFile(c *check.C) { tmpDir, err := ioutil.TempDir("", "TestBuildIidFile") if err != nil { @@ -6471,6 +6371,7 @@ ENV BAR BAZ`), c.Assert(d.String(), checker.Equals, getIDByName(c, name)) } +// FIXME(vdemeester) should migrate to docker/cli tests func (s *DockerSuite) TestBuildIidFileCleanupOnFail(c *check.C) { tmpDir, err := ioutil.TempDir("", "TestBuildIidFileCleanupOnFail") if err != nil { @@ -6493,6 +6394,7 @@ func (s *DockerSuite) TestBuildIidFileCleanupOnFail(c *check.C) { c.Assert(os.IsNotExist(err), check.Equals, true) } +// FIXME(vdemeester) should migrate to docker/cli tests func (s *DockerSuite) TestBuildIidFileSquash(c *check.C) { testRequires(c, ExperimentalDaemon) tmpDir, err := ioutil.TempDir("", "TestBuildIidFileSquash") diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_config_inspect_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_config_inspect_test.go deleted file mode 100644 index ba4e80f07..000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_config_inspect_test.go +++ /dev/null @@ -1,68 +0,0 @@ -// +build !windows - -package main - -import ( - "encoding/json" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/integration-cli/checker" - "github.com/go-check/check" -) - -func (s *DockerSwarmSuite) TestConfigInspect(c *check.C) { - d := s.AddDaemon(c, true, true) - - testName := "test_config" - id := d.CreateConfig(c, swarm.ConfigSpec{ - Annotations: swarm.Annotations{ - Name: testName, - }, - Data: []byte("TESTINGDATA"), - }) - c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) - - config := d.GetConfig(c, id) - c.Assert(config.Spec.Name, checker.Equals, testName) - - out, err := d.Cmd("config", "inspect", testName) - c.Assert(err, checker.IsNil, check.Commentf(out)) - - var configs []swarm.Config - c.Assert(json.Unmarshal([]byte(out), &configs), checker.IsNil) - c.Assert(configs, checker.HasLen, 1) -} - -func (s *DockerSwarmSuite) TestConfigInspectMultiple(c *check.C) { - d := s.AddDaemon(c, true, true) - - testNames := []string{ - "test0", - "test1", - } - for _, n := range testNames { - id := d.CreateConfig(c, swarm.ConfigSpec{ - Annotations: swarm.Annotations{ - Name: n, - }, - Data: []byte("TESTINGDATA"), - }) - c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) - - config := d.GetConfig(c, id) - c.Assert(config.Spec.Name, checker.Equals, n) - - } - - args 
:= []string{ - "config", - "inspect", - } - args = append(args, testNames...) - out, err := d.Cmd(args...) - c.Assert(err, checker.IsNil, check.Commentf(out)) - - var configs []swarm.Config - c.Assert(json.Unmarshal([]byte(out), &configs), checker.IsNil) - c.Assert(configs, checker.HasLen, 2) -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_from_container_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_from_container_test.go index 0a282f5c0..499be5452 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_from_container_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_from_container_test.go @@ -8,8 +8,6 @@ import ( "github.com/go-check/check" ) -// docker cp CONTAINER:PATH LOCALPATH - // Try all of the test cases from the archive package which implements the // internals of `docker cp` and ensure that the behavior matches when actually // copying to and from containers. @@ -20,67 +18,9 @@ import ( // 3. DST parent directory must exist. // 4. If DST exists as a file, it must not end with a trailing separator. -// First get these easy error cases out of the way. - -// Test for error when SRC does not exist. -func (s *DockerSuite) TestCpFromErrSrcNotExists(c *check.C) { - containerID := makeTestContainer(c, testContainerOptions{}) - - tmpDir := getTestDir(c, "test-cp-from-err-src-not-exists") - defer os.RemoveAll(tmpDir) - - err := runDockerCp(c, containerCpPath(containerID, "file1"), tmpDir, nil) - c.Assert(err, checker.NotNil) - - c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) -} - -// Test for error when SRC ends in a trailing -// path separator but it exists as a file. -func (s *DockerSuite) TestCpFromErrSrcNotDir(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{addContent: true}) - - tmpDir := getTestDir(c, "test-cp-from-err-src-not-dir") - defer os.RemoveAll(tmpDir) - - err := runDockerCp(c, containerCpPathTrailingSep(containerID, "file1"), tmpDir, nil) - c.Assert(err, checker.NotNil) - - c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) -} - -// Test for error when DST ends in a trailing -// path separator but exists as a file. -func (s *DockerSuite) TestCpFromErrDstNotDir(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{addContent: true}) - - tmpDir := getTestDir(c, "test-cp-from-err-dst-not-dir") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - // Try with a file source. - srcPath := containerCpPath(containerID, "/file1") - dstPath := cpPathTrailingSep(tmpDir, "file1") - - err := runDockerCp(c, srcPath, dstPath, nil) - c.Assert(err, checker.NotNil) - - c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) - - // Try with a directory source. - srcPath = containerCpPath(containerID, "/dir1") - - err = runDockerCp(c, srcPath, dstPath, nil) - c.Assert(err, checker.NotNil) - - c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) -} - // Check that copying from a container to a local symlink copies to the symlink // target and does not overwrite the local symlink itself. 
+// TODO: move to docker/cli and/or integration/container/copy_test.go func (s *DockerSuite) TestCpFromSymlinkDestination(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{addContent: true}) diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_test.go index 24c1fe228..77567a3b9 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_test.go @@ -2,15 +2,11 @@ package main import ( "os" - "runtime" - "strings" "github.com/docker/docker/integration-cli/checker" "github.com/go-check/check" ) -// docker cp LOCALPATH CONTAINER:PATH - // Try all of the test cases from the archive package which implements the // internals of `docker cp` and ensure that the behavior matches when actually // copying to and from containers. @@ -21,124 +17,6 @@ import ( // 3. DST parent directory must exist. // 4. If DST exists as a file, it must not end with a trailing separator. -// First get these easy error cases out of the way. - -// Test for error when SRC does not exist. -func (s *DockerSuite) TestCpToErrSrcNotExists(c *check.C) { - containerID := makeTestContainer(c, testContainerOptions{}) - - tmpDir := getTestDir(c, "test-cp-to-err-src-not-exists") - defer os.RemoveAll(tmpDir) - - srcPath := cpPath(tmpDir, "file1") - dstPath := containerCpPath(containerID, "file1") - _, srcStatErr := os.Stat(srcPath) - c.Assert(os.IsNotExist(srcStatErr), checker.True) - - err := runDockerCp(c, srcPath, dstPath, nil) - if runtime.GOOS == "windows" { - // Go 1.9+ on Windows returns a different error for `os.Stat()`, see - // https://github.com/golang/go/commit/6144c7270e5812d9de8fb97456ee4e5ae657fcbb#diff-f63e1a4b4377b2fe0b05011db3df9599 - // - // Go 1.8: CreateFile C:\not-exist: The system cannot find the file specified. - // Go 1.9: GetFileAttributesEx C:\not-exist: The system cannot find the file specified. - // - // Due to the CLI using a different version than the daemon, comparing the - // error message won't work, so just hard-code the common part here. - // - // TODO this should probably be a test in the CLI repository instead - c.Assert(strings.ToLower(err.Error()), checker.Contains, "cannot find the file specified") - c.Assert(strings.ToLower(err.Error()), checker.Contains, strings.ToLower(tmpDir)) - } else { - c.Assert(strings.ToLower(err.Error()), checker.Contains, strings.ToLower(srcStatErr.Error())) - } -} - -// Test for error when SRC ends in a trailing -// path separator but it exists as a file. -func (s *DockerSuite) TestCpToErrSrcNotDir(c *check.C) { - containerID := makeTestContainer(c, testContainerOptions{}) - - tmpDir := getTestDir(c, "test-cp-to-err-src-not-dir") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - srcPath := cpPathTrailingSep(tmpDir, "file1") - dstPath := containerCpPath(containerID, "testDir") - - err := runDockerCp(c, srcPath, dstPath, nil) - c.Assert(err, checker.NotNil) - - c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) -} - -// Test for error when SRC is a valid file or directory, -// but the DST parent directory does not exist. 
-func (s *DockerSuite) TestCpToErrDstParentNotExists(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{addContent: true}) - - tmpDir := getTestDir(c, "test-cp-to-err-dst-parent-not-exists") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - // Try with a file source. - srcPath := cpPath(tmpDir, "file1") - dstPath := containerCpPath(containerID, "/notExists", "file1") - - err := runDockerCp(c, srcPath, dstPath, nil) - c.Assert(err, checker.NotNil) - - c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) - - // Try with a directory source. - srcPath = cpPath(tmpDir, "dir1") - - err = runDockerCp(c, srcPath, dstPath, nil) - c.Assert(err, checker.NotNil) - - c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) -} - -// Test for error when DST ends in a trailing path separator but exists as a -// file. Also test that we cannot overwrite an existing directory with a -// non-directory and cannot overwrite an existing -func (s *DockerSuite) TestCpToErrDstNotDir(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{addContent: true}) - - tmpDir := getTestDir(c, "test-cp-to-err-dst-not-dir") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - // Try with a file source. - srcPath := cpPath(tmpDir, "dir1/file1-1") - dstPath := containerCpPathTrailingSep(containerID, "file1") - - // The client should encounter an error trying to stat the destination - // and then be unable to copy since the destination is asserted to be a - // directory but does not exist. - err := runDockerCp(c, srcPath, dstPath, nil) - c.Assert(err, checker.NotNil) - - c.Assert(isCpDirNotExist(err), checker.True, check.Commentf("expected DirNotExist error, but got %T: %s", err, err)) - - // Try with a directory source. - srcPath = cpPath(tmpDir, "dir1") - - // The client should encounter an error trying to stat the destination and - // then decide to extract to the parent directory instead with a rebased - // name in the source archive, but this directory would overwrite the - // existing file with the same name. - err = runDockerCp(c, srcPath, dstPath, nil) - c.Assert(err, checker.NotNil) - - c.Assert(isCannotOverwriteNonDirWithDir(err), checker.True, check.Commentf("expected CannotOverwriteNonDirWithDir error, but got %T: %s", err, err)) -} - // Check that copying from a local path to a symlink in a container copies to // the symlink target and does not overwrite the container symlink itself. 
func (s *DockerSuite) TestCpToSymlinkDestination(c *check.C) { diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_utils_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_utils_test.go index 402a87ea9..79a016f0c 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_utils_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_utils_test.go @@ -228,18 +228,10 @@ func getTestDir(c *check.C, label string) (tmpDir string) { return } -func isCpNotExist(err error) bool { - return strings.Contains(strings.ToLower(err.Error()), "could not find the file") -} - func isCpDirNotExist(err error) bool { return strings.Contains(err.Error(), archive.ErrDirNotExists.Error()) } -func isCpNotDir(err error) bool { - return strings.Contains(err.Error(), archive.ErrNotDirectory.Error()) || strings.Contains(err.Error(), "filename, directory name, or volume label syntax is incorrect") -} - func isCpCannotCopyDir(err error) bool { return strings.Contains(err.Error(), archive.ErrCannotCopyDir.Error()) } @@ -248,10 +240,6 @@ func isCpCannotCopyReadOnly(err error) bool { return strings.Contains(err.Error(), "marked read-only") } -func isCannotOverwriteNonDirWithDir(err error) bool { - return strings.Contains(err.Error(), "cannot overwrite non-directory") -} - func fileContentEquals(c *check.C, filename, contents string) (err error) { c.Logf("checking that file %q contains %q\n", filename, contents) diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_events_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_events_unix_test.go index afac998e0..680030807 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_events_unix_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_events_unix_test.go @@ -49,7 +49,7 @@ func (s *DockerSuite) TestEventsRedirectStdout(c *check.C) { } func (s *DockerSuite) TestEventsOOMDisableFalse(c *check.C) { - testRequires(c, DaemonIsLinux, oomControl, memoryLimitSupport, swapMemorySupport) + testRequires(c, DaemonIsLinux, oomControl, memoryLimitSupport, swapMemorySupport, NotPpc64le) errChan := make(chan error) go func() { @@ -79,7 +79,7 @@ func (s *DockerSuite) TestEventsOOMDisableFalse(c *check.C) { } func (s *DockerSuite) TestEventsOOMDisableTrue(c *check.C) { - testRequires(c, DaemonIsLinux, oomControl, memoryLimitSupport, NotArm, swapMemorySupport) + testRequires(c, DaemonIsLinux, oomControl, memoryLimitSupport, NotArm, swapMemorySupport, NotPpc64le) errChan := make(chan error) observer, err := newEventObserver(c) diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_export_import_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_export_import_test.go index 45f29d54e..6405c1bb5 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_export_import_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_export_import_test.go @@ -9,25 +9,8 @@ import ( "github.com/gotestyourself/gotestyourself/icmd" ) -// export an image and try to import it into a new one -func (s *DockerSuite) TestExportContainerAndImportImage(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := "testexportcontainerandimportimage" - - dockerCmd(c, "run", "--name", containerID, "busybox", "true") - - out, _ := dockerCmd(c, "export", containerID) - - result := icmd.RunCmd(icmd.Cmd{ - Command: []string{dockerBinary, "import", "-", "repo/testexp:v1"}, - Stdin: strings.NewReader(out), - 
}) - result.Assert(c, icmd.Success) - - cleanedImageID := strings.TrimSpace(result.Combined()) - c.Assert(cleanedImageID, checker.Not(checker.Equals), "", check.Commentf("output should have been an image id")) -} - +// TODO: Move this test to docker/cli, as it is essentially the same test +// as TestExportContainerAndImportImage, except that it writes the output to a file. // Used to test output flag in the export command func (s *DockerSuite) TestExportContainerWithOutputAndImportImage(c *check.C) { testRequires(c, DaemonIsLinux) diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_network_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_network_unix_test.go index 90e4f6c1f..4e9edba5a 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_network_unix_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_network_unix_test.go @@ -1541,10 +1541,10 @@ func (s *DockerSuite) TestUserDefinedNetworkConnectDisconnectAlias(c *check.C) { dockerCmd(c, "network", "create", "-d", "bridge", "net1") dockerCmd(c, "network", "create", "-d", "bridge", "net2") - cid, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=first", "--net-alias=foo", "busybox", "top") + cid, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=first", "--net-alias=foo", "busybox:glibc", "top") c.Assert(waitRun("first"), check.IsNil) - dockerCmd(c, "run", "-d", "--net=net1", "--name=second", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=net1", "--name=second", "busybox:glibc", "top") c.Assert(waitRun("second"), check.IsNil) // ping first container and its alias @@ -1581,7 +1581,7 @@ func (s *DockerSuite) TestUserDefinedNetworkConnectDisconnectAlias(c *check.C) { c.Assert(err, check.IsNil) // verify the alias option is rejected when running on predefined network - out, _, err := dockerCmdWithError("run", "--rm", "--name=any", "--net-alias=any", "busybox", "top") + out, _, err := dockerCmdWithError("run", "--rm", "--name=any", "--net-alias=any", "busybox:glibc", "top") c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) @@ -1595,10 +1595,10 @@ func (s *DockerSuite) TestUserDefinedNetworkConnectivity(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) dockerCmd(c, "network", "create", "-d", "bridge", "br.net1") - dockerCmd(c, "run", "-d", "--net=br.net1", "--name=c1.net1", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=br.net1", "--name=c1.net1", "busybox:glibc", "top") c.Assert(waitRun("c1.net1"), check.IsNil) - dockerCmd(c, "run", "-d", "--net=br.net1", "--name=c2.net1", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=br.net1", "--name=c2.net1", "busybox:glibc", "top") c.Assert(waitRun("c2.net1"), check.IsNil) // ping first container by its unqualified name @@ -1643,9 +1643,9 @@ func (s *DockerSuite) TestDockerNetworkInternalMode(c *check.C) { nr := getNetworkResource(c, "internal") c.Assert(nr.Internal, checker.True) - dockerCmd(c, "run", "-d", "--net=internal", "--name=first", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=internal", "--name=first", "busybox:glibc", "top") c.Assert(waitRun("first"), check.IsNil) - dockerCmd(c, "run", "-d", "--net=internal", "--name=second", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=internal", "--name=second", "busybox:glibc", "top") c.Assert(waitRun("second"), check.IsNil) out, _, err := dockerCmdWithError("exec", "first", "ping", "-W", "4", "-c", "1", "www.google.com") c.Assert(err, check.NotNil) diff
--git a/vendor/github.com/docker/docker/integration-cli/docker_cli_rm_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_rm_test.go deleted file mode 100644 index d281704a7..000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_rm_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package main - -import ( - "io/ioutil" - "os" - - "github.com/docker/docker/integration-cli/checker" - "github.com/docker/docker/integration-cli/cli/build" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestRmContainerWithRemovedVolume(c *check.C) { - testRequires(c, SameHostDaemon) - - prefix, slash := getPrefixAndSlashFromDaemonPlatform() - - tempDir, err := ioutil.TempDir("", "test-rm-container-with-removed-volume-") - if err != nil { - c.Fatalf("failed to create temporary directory: %s", tempDir) - } - defer os.RemoveAll(tempDir) - - dockerCmd(c, "run", "--name", "losemyvolumes", "-v", tempDir+":"+prefix+slash+"test", "busybox", "true") - - err = os.RemoveAll(tempDir) - c.Assert(err, check.IsNil) - - dockerCmd(c, "rm", "-v", "losemyvolumes") -} - -func (s *DockerSuite) TestRmContainerWithVolume(c *check.C) { - prefix, slash := getPrefixAndSlashFromDaemonPlatform() - - dockerCmd(c, "run", "--name", "foo", "-v", prefix+slash+"srv", "busybox", "true") - - dockerCmd(c, "rm", "-v", "foo") -} - -func (s *DockerSuite) TestRmContainerRunning(c *check.C) { - createRunningContainer(c, "foo") - - res, _, err := dockerCmdWithError("rm", "foo") - c.Assert(err, checker.NotNil, check.Commentf("Expected error, can't rm a running container")) - c.Assert(res, checker.Contains, "cannot remove a running container") -} - -func (s *DockerSuite) TestRmContainerForceRemoveRunning(c *check.C) { - createRunningContainer(c, "foo") - - // Stop then remove with -f - dockerCmd(c, "rm", "-f", "foo") -} - -func (s *DockerSuite) TestRmContainerOrphaning(c *check.C) { - dockerfile1 := `FROM busybox:latest - ENTRYPOINT ["true"]` - img := "test-container-orphaning" - dockerfile2 := `FROM busybox:latest - ENTRYPOINT ["true"] - MAINTAINER Integration Tests` - - // build first dockerfile - buildImageSuccessfully(c, img, build.WithDockerfile(dockerfile1)) - img1 := getIDByName(c, img) - // run container on first image - dockerCmd(c, "run", img) - // rebuild dockerfile with a small addition at the end - buildImageSuccessfully(c, img, build.WithDockerfile(dockerfile2)) - // try to remove the image, should not error out. 
- out, _, err := dockerCmdWithError("rmi", img) - c.Assert(err, check.IsNil, check.Commentf("Expected to removing the image, but failed: %s", out)) - - // check if we deleted the first image - out, _ = dockerCmd(c, "images", "-q", "--no-trunc") - c.Assert(out, checker.Contains, img1, check.Commentf("Orphaned container (could not find %q in docker images): %s", img1, out)) - -} - -func (s *DockerSuite) TestRmInvalidContainer(c *check.C) { - out, _, err := dockerCmdWithError("rm", "unknown") - c.Assert(err, checker.NotNil, check.Commentf("Expected error on rm unknown container, got none")) - c.Assert(out, checker.Contains, "No such container") -} - -func createRunningContainer(c *check.C, name string) { - runSleepingContainer(c, "-dt", "--name", name) -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_run_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_run_test.go index 134bcb80a..871fee7d0 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_run_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_run_test.go @@ -294,7 +294,7 @@ func (s *DockerSuite) TestUserDefinedNetworkAlias(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) dockerCmd(c, "network", "create", "-d", "bridge", "net1") - cid1, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=first", "--net-alias=foo1", "--net-alias=foo2", "busybox", "top") + cid1, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=first", "--net-alias=foo1", "--net-alias=foo2", "busybox:glibc", "top") c.Assert(waitRun("first"), check.IsNil) // Check if default short-id alias is added automatically @@ -302,7 +302,7 @@ func (s *DockerSuite) TestUserDefinedNetworkAlias(c *check.C) { aliases := inspectField(c, id, "NetworkSettings.Networks.net1.Aliases") c.Assert(aliases, checker.Contains, stringid.TruncateID(id)) - cid2, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=second", "busybox", "top") + cid2, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=second", "busybox:glibc", "top") c.Assert(waitRun("second"), check.IsNil) // Check if default short-id alias is added automatically diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_run_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_run_unix_test.go index 952cf0a4a..d817bc3b2 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_run_unix_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_run_unix_test.go @@ -615,7 +615,7 @@ func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceWriteIOps(c *check.C) } func (s *DockerSuite) TestRunOOMExitCode(c *check.C) { - testRequires(c, memoryLimitSupport, swapMemorySupport) + testRequires(c, memoryLimitSupport, swapMemorySupport, NotPpc64le) errChan := make(chan error) go func() { defer close(errChan) diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_secret_inspect_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_secret_inspect_test.go deleted file mode 100644 index 429e9ad10..000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_secret_inspect_test.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build !windows - -package main - -import ( - "encoding/json" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/integration-cli/checker" - "github.com/go-check/check" -) - -func (s *DockerSwarmSuite) TestSecretInspectMultiple(c *check.C) { - d := s.AddDaemon(c, true, true) - - testNames := []string{ - 
"test0", - "test1", - } - for _, n := range testNames { - id := d.CreateSecret(c, swarm.SecretSpec{ - Annotations: swarm.Annotations{ - Name: n, - }, - Data: []byte("TESTINGDATA"), - }) - c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) - - secret := d.GetSecret(c, id) - c.Assert(secret.Spec.Name, checker.Equals, n) - - } - - args := []string{ - "secret", - "inspect", - } - args = append(args, testNames...) - out, err := d.Cmd(args...) - c.Assert(err, checker.IsNil, check.Commentf(out)) - - var secrets []swarm.Secret - c.Assert(json.Unmarshal([]byte(out), &secrets), checker.IsNil) - c.Assert(secrets, checker.HasLen, 2) -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_service_update_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_update_test.go index 8f0717074..7b590a0f4 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_service_update_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_update_test.go @@ -11,41 +11,6 @@ import ( "github.com/go-check/check" ) -func (s *DockerSwarmSuite) TestServiceUpdatePort(c *check.C) { - d := s.AddDaemon(c, true, true) - - serviceName := "TestServiceUpdatePort" - serviceArgs := append([]string{"service", "create", "--detach", "--no-resolve-image", "--name", serviceName, "-p", "8080:8081", defaultSleepImage}, sleepCommandForDaemonPlatform()...) - - // Create a service with a port mapping of 8080:8081. - out, err := d.Cmd(serviceArgs...) - c.Assert(err, checker.IsNil) - waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) - - // Update the service: changed the port mapping from 8080:8081 to 8082:8083. - _, err = d.Cmd("service", "update", "--detach", "--publish-add", "8082:8083", "--publish-rm", "8081", serviceName) - c.Assert(err, checker.IsNil) - - // Inspect the service and verify port mapping - expected := []swarm.PortConfig{ - { - Protocol: "tcp", - PublishedPort: 8082, - TargetPort: 8083, - PublishMode: "ingress", - }, - } - - out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.EndpointSpec.Ports }}", serviceName) - c.Assert(err, checker.IsNil) - - var portConfig []swarm.PortConfig - if err := json.Unmarshal([]byte(out), &portConfig); err != nil { - c.Fatalf("invalid JSON in inspect result: %v (%s)", err, out) - } - c.Assert(portConfig, checker.DeepEquals, expected) -} - func (s *DockerSwarmSuite) TestServiceUpdateLabel(c *check.C) { d := s.AddDaemon(c, true, true) out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name=test", "busybox", "top") diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_test.go index 4a5ec9a56..8add18e1e 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_test.go @@ -345,13 +345,13 @@ func (s *DockerSwarmSuite) TestSwarmContainerEndpointOptions(c *check.C) { c.Assert(err, checker.IsNil, check.Commentf(out)) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") - _, err = d.Cmd("run", "-d", "--net=foo", "--name=first", "--net-alias=first-alias", "busybox", "top") + _, err = d.Cmd("run", "-d", "--net=foo", "--name=first", "--net-alias=first-alias", "busybox:glibc", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) - _, err = d.Cmd("run", "-d", "--net=foo", "--name=second", "busybox", "top") + 
_, err = d.Cmd("run", "-d", "--net=foo", "--name=second", "busybox:glibc", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) - _, err = d.Cmd("run", "-d", "--net=foo", "--net-alias=third-alias", "busybox", "top") + _, err = d.Cmd("run", "-d", "--net=foo", "--net-alias=third-alias", "busybox:glibc", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) // ping first container and its alias, also ping third and anonymous container by its alias diff --git a/vendor/github.com/docker/docker/integration-cli/docker_experimental_network_test.go b/vendor/github.com/docker/docker/integration-cli/docker_experimental_network_test.go index fb2033163..9d6be6960 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_experimental_network_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_experimental_network_test.go @@ -143,8 +143,8 @@ func (s *DockerNetworkSuite) TestDockerNetworkMacvlanMultiSubnet(c *check.C) { // Ensure the network was created assertNwIsAvailable(c, "dualstackbridge") // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.100.0/24 and 2001:db8:abc2::/64 - dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=first", "--ip", "172.28.100.20", "--ip6", "2001:db8:abc2::20", "busybox", "top") - dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=second", "--ip", "172.28.100.21", "--ip6", "2001:db8:abc2::21", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=first", "--ip", "172.28.100.20", "--ip6", "2001:db8:abc2::20", "busybox:glibc", "top") + dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=second", "--ip", "172.28.100.21", "--ip6", "2001:db8:abc2::21", "busybox:glibc", "top") // Inspect and store the v4 address from specified container on the network dualstackbridge ip := inspectField(c, "first", "NetworkSettings.Networks.dualstackbridge.IPAddress") @@ -160,8 +160,8 @@ func (s *DockerNetworkSuite) TestDockerNetworkMacvlanMultiSubnet(c *check.C) { c.Assert(err, check.IsNil) // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.102.0/24 and 2001:db8:abc4::/64 - dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=third", "--ip", "172.28.102.20", "--ip6", "2001:db8:abc4::20", "busybox", "top") - dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=fourth", "--ip", "172.28.102.21", "--ip6", "2001:db8:abc4::21", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=third", "--ip", "172.28.102.20", "--ip6", "2001:db8:abc4::20", "busybox:glibc", "top") + dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=fourth", "--ip", "172.28.102.21", "--ip6", "2001:db8:abc4::21", "busybox:glibc", "top") // Inspect and store the v4 address from specified container on the network dualstackbridge ip = inspectField(c, "third", "NetworkSettings.Networks.dualstackbridge.IPAddress") @@ -198,8 +198,8 @@ func (s *DockerNetworkSuite) TestDockerNetworkIpvlanL2MultiSubnet(c *check.C) { // Ensure the network was created assertNwIsAvailable(c, "dualstackl2") // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.200.0/24 and 2001:db8:abc8::/64 - dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=first", "--ip", "172.28.200.20", "--ip6", "2001:db8:abc8::20", "busybox", "top") - dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=second", "--ip", "172.28.200.21", "--ip6", "2001:db8:abc8::21", "busybox", "top") + dockerCmd(c, "run", 
"-d", "--net=dualstackl2", "--name=first", "--ip", "172.28.200.20", "--ip6", "2001:db8:abc8::20", "busybox:glibc", "top") + dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=second", "--ip", "172.28.200.21", "--ip6", "2001:db8:abc8::21", "busybox:glibc", "top") // Inspect and store the v4 address from specified container on the network dualstackl2 ip := inspectField(c, "first", "NetworkSettings.Networks.dualstackl2.IPAddress") @@ -214,8 +214,8 @@ func (s *DockerNetworkSuite) TestDockerNetworkIpvlanL2MultiSubnet(c *check.C) { c.Assert(err, check.IsNil) // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.202.0/24 and 2001:db8:abc6::/64 - dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=third", "--ip", "172.28.202.20", "--ip6", "2001:db8:abc6::20", "busybox", "top") - dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=fourth", "--ip", "172.28.202.21", "--ip6", "2001:db8:abc6::21", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=third", "--ip", "172.28.202.20", "--ip6", "2001:db8:abc6::20", "busybox:glibc", "top") + dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=fourth", "--ip", "172.28.202.21", "--ip6", "2001:db8:abc6::21", "busybox:glibc", "top") // Inspect and store the v4 address from specified container on the network dualstackl2 ip = inspectField(c, "third", "NetworkSettings.Networks.dualstackl2.IPAddress") @@ -253,8 +253,8 @@ func (s *DockerNetworkSuite) TestDockerNetworkIpvlanL3MultiSubnet(c *check.C) { assertNwIsAvailable(c, "dualstackl3") // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.10.0/24 and 2001:db8:abc9::/64 - dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=first", "--ip", "172.28.10.20", "--ip6", "2001:db8:abc9::20", "busybox", "top") - dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=second", "--ip", "172.28.10.21", "--ip6", "2001:db8:abc9::21", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=first", "--ip", "172.28.10.20", "--ip6", "2001:db8:abc9::20", "busybox:glibc", "top") + dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=second", "--ip", "172.28.10.21", "--ip6", "2001:db8:abc9::21", "busybox:glibc", "top") // Inspect and store the v4 address from specified container on the network dualstackl3 ip := inspectField(c, "first", "NetworkSettings.Networks.dualstackl3.IPAddress") @@ -269,8 +269,8 @@ func (s *DockerNetworkSuite) TestDockerNetworkIpvlanL3MultiSubnet(c *check.C) { c.Assert(err, check.IsNil) // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.12.0/24 and 2001:db8:abc7::/64 - dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=third", "--ip", "172.28.12.20", "--ip6", "2001:db8:abc7::20", "busybox", "top") - dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=fourth", "--ip", "172.28.12.21", "--ip6", "2001:db8:abc7::21", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=third", "--ip", "172.28.12.20", "--ip6", "2001:db8:abc7::20", "busybox:glibc", "top") + dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=fourth", "--ip", "172.28.12.21", "--ip6", "2001:db8:abc7::21", "busybox:glibc", "top") // Inspect and store the v4 address from specified container on the network dualstackl3 ip = inspectField(c, "third", "NetworkSettings.Networks.dualstackl3.IPAddress") @@ -356,9 +356,9 @@ func (s *DockerSuite) TestDockerNetworkMacVlanBridgeNilParent(c *check.C) { assertNwIsAvailable(c, 
"dm-nil-parent") // start two containers on the same subnet - dockerCmd(c, "run", "-d", "--net=dm-nil-parent", "--name=first", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dm-nil-parent", "--name=first", "busybox:glibc", "top") c.Assert(waitRun("first"), check.IsNil) - dockerCmd(c, "run", "-d", "--net=dm-nil-parent", "--name=second", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dm-nil-parent", "--name=second", "busybox:glibc", "top") c.Assert(waitRun("second"), check.IsNil) // intra-network communications should succeed @@ -375,9 +375,9 @@ func (s *DockerSuite) TestDockerNetworkMacVlanBridgeInternalMode(c *check.C) { c.Assert(nr.Internal, checker.True) // start two containers on the same subnet - cli.DockerCmd(c, "run", "-d", "--net=dm-internal", "--name=first", "busybox", "top") + cli.DockerCmd(c, "run", "-d", "--net=dm-internal", "--name=first", "busybox:glibc", "top") c.Assert(waitRun("first"), check.IsNil) - cli.DockerCmd(c, "run", "-d", "--net=dm-internal", "--name=second", "busybox", "top") + cli.DockerCmd(c, "run", "-d", "--net=dm-internal", "--name=second", "busybox:glibc", "top") c.Assert(waitRun("second"), check.IsNil) // access outside of the network should fail @@ -395,9 +395,9 @@ func (s *DockerSuite) TestDockerNetworkIpvlanL2NilParent(c *check.C) { assertNwIsAvailable(c, "di-nil-parent") // start two containers on the same subnet - dockerCmd(c, "run", "-d", "--net=di-nil-parent", "--name=first", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=di-nil-parent", "--name=first", "busybox:glibc", "top") c.Assert(waitRun("first"), check.IsNil) - dockerCmd(c, "run", "-d", "--net=di-nil-parent", "--name=second", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=di-nil-parent", "--name=second", "busybox:glibc", "top") c.Assert(waitRun("second"), check.IsNil) // intra-network communications should succeed @@ -414,9 +414,9 @@ func (s *DockerSuite) TestDockerNetworkIpvlanL2InternalMode(c *check.C) { c.Assert(nr.Internal, checker.True) // start two containers on the same subnet - cli.DockerCmd(c, "run", "-d", "--net=di-internal", "--name=first", "busybox", "top") + cli.DockerCmd(c, "run", "-d", "--net=di-internal", "--name=first", "busybox:glibc", "top") c.Assert(waitRun("first"), check.IsNil) - cli.DockerCmd(c, "run", "-d", "--net=di-internal", "--name=second", "busybox", "top") + cli.DockerCmd(c, "run", "-d", "--net=di-internal", "--name=second", "busybox:glibc", "top") c.Assert(waitRun("second"), check.IsNil) // access outside of the network should fail @@ -434,9 +434,9 @@ func (s *DockerSuite) TestDockerNetworkIpvlanL3NilParent(c *check.C) { assertNwIsAvailable(c, "di-nil-parent-l3") // start two containers on separate subnets - dockerCmd(c, "run", "-d", "--ip=172.28.220.10", "--net=di-nil-parent-l3", "--name=first", "busybox", "top") + dockerCmd(c, "run", "-d", "--ip=172.28.220.10", "--net=di-nil-parent-l3", "--name=first", "busybox:glibc", "top") c.Assert(waitRun("first"), check.IsNil) - dockerCmd(c, "run", "-d", "--ip=172.28.230.10", "--net=di-nil-parent-l3", "--name=second", "busybox", "top") + dockerCmd(c, "run", "-d", "--ip=172.28.230.10", "--net=di-nil-parent-l3", "--name=second", "busybox:glibc", "top") c.Assert(waitRun("second"), check.IsNil) // intra-network communications should succeed @@ -454,9 +454,9 @@ func (s *DockerSuite) TestDockerNetworkIpvlanL3InternalMode(c *check.C) { c.Assert(nr.Internal, checker.True) // start two containers on separate subnets - cli.DockerCmd(c, "run", "-d", "--ip=172.28.220.10", "--net=di-internal-l3", "--name=first", 
"busybox", "top") + cli.DockerCmd(c, "run", "-d", "--ip=172.28.220.10", "--net=di-internal-l3", "--name=first", "busybox:glibc", "top") c.Assert(waitRun("first"), check.IsNil) - cli.DockerCmd(c, "run", "-d", "--ip=172.28.230.10", "--net=di-internal-l3", "--name=second", "busybox", "top") + cli.DockerCmd(c, "run", "-d", "--ip=172.28.230.10", "--net=di-internal-l3", "--name=second", "busybox:glibc", "top") c.Assert(waitRun("second"), check.IsNil) // access outside of the network should fail @@ -496,9 +496,9 @@ func (s *DockerSuite) TestDockerNetworkMacVlanSubinterface(c *check.C) { assertNwIsAvailable(c, netName) // start containers on 802.1q tagged '-o parent' sub-interface - dockerCmd(c, "run", "-d", "--net=dm-subinterface", "--name=first", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dm-subinterface", "--name=first", "busybox:glibc", "top") c.Assert(waitRun("first"), check.IsNil) - dockerCmd(c, "run", "-d", "--net=dm-subinterface", "--name=second", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dm-subinterface", "--name=second", "busybox:glibc", "top") c.Assert(waitRun("second"), check.IsNil) // verify containers can communicate _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/load/frozen.go b/vendor/github.com/docker/docker/integration-cli/fixtures/load/frozen.go index 7e104fe76..5701a216a 100644 --- a/vendor/github.com/docker/docker/integration-cli/fixtures/load/frozen.go +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/load/frozen.go @@ -37,9 +37,6 @@ func FrozenImagesLinux(client client.APIClient, images ...string) error { if img == "hello-world:frozen" { srcName = "hello-world:latest" } - if img == "busybox:1.27-glibc" { - img = "busybox:latest" - } loadImages = append(loadImages, struct{ srcName, destName string }{ srcName: srcName, destName: img, diff --git a/vendor/github.com/docker/docker/integration/config/config_test.go b/vendor/github.com/docker/docker/integration/config/config_test.go index c152be59b..912f55a12 100644 --- a/vendor/github.com/docker/docker/integration/config/config_test.go +++ b/vendor/github.com/docker/docker/integration/config/config_test.go @@ -1,7 +1,8 @@ -package config +package config // import "github.com/docker/docker/integration/config" import ( "bytes" + "encoding/json" "sort" "testing" "time" @@ -327,3 +328,27 @@ func waitAndAssert(t *testing.T, timeout time.Duration, f func(*testing.T) bool) time.Sleep(100 * time.Millisecond) } } + +func TestConfigInspect(t *testing.T) { + skip.If(t, testEnv.DaemonInfo.OSType != "linux") + + defer setupTest(t)() + d := swarm.NewSwarm(t, testEnv) + defer d.Stop(t) + client, err := client.NewClientWithOpts(client.WithHost((d.Sock()))) + require.NoError(t, err) + + ctx := context.Background() + + testName := t.Name() + configID := createConfig(ctx, t, client, testName, []byte("TESTINGDATA"), nil) + + insp, body, err := client.ConfigInspectWithRaw(ctx, configID) + require.NoError(t, err) + assert.Equal(t, insp.Spec.Name, testName) + + var config swarmtypes.Config + err = json.Unmarshal(body, &config) + require.NoError(t, err) + assert.Equal(t, config, insp) +} diff --git a/vendor/github.com/docker/docker/integration/config/main_test.go b/vendor/github.com/docker/docker/integration/config/main_test.go index 32df97d2c..3c8f0483f 100644 --- a/vendor/github.com/docker/docker/integration/config/main_test.go +++ b/vendor/github.com/docker/docker/integration/config/main_test.go @@ -1,4 +1,4 @@ 
-package config +package config // import "github.com/docker/docker/integration/config" import ( "fmt" diff --git a/vendor/github.com/docker/docker/integration/container/copy_test.go b/vendor/github.com/docker/docker/integration/container/copy_test.go new file mode 100644 index 000000000..43dc31f2f --- /dev/null +++ b/vendor/github.com/docker/docker/integration/container/copy_test.go @@ -0,0 +1,65 @@ +package container // import "github.com/docker/docker/integration/container" + +import ( + "context" + "fmt" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/internal/testutil" + "github.com/gotestyourself/gotestyourself/skip" + "github.com/stretchr/testify/require" +) + +func TestCopyFromContainerPathDoesNotExist(t *testing.T) { + defer setupTest(t)() + + ctx := context.Background() + apiclient := testEnv.APIClient() + cid := container.Create(t, ctx, apiclient) + + _, _, err := apiclient.CopyFromContainer(ctx, cid, "/dne") + require.True(t, client.IsErrNotFound(err)) + expected := fmt.Sprintf("No such container:path: %s:%s", cid, "/dne") + testutil.ErrorContains(t, err, expected) +} + +func TestCopyFromContainerPathIsNotDir(t *testing.T) { + defer setupTest(t)() + skip.If(t, testEnv.OSType == "windows") + + ctx := context.Background() + apiclient := testEnv.APIClient() + cid := container.Create(t, ctx, apiclient) + + _, _, err := apiclient.CopyFromContainer(ctx, cid, "/etc/passwd/") + require.Contains(t, err.Error(), "not a directory") +} + +func TestCopyToContainerPathDoesNotExist(t *testing.T) { + defer setupTest(t)() + skip.If(t, testEnv.OSType == "windows") + + ctx := context.Background() + apiclient := testEnv.APIClient() + cid := container.Create(t, ctx, apiclient) + + err := apiclient.CopyToContainer(ctx, cid, "/dne", nil, types.CopyToContainerOptions{}) + require.True(t, client.IsErrNotFound(err)) + expected := fmt.Sprintf("No such container:path: %s:%s", cid, "/dne") + testutil.ErrorContains(t, err, expected) +} + +func TestCopyToContainerPathIsNotDir(t *testing.T) { + defer setupTest(t)() + skip.If(t, testEnv.OSType == "windows") + + ctx := context.Background() + apiclient := testEnv.APIClient() + cid := container.Create(t, ctx, apiclient) + + err := apiclient.CopyToContainer(ctx, cid, "/etc/passwd/", nil, types.CopyToContainerOptions{}) + require.Contains(t, err.Error(), "not a directory") +} diff --git a/vendor/github.com/docker/docker/integration/container/daemon_linux_test.go b/vendor/github.com/docker/docker/integration/container/daemon_linux_test.go index 872c7ab4c..5077770f5 100644 --- a/vendor/github.com/docker/docker/integration/container/daemon_linux_test.go +++ b/vendor/github.com/docker/docker/integration/container/daemon_linux_test.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/integration/container" import ( "context" @@ -9,8 +9,9 @@ import ( "testing" "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" "github.com/docker/docker/integration-cli/daemon" + "github.com/docker/docker/integration/internal/container" + "github.com/gotestyourself/gotestyourself/skip" "github.com/stretchr/testify/assert" "golang.org/x/sys/unix" ) @@ -26,6 +27,7 @@ import ( // the container process, then start dockerd back up and attempt to start the // container again. 
func TestContainerStartOnDaemonRestart(t *testing.T) { + skip.If(t, testEnv.IsRemoteDaemon(), "cannot start daemon on remote test run") t.Parallel() d := daemon.New(t, "", "dockerd", daemon.Config{}) @@ -36,22 +38,14 @@ func TestContainerStartOnDaemonRestart(t *testing.T) { assert.NoError(t, err, "error creating client") ctx := context.Background() - c, err := client.ContainerCreate(ctx, - &container.Config{ - Image: "busybox", - Cmd: []string{"top"}, - }, - nil, - nil, - "", - ) - assert.NoError(t, err, "error creating test container") - defer client.ContainerRemove(ctx, c.ID, types.ContainerRemoveOptions{Force: true}) - err = client.ContainerStart(ctx, c.ID, types.ContainerStartOptions{}) + cID := container.Create(t, ctx, client) + defer client.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{Force: true}) + + err = client.ContainerStart(ctx, cID, types.ContainerStartOptions{}) assert.NoError(t, err, "error starting test container") - inspect, err := client.ContainerInspect(ctx, c.ID) + inspect, err := client.ContainerInspect(ctx, cID) assert.NoError(t, err, "error getting inspect data") ppid := getContainerdShimPid(t, inspect) @@ -67,7 +61,7 @@ func TestContainerStartOnDaemonRestart(t *testing.T) { d.Start(t, "--iptables=false") - err = client.ContainerStart(ctx, c.ID, types.ContainerStartOptions{}) + err = client.ContainerStart(ctx, cID, types.ContainerStartOptions{}) assert.NoError(t, err, "failed to start test container") } diff --git a/vendor/github.com/docker/docker/integration/container/diff_test.go b/vendor/github.com/docker/docker/integration/container/diff_test.go index df6023699..de5ff4e21 100644 --- a/vendor/github.com/docker/docker/integration/container/diff_test.go +++ b/vendor/github.com/docker/docker/integration/container/diff_test.go @@ -10,13 +10,11 @@ import ( "github.com/docker/docker/integration/internal/request" "github.com/docker/docker/pkg/archive" "github.com/gotestyourself/gotestyourself/poll" - "github.com/gotestyourself/gotestyourself/skip" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -// ensure that an added file shows up in docker diff -func TestDiffFilenameShownInOutput(t *testing.T) { +func TestDiff(t *testing.T) { defer setupTest(t)() client := request.NewAPIClient(t) ctx := context.Background() @@ -27,72 +25,19 @@ func TestDiffFilenameShownInOutput(t *testing.T) { // it will take a few seconds to exit. Also there's no way in Windows to // differentiate between an Add or a Modify, and all files are under // a "Files/" prefix. 
- lookingFor := containertypes.ContainerChangeResponseItem{Kind: archive.ChangeAdd, Path: "/foo/bar"} + expected := []containertypes.ContainerChangeResponseItem{ + {Kind: archive.ChangeAdd, Path: "/foo"}, + {Kind: archive.ChangeAdd, Path: "/foo/bar"}, + } if testEnv.OSType == "windows" { poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond), poll.WithTimeout(60*time.Second)) - lookingFor = containertypes.ContainerChangeResponseItem{Kind: archive.ChangeModify, Path: "Files/foo/bar"} - } - - items, err := client.ContainerDiff(ctx, cID) - require.NoError(t, err) - assert.Contains(t, items, lookingFor) -} - -// test to ensure GH #3840 doesn't occur any more -func TestDiffEnsureInitLayerFilesAreIgnored(t *testing.T) { - skip.If(t, testEnv.DaemonInfo.OSType != "linux") - - defer setupTest(t)() - client := request.NewAPIClient(t) - ctx := context.Background() - - // this is a list of files which shouldn't show up in `docker diff` - initLayerFiles := []string{"/etc/resolv.conf", "/etc/hostname", "/etc/hosts", "/.dockerenv"} - containerCount := 5 - - // we might not run into this problem from the first run, so start a few containers - for i := 0; i < containerCount; i++ { - cID := container.Run(t, ctx, client, container.WithCmd("sh", "-c", `echo foo > /root/bar`)) - - items, err := client.ContainerDiff(ctx, cID) - require.NoError(t, err) - for _, item := range items { - assert.NotContains(t, initLayerFiles, item.Path) + expected = []containertypes.ContainerChangeResponseItem{ + {Kind: archive.ChangeModify, Path: "Files/foo"}, + {Kind: archive.ChangeModify, Path: "Files/foo/bar"}, } } -} - -func TestDiffEnsureDefaultDevs(t *testing.T) { - skip.If(t, testEnv.DaemonInfo.OSType != "linux") - - defer setupTest(t)() - client := request.NewAPIClient(t) - ctx := context.Background() - - cID := container.Run(t, ctx, client, container.WithCmd("sleep", "0")) items, err := client.ContainerDiff(ctx, cID) require.NoError(t, err) - - expected := []containertypes.ContainerChangeResponseItem{ - {Kind: archive.ChangeModify, Path: "/dev"}, - {Kind: archive.ChangeAdd, Path: "/dev/full"}, // busybox - {Kind: archive.ChangeModify, Path: "/dev/ptmx"}, // libcontainer - {Kind: archive.ChangeAdd, Path: "/dev/mqueue"}, - {Kind: archive.ChangeAdd, Path: "/dev/kmsg"}, - {Kind: archive.ChangeAdd, Path: "/dev/fd"}, - {Kind: archive.ChangeAdd, Path: "/dev/ptmx"}, - {Kind: archive.ChangeAdd, Path: "/dev/null"}, - {Kind: archive.ChangeAdd, Path: "/dev/random"}, - {Kind: archive.ChangeAdd, Path: "/dev/stdout"}, - {Kind: archive.ChangeAdd, Path: "/dev/stderr"}, - {Kind: archive.ChangeAdd, Path: "/dev/tty1"}, - {Kind: archive.ChangeAdd, Path: "/dev/stdin"}, - {Kind: archive.ChangeAdd, Path: "/dev/tty"}, - {Kind: archive.ChangeAdd, Path: "/dev/urandom"}, - } - - for _, item := range items { - assert.Contains(t, expected, item) - } + assert.Equal(t, expected, items) } diff --git a/vendor/github.com/docker/docker/integration/container/export_test.go b/vendor/github.com/docker/docker/integration/container/export_test.go new file mode 100644 index 000000000..657b1fce4 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/container/export_test.go @@ -0,0 +1,53 @@ +package container // import "github.com/docker/docker/integration/container" + +import ( + "context" + "encoding/json" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/integration/internal/container" + 
"github.com/docker/docker/integration/internal/request" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/gotestyourself/gotestyourself/poll" + "github.com/gotestyourself/gotestyourself/skip" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// export an image and try to import it into a new one +func TestExportContainerAndImportImage(t *testing.T) { + skip.If(t, testEnv.DaemonInfo.OSType != "linux") + + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + cID := container.Run(t, ctx, client, container.WithCmd("true")) + poll.WaitOn(t, container.IsStopped(ctx, client, cID), poll.WithDelay(100*time.Millisecond)) + + reference := "repo/testexp:v1" + exportResp, err := client.ContainerExport(ctx, cID) + require.NoError(t, err) + importResp, err := client.ImageImport(ctx, types.ImageImportSource{ + Source: exportResp, + SourceName: "-", + }, reference, types.ImageImportOptions{}) + require.NoError(t, err) + + // If the import is successfully, then the message output should contain + // the image ID and match with the output from `docker images`. + + dec := json.NewDecoder(importResp) + var jm jsonmessage.JSONMessage + err = dec.Decode(&jm) + require.NoError(t, err) + + images, err := client.ImageList(ctx, types.ImageListOptions{ + Filters: filters.NewArgs(filters.Arg("reference", reference)), + }) + require.NoError(t, err) + assert.Equal(t, jm.Status, images[0].ID) +} diff --git a/vendor/github.com/docker/docker/integration/container/inspect_test.go b/vendor/github.com/docker/docker/integration/container/inspect_test.go index 2eace554e..c7ea23b51 100644 --- a/vendor/github.com/docker/docker/integration/container/inspect_test.go +++ b/vendor/github.com/docker/docker/integration/container/inspect_test.go @@ -40,9 +40,9 @@ func TestInspectCpusetInConfigPre120(t *testing.T) { require.NoError(t, err, "unable to unmarshal body for version 1.19: %s", err) config, ok := inspectJSON["Config"] - assert.Equal(t, ok, true, "Unable to find 'Config'") + assert.Equal(t, true, ok, "Unable to find 'Config'") cfg := config.(map[string]interface{}) _, ok = cfg["Cpuset"] - assert.Equal(t, ok, true, "API version 1.19 expected to include Cpuset in 'Config'") + assert.Equal(t, true, ok, "API version 1.19 expected to include Cpuset in 'Config'") } diff --git a/vendor/github.com/docker/docker/integration/container/kill_test.go b/vendor/github.com/docker/docker/integration/container/kill_test.go index 09e37ac0d..5fae91267 100644 --- a/vendor/github.com/docker/docker/integration/container/kill_test.go +++ b/vendor/github.com/docker/docker/integration/container/kill_test.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/integration/container" import ( "context" @@ -155,8 +155,7 @@ func TestInspectOomKilledTrue(t *testing.T) { ctx := context.Background() client := request.NewAPIClient(t) - name := "testoomkilled" - cID := container.Run(t, ctx, client, container.WithName(name), container.WithCmd("sh", "-c", "x=a; while true; do x=$x$x$x$x; done"), func(c *container.TestContainerConfig) { + cID := container.Run(t, ctx, client, container.WithCmd("sh", "-c", "x=a; while true; do x=$x$x$x$x; done"), func(c *container.TestContainerConfig) { c.HostConfig.Resources.Memory = 32 * 1024 * 1024 }) @@ -164,7 +163,7 @@ func TestInspectOomKilledTrue(t *testing.T) { inspect, err := client.ContainerInspect(ctx, cID) require.NoError(t, err) - assert.Equal(t, inspect.State.OOMKilled, true) + assert.Equal(t, true, 
inspect.State.OOMKilled) } func TestInspectOomKilledFalse(t *testing.T) { @@ -174,12 +173,11 @@ func TestInspectOomKilledFalse(t *testing.T) { ctx := context.Background() client := request.NewAPIClient(t) - name := "testoomkilled" - cID := container.Run(t, ctx, client, container.WithName(name), container.WithCmd("sh", "-c", "echo hello world")) + cID := container.Run(t, ctx, client, container.WithCmd("sh", "-c", "echo hello world")) poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) inspect, err := client.ContainerInspect(ctx, cID) require.NoError(t, err) - assert.Equal(t, inspect.State.OOMKilled, false) + assert.Equal(t, false, inspect.State.OOMKilled) } diff --git a/vendor/github.com/docker/docker/integration/container/links_linux_test.go b/vendor/github.com/docker/docker/integration/container/links_linux_test.go index 87b27e321..d230898ed 100644 --- a/vendor/github.com/docker/docker/integration/container/links_linux_test.go +++ b/vendor/github.com/docker/docker/integration/container/links_linux_test.go @@ -20,7 +20,7 @@ import ( ) func TestLinksEtcHostsContentMatch(t *testing.T) { - skip.If(t, !testEnv.IsLocalDaemon()) + skip.If(t, testEnv.IsRemoteDaemon()) hosts, err := ioutil.ReadFile("/etc/hosts") skip.If(t, os.IsNotExist(err)) diff --git a/vendor/github.com/docker/docker/integration/container/logs_test.go b/vendor/github.com/docker/docker/integration/container/logs_test.go index 1157da14b..bae431527 100644 --- a/vendor/github.com/docker/docker/integration/container/logs_test.go +++ b/vendor/github.com/docker/docker/integration/container/logs_test.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/integration/container" import ( "context" @@ -20,7 +20,6 @@ func TestLogsFollowTailEmpty(t *testing.T) { ctx := context.Background() id := container.Run(t, ctx, client, container.WithCmd("sleep", "100000")) - defer client.ContainerRemove(ctx, id, types.ContainerRemoveOptions{Force: true}) logs, err := client.ContainerLogs(ctx, id, types.ContainerLogsOptions{ShowStdout: true, Tail: "2"}) if logs != nil { diff --git a/vendor/github.com/docker/docker/integration/container/mounts_linux_test.go b/vendor/github.com/docker/docker/integration/container/mounts_linux_test.go index ceec70627..71bdccc71 100644 --- a/vendor/github.com/docker/docker/integration/container/mounts_linux_test.go +++ b/vendor/github.com/docker/docker/integration/container/mounts_linux_test.go @@ -23,6 +23,7 @@ import ( ) func TestContainerShmNoLeak(t *testing.T) { + skip.If(t, testEnv.IsRemoteDaemon(), "cannot start daemon on remote test run") t.Parallel() d := daemon.New(t, "docker", "dockerd", daemon.Config{}) client, err := d.NewClient() @@ -94,7 +95,7 @@ func TestContainerShmNoLeak(t *testing.T) { func TestContainerNetworkMountsNoChown(t *testing.T) { // chown only applies to Linux bind mounted volumes; must be same host to verify - skip.If(t, testEnv.DaemonInfo.OSType != "linux" || !testEnv.IsLocalDaemon()) + skip.If(t, testEnv.DaemonInfo.OSType != "linux" || testEnv.IsRemoteDaemon()) defer setupTest(t)() diff --git a/vendor/github.com/docker/docker/integration/container/nat_test.go b/vendor/github.com/docker/docker/integration/container/nat_test.go index ad93e59f4..293ba9bba 100644 --- a/vendor/github.com/docker/docker/integration/container/nat_test.go +++ b/vendor/github.com/docker/docker/integration/container/nat_test.go @@ -1,4 +1,4 @@ -package container +package container // import 
"github.com/docker/docker/integration/container" import ( "bytes" @@ -22,7 +22,7 @@ import ( ) func TestNetworkNat(t *testing.T) { - skip.If(t, !testEnv.IsLocalDaemon()) + skip.If(t, testEnv.IsRemoteDaemon()) defer setupTest(t)() @@ -36,11 +36,11 @@ func TestNetworkNat(t *testing.T) { data, err := ioutil.ReadAll(conn) require.NoError(t, err) - assert.Equal(t, strings.TrimSpace(string(data)), msg) + assert.Equal(t, msg, strings.TrimSpace(string(data))) } func TestNetworkLocalhostTCPNat(t *testing.T) { - skip.If(t, !testEnv.IsLocalDaemon()) + skip.If(t, testEnv.IsRemoteDaemon()) defer setupTest(t)() @@ -53,11 +53,11 @@ func TestNetworkLocalhostTCPNat(t *testing.T) { data, err := ioutil.ReadAll(conn) require.NoError(t, err) - assert.Equal(t, strings.TrimSpace(string(data)), msg) + assert.Equal(t, msg, strings.TrimSpace(string(data))) } func TestNetworkLoopbackNat(t *testing.T) { - skip.If(t, !testEnv.IsLocalDaemon()) + skip.If(t, testEnv.IsRemoteDaemon()) msg := "it works" startServerContainer(t, msg, 8080) @@ -81,7 +81,7 @@ func TestNetworkLoopbackNat(t *testing.T) { _, err = io.Copy(&b, body) require.NoError(t, err) - assert.Equal(t, strings.TrimSpace(b.String()), msg) + assert.Equal(t, msg, strings.TrimSpace(b.String())) } func startServerContainer(t *testing.T, msg string, port int) string { @@ -109,7 +109,7 @@ func getExternalAddress(t *testing.T) net.IP { ifaceAddrs, err := iface.Addrs() require.NoError(t, err) - assert.NotEqual(t, len(ifaceAddrs), 0) + assert.NotEqual(t, 0, len(ifaceAddrs)) ifaceIP, _, err := net.ParseCIDR(ifaceAddrs[0].String()) require.NoError(t, err) diff --git a/vendor/github.com/docker/docker/integration/container/pause_test.go b/vendor/github.com/docker/docker/integration/container/pause_test.go index cacc3621d..bf9f9c3d8 100644 --- a/vendor/github.com/docker/docker/integration/container/pause_test.go +++ b/vendor/github.com/docker/docker/integration/container/pause_test.go @@ -25,20 +25,19 @@ func TestPause(t *testing.T) { client := request.NewAPIClient(t) ctx := context.Background() - name := "testeventpause" - cID := container.Run(t, ctx, client, container.WithName(name)) + cID := container.Run(t, ctx, client) poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond)) since := request.DaemonUnixTime(ctx, t, client, testEnv) - err := client.ContainerPause(ctx, name) + err := client.ContainerPause(ctx, cID) require.NoError(t, err) inspect, err := client.ContainerInspect(ctx, cID) require.NoError(t, err) - assert.Equal(t, inspect.State.Paused, true) + assert.Equal(t, true, inspect.State.Paused) - err = client.ContainerUnpause(ctx, name) + err = client.ContainerUnpause(ctx, cID) require.NoError(t, err) until := request.DaemonUnixTime(ctx, t, client, testEnv) @@ -46,9 +45,9 @@ func TestPause(t *testing.T) { messages, errs := client.Events(ctx, types.EventsOptions{ Since: since, Until: until, - Filters: filters.NewArgs(filters.Arg("container", name)), + Filters: filters.NewArgs(filters.Arg("container", cID)), }) - assert.Equal(t, getEventActions(t, messages, errs), []string{"pause", "unpause"}) + assert.Equal(t, []string{"pause", "unpause"}, getEventActions(t, messages, errs)) } func TestPauseFailsOnWindowsServerContainers(t *testing.T) { @@ -89,7 +88,7 @@ func getEventActions(t *testing.T, messages <-chan events.Message, errs <-chan e for { select { case err := <-errs: - assert.Equal(t, err == nil || err == io.EOF, true) + assert.True(t, err == nil || err == io.EOF) return actions case e := <-messages: actions = 
append(actions, e.Status) diff --git a/vendor/github.com/docker/docker/integration/container/ps_test.go b/vendor/github.com/docker/docker/integration/container/ps_test.go index dfcb0e2ef..358276b36 100644 --- a/vendor/github.com/docker/docker/integration/container/ps_test.go +++ b/vendor/github.com/docker/docker/integration/container/ps_test.go @@ -5,9 +5,8 @@ import ( "testing" "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/network" + "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/integration/internal/request" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -18,23 +17,9 @@ func TestPsFilter(t *testing.T) { client := request.NewAPIClient(t) ctx := context.Background() - createContainerForFilter := func(ctx context.Context, name string) string { - body, err := client.ContainerCreate(ctx, - &container.Config{ - Cmd: []string{"top"}, - Image: "busybox", - }, - &container.HostConfig{}, - &network.NetworkingConfig{}, - name, - ) - require.NoError(t, err) - return body.ID - } - - prev := createContainerForFilter(ctx, "prev") - createContainerForFilter(ctx, "top") - next := createContainerForFilter(ctx, "next") + prev := container.Create(t, ctx, client, container.WithName("prev")) + container.Create(t, ctx, client, container.WithName("top")) + next := container.Create(t, ctx, client, container.WithName("next")) containerIDs := func(containers []types.Container) []string { entries := []string{} diff --git a/vendor/github.com/docker/docker/integration/container/remove_test.go b/vendor/github.com/docker/docker/integration/container/remove_test.go new file mode 100644 index 000000000..98aacdd20 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/container/remove_test.go @@ -0,0 +1,113 @@ +package container // import "github.com/docker/docker/integration/container" + +import ( + "context" + "os" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/integration/internal/request" + "github.com/docker/docker/internal/testutil" + "github.com/gotestyourself/gotestyourself/fs" + "github.com/gotestyourself/gotestyourself/poll" + "github.com/gotestyourself/gotestyourself/skip" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func getPrefixAndSlashFromDaemonPlatform() (prefix, slash string) { + if testEnv.OSType == "windows" { + return "c:", `\` + } + return "", "/" +} + +// Test case for #5244: `docker rm` fails if bind dir doesn't exist anymore +func TestRemoveContainerWithRemovedVolume(t *testing.T) { + skip.If(t, testEnv.IsRemoteDaemon()) + + defer setupTest(t)() + ctx := context.Background() + client := request.NewAPIClient(t) + + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + tempDir := fs.NewDir(t, "test-rm-container-with-removed-volume", fs.WithMode(0755)) + defer tempDir.Remove() + + cID := container.Run(t, ctx, client, container.WithCmd("true"), container.WithBind(tempDir.Path(), prefix+slash+"test")) + poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) + + err := os.RemoveAll(tempDir.Path()) + require.NoError(t, err) + + err = client.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{ + RemoveVolumes: true, + }) + require.NoError(t, err) + + _, _, err = 
client.ContainerInspectWithRaw(ctx, cID, true) + testutil.ErrorContains(t, err, "No such container") +} + +// Test case for #2099/#2125 +func TestRemoveContainerWithVolume(t *testing.T) { + defer setupTest(t)() + ctx := context.Background() + client := request.NewAPIClient(t) + + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + cID := container.Run(t, ctx, client, container.WithCmd("true"), container.WithVolume(prefix+slash+"srv")) + poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) + + insp, _, err := client.ContainerInspectWithRaw(ctx, cID, true) + require.NoError(t, err) + assert.Equal(t, 1, len(insp.Mounts)) + volName := insp.Mounts[0].Name + + err = client.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{ + RemoveVolumes: true, + }) + require.NoError(t, err) + + volumes, err := client.VolumeList(ctx, filters.NewArgs(filters.Arg("name", volName))) + require.NoError(t, err) + assert.Equal(t, 0, len(volumes.Volumes)) +} + +func TestRemoveContainerRunning(t *testing.T) { + defer setupTest(t)() + ctx := context.Background() + client := request.NewAPIClient(t) + + cID := container.Run(t, ctx, client) + + err := client.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{}) + testutil.ErrorContains(t, err, "cannot remove a running container") +} + +func TestRemoveContainerForceRemoveRunning(t *testing.T) { + defer setupTest(t)() + ctx := context.Background() + client := request.NewAPIClient(t) + + cID := container.Run(t, ctx, client) + + err := client.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{ + Force: true, + }) + require.NoError(t, err) +} + +func TestRemoveInvalidContainer(t *testing.T) { + defer setupTest(t)() + ctx := context.Background() + client := request.NewAPIClient(t) + + err := client.ContainerRemove(ctx, "unknown", types.ContainerRemoveOptions{}) + testutil.ErrorContains(t, err, "No such container") +} diff --git a/vendor/github.com/docker/docker/integration/container/rename_test.go b/vendor/github.com/docker/docker/integration/container/rename_test.go index 2138ee578..3567aee1f 100644 --- a/vendor/github.com/docker/docker/integration/container/rename_test.go +++ b/vendor/github.com/docker/docker/integration/container/rename_test.go @@ -55,7 +55,7 @@ func TestRenameStoppedContainer(t *testing.T) { inspect, err := client.ContainerInspect(ctx, cID) require.NoError(t, err) - assert.Equal(t, inspect.Name, "/"+oldName) + assert.Equal(t, "/"+oldName, inspect.Name) newName := "new_name" + stringid.GenerateNonCryptoID() err = client.ContainerRename(ctx, oldName, newName) @@ -63,7 +63,7 @@ func TestRenameStoppedContainer(t *testing.T) { inspect, err = client.ContainerInspect(ctx, cID) require.NoError(t, err) - assert.Equal(t, inspect.Name, "/"+newName) + assert.Equal(t, "/"+newName, inspect.Name) } func TestRenameRunningContainerAndReuse(t *testing.T) { @@ -81,7 +81,7 @@ func TestRenameRunningContainerAndReuse(t *testing.T) { inspect, err := client.ContainerInspect(ctx, cID) require.NoError(t, err) - assert.Equal(t, inspect.Name, "/"+newName) + assert.Equal(t, "/"+newName, inspect.Name) _, err = client.ContainerInspect(ctx, oldName) testutil.ErrorContains(t, err, "No such container: "+oldName) @@ -91,7 +91,7 @@ func TestRenameRunningContainerAndReuse(t *testing.T) { inspect, err = client.ContainerInspect(ctx, cID) require.NoError(t, err) - assert.Equal(t, inspect.Name, "/"+oldName) + assert.Equal(t, "/"+oldName, inspect.Name) } func TestRenameInvalidName(t *testing.T) { @@ -108,7 +108,7 @@ func 
TestRenameInvalidName(t *testing.T) { inspect, err := client.ContainerInspect(ctx, oldName) require.NoError(t, err) - assert.Equal(t, inspect.ID, cID) + assert.Equal(t, cID, inspect.ID) } // Test case for GitHub issue 22466 @@ -133,6 +133,10 @@ func TestRenameAnonymousContainer(t *testing.T) { }) err = client.ContainerRename(ctx, cID, "container1") require.NoError(t, err) + // Stop/Start the container to get registered + // FIXME(vdemeester) this is a really weird behavior as it fails otherwise + err = client.ContainerStop(ctx, "container1", nil) + require.NoError(t, err) err = client.ContainerStart(ctx, "container1", types.ContainerStartOptions{}) require.NoError(t, err) @@ -152,7 +156,7 @@ func TestRenameAnonymousContainer(t *testing.T) { inspect, err := client.ContainerInspect(ctx, cID) require.NoError(t, err) - assert.Equal(t, inspect.State.ExitCode, 0) + assert.Equal(t, 0, inspect.State.ExitCode, "container %s exited with the wrong exitcode: %+v", cID, inspect) } // TODO: should be a unit test @@ -175,7 +179,7 @@ func TestRenameContainerWithSameName(t *testing.T) { // of the linked container should be updated so that the other // container could still reference to the container that is renamed. func TestRenameContainerWithLinkedContainer(t *testing.T) { - skip.If(t, !testEnv.IsLocalDaemon()) + skip.If(t, testEnv.IsRemoteDaemon()) defer setupTest(t)() ctx := context.Background() @@ -192,5 +196,5 @@ func TestRenameContainerWithLinkedContainer(t *testing.T) { inspect, err := client.ContainerInspect(ctx, "app2/mysql") require.NoError(t, err) - assert.Equal(t, inspect.ID, db1ID) + assert.Equal(t, db1ID, inspect.ID) } diff --git a/vendor/github.com/docker/docker/integration/container/resize_test.go b/vendor/github.com/docker/docker/integration/container/resize_test.go index 0a5f26067..18438ea82 100644 --- a/vendor/github.com/docker/docker/integration/container/resize_test.go +++ b/vendor/github.com/docker/docker/integration/container/resize_test.go @@ -44,7 +44,7 @@ func TestResizeWithInvalidSize(t *testing.T) { endpoint := "/containers/" + cID + "/resize?h=foo&w=bar" res, _, err := req.Post(endpoint) require.NoError(t, err) - assert.Equal(t, res.StatusCode, http.StatusBadRequest) + assert.Equal(t, http.StatusBadRequest, res.StatusCode) } func TestResizeWhenContainerNotStarted(t *testing.T) { diff --git a/vendor/github.com/docker/docker/integration/container/restart_test.go b/vendor/github.com/docker/docker/integration/container/restart_test.go index accaf2cd8..7a2576e21 100644 --- a/vendor/github.com/docker/docker/integration/container/restart_test.go +++ b/vendor/github.com/docker/docker/integration/container/restart_test.go @@ -9,9 +9,11 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/integration-cli/daemon" + "github.com/gotestyourself/gotestyourself/skip" ) func TestDaemonRestartKillContainers(t *testing.T) { + skip.If(t, testEnv.IsRemoteDaemon(), "cannot start daemon on remote test run") type testCase struct { desc string config *container.Config diff --git a/vendor/github.com/docker/docker/integration/image/commit_test.go b/vendor/github.com/docker/docker/integration/image/commit_test.go index a515b706a..39fc956db 100644 --- a/vendor/github.com/docker/docker/integration/image/commit_test.go +++ b/vendor/github.com/docker/docker/integration/image/commit_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" + 
"github.com/docker/docker/integration/internal/container" "github.com/docker/docker/integration/internal/request" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -16,10 +16,9 @@ func TestCommitInheritsEnv(t *testing.T) { client := request.NewAPIClient(t) ctx := context.Background() - createResp1, err := client.ContainerCreate(ctx, &container.Config{Image: "busybox"}, nil, nil, "") - require.NoError(t, err) + cID1 := container.Create(t, ctx, client) - commitResp1, err := client.ContainerCommit(ctx, createResp1.ID, types.ContainerCommitOptions{ + commitResp1, err := client.ContainerCommit(ctx, cID1, types.ContainerCommitOptions{ Changes: []string{"ENV PATH=/bin"}, Reference: "test-commit-image", }) @@ -31,10 +30,9 @@ func TestCommitInheritsEnv(t *testing.T) { expectedEnv1 := []string{"PATH=/bin"} assert.Equal(t, expectedEnv1, image1.Config.Env) - createResp2, err := client.ContainerCreate(ctx, &container.Config{Image: image1.ID}, nil, nil, "") - require.NoError(t, err) + cID2 := container.Create(t, ctx, client, container.WithImage(image1.ID)) - commitResp2, err := client.ContainerCommit(ctx, createResp2.ID, types.ContainerCommitOptions{ + commitResp2, err := client.ContainerCommit(ctx, cID2, types.ContainerCommitOptions{ Changes: []string{"ENV PATH=/usr/bin:$PATH"}, Reference: "test-commit-image", }) diff --git a/vendor/github.com/docker/docker/integration/image/remove_test.go b/vendor/github.com/docker/docker/integration/image/remove_test.go new file mode 100644 index 000000000..825724bd0 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/image/remove_test.go @@ -0,0 +1,60 @@ +package image // import "github.com/docker/docker/integration/image" + +import ( + "context" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/integration/internal/request" + "github.com/docker/docker/internal/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRemoveImageOrphaning(t *testing.T) { + defer setupTest(t)() + ctx := context.Background() + client := request.NewAPIClient(t) + + img := "test-container-orphaning" + + // Create a container from busybox, and commit a small change so we have a new image + cID1 := container.Create(t, ctx, client, container.WithCmd("")) + commitResp1, err := client.ContainerCommit(ctx, cID1, types.ContainerCommitOptions{ + Changes: []string{`ENTRYPOINT ["true"]`}, + Reference: img, + }) + require.NoError(t, err) + + // verifies that reference now points to first image + resp, _, err := client.ImageInspectWithRaw(ctx, img) + require.NoError(t, err) + assert.Equal(t, resp.ID, commitResp1.ID) + + // Create a container from created image, and commit a small change with same reference name + cID2 := container.Create(t, ctx, client, container.WithImage(img), container.WithCmd("")) + commitResp2, err := client.ContainerCommit(ctx, cID2, types.ContainerCommitOptions{ + Changes: []string{`LABEL Maintainer="Integration Tests"`}, + Reference: img, + }) + require.NoError(t, err) + + // verifies that reference now points to second image + resp, _, err = client.ImageInspectWithRaw(ctx, img) + require.NoError(t, err) + assert.Equal(t, resp.ID, commitResp2.ID) + + // try to remove the image, should not error out. 
+	_, err = client.ImageRemove(ctx, img, types.ImageRemoveOptions{})
+	require.NoError(t, err)
+
+	// check if the first image is still there
+	resp, _, err = client.ImageInspectWithRaw(ctx, commitResp1.ID)
+	require.NoError(t, err)
+	assert.Equal(t, resp.ID, commitResp1.ID)
+
+	// check if the second image has been deleted
+	_, _, err = client.ImageInspectWithRaw(ctx, commitResp2.ID)
+	testutil.ErrorContains(t, err, "No such image:")
+}
diff --git a/vendor/github.com/docker/docker/integration/internal/container/ops.go b/vendor/github.com/docker/docker/integration/internal/container/ops.go
index 9360527d3..b4ad66f93 100644
--- a/vendor/github.com/docker/docker/integration/internal/container/ops.go
+++ b/vendor/github.com/docker/docker/integration/internal/container/ops.go
@@ -22,6 +22,13 @@ func WithLinks(links ...string) func(*TestContainerConfig) {
 	}
 }
 
+// WithImage sets the image of the container
+func WithImage(image string) func(*TestContainerConfig) {
+	return func(c *TestContainerConfig) {
+		c.Config.Image = image
+	}
+}
+
 // WithCmd sets the commands of the container
 func WithCmd(cmds ...string) func(*TestContainerConfig) {
 	return func(c *TestContainerConfig) {
diff --git a/vendor/github.com/docker/docker/integration/internal/request/client.go b/vendor/github.com/docker/docker/integration/internal/request/client.go
index 367db14c5..34e589ec8 100644
--- a/vendor/github.com/docker/docker/integration/internal/request/client.go
+++ b/vendor/github.com/docker/docker/integration/internal/request/client.go
@@ -20,8 +20,8 @@ func NewAPIClient(t *testing.T, ops ...func(*client.Client) error) client.APICli
 	return clt
 }
 
-// daemonTime provides the current time on the daemon host
-func daemonTime(ctx context.Context, t *testing.T, client client.APIClient, testEnv *environment.Execution) time.Time {
+// DaemonTime provides the current time on the daemon host
+func DaemonTime(ctx context.Context, t *testing.T, client client.APIClient, testEnv *environment.Execution) time.Time {
 	if testEnv.IsLocalDaemon() {
 		return time.Now()
 	}
@@ -37,6 +37,6 @@ func daemonTime(ctx context.Context, t *testing.T, client client.APIClient, test
 // DaemonUnixTime returns the current time on the daemon host with nanoseconds precision.
 // It returns the time formatted how the client sends timestamps to the server.
func DaemonUnixTime(ctx context.Context, t *testing.T, client client.APIClient, testEnv *environment.Execution) string { - dt := daemonTime(ctx, t, client, testEnv) + dt := DaemonTime(ctx, t, client, testEnv) return fmt.Sprintf("%d.%09d", dt.Unix(), int64(dt.Nanosecond())) } diff --git a/vendor/github.com/docker/docker/integration/network/service_test.go b/vendor/github.com/docker/docker/integration/network/service_test.go new file mode 100644 index 000000000..684b29c1c --- /dev/null +++ b/vendor/github.com/docker/docker/integration/network/service_test.go @@ -0,0 +1,70 @@ +package network // import "github.com/docker/docker/integration/network" + +import ( + "runtime" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "github.com/gotestyourself/gotestyourself/poll" + "github.com/stretchr/testify/require" + "golang.org/x/net/context" +) + +func TestServiceWithPredefinedNetwork(t *testing.T) { + defer setupTest(t)() + d := newSwarm(t) + defer d.Stop(t) + client, err := client.NewClientWithOpts(client.WithHost((d.Sock()))) + require.NoError(t, err) + + hostName := "host" + var instances uint64 = 1 + serviceName := "TestService" + serviceSpec := swarmServiceSpec(serviceName, instances) + serviceSpec.TaskTemplate.Networks = append(serviceSpec.TaskTemplate.Networks, swarm.NetworkAttachmentConfig{Target: hostName}) + + serviceResp, err := client.ServiceCreate(context.Background(), serviceSpec, types.ServiceCreateOptions{ + QueryRegistry: false, + }) + require.NoError(t, err) + + pollSettings := func(config *poll.Settings) { + if runtime.GOARCH == "arm64" || runtime.GOARCH == "arm" { + config.Timeout = 50 * time.Second + config.Delay = 100 * time.Millisecond + } + } + + serviceID := serviceResp.ID + poll.WaitOn(t, serviceRunningCount(client, serviceID, instances), pollSettings) + + _, _, err = client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) + require.NoError(t, err) + + err = client.ServiceRemove(context.Background(), serviceID) + require.NoError(t, err) + + poll.WaitOn(t, serviceIsRemoved(client, serviceID), pollSettings) + poll.WaitOn(t, noTasks(client), pollSettings) + +} + +func serviceRunningCount(client client.ServiceAPIClient, serviceID string, instances uint64) func(log poll.LogT) poll.Result { + return func(log poll.LogT) poll.Result { + filter := filters.NewArgs() + filter.Add("service", serviceID) + services, err := client.ServiceList(context.Background(), types.ServiceListOptions{}) + if err != nil { + return poll.Error(err) + } + + if len(services) != int(instances) { + return poll.Continue("Service count at %d waiting for %d", len(services), instances) + } + return poll.Success() + } +} diff --git a/vendor/github.com/docker/docker/integration/secret/main_test.go b/vendor/github.com/docker/docker/integration/secret/main_test.go index 6a5b0985d..4a3d4c75e 100644 --- a/vendor/github.com/docker/docker/integration/secret/main_test.go +++ b/vendor/github.com/docker/docker/integration/secret/main_test.go @@ -1,4 +1,4 @@ -package secret +package secret // import "github.com/docker/docker/integration/secret" import ( "fmt" diff --git a/vendor/github.com/docker/docker/integration/secret/secret_test.go b/vendor/github.com/docker/docker/integration/secret/secret_test.go index 8a292f005..27c8fd3d0 100644 --- a/vendor/github.com/docker/docker/integration/secret/secret_test.go +++ 
b/vendor/github.com/docker/docker/integration/secret/secret_test.go
@@ -1,4 +1,4 @@
-package secret
+package secret // import "github.com/docker/docker/integration/secret"
 
 import (
 	"bytes"
diff --git a/vendor/github.com/docker/docker/integration/service/inspect_test.go b/vendor/github.com/docker/docker/integration/service/inspect_test.go
index fdb22cf2f..8cd24bc31 100644
--- a/vendor/github.com/docker/docker/integration/service/inspect_test.go
+++ b/vendor/github.com/docker/docker/integration/service/inspect_test.go
@@ -18,7 +18,7 @@ import (
 )
 
 func TestInspect(t *testing.T) {
-	skip.IfCondition(t, !testEnv.IsLocalDaemon())
+	skip.IfCondition(t, testEnv.IsRemoteDaemon())
 	defer setupTest(t)()
 	d := swarm.NewSwarm(t, testEnv)
 	defer d.Stop(t)
diff --git a/vendor/github.com/docker/docker/integration/system/event_test.go b/vendor/github.com/docker/docker/integration/system/event_test.go
index 7f0419569..688d7c27d 100644
--- a/vendor/github.com/docker/docker/integration/system/event_test.go
+++ b/vendor/github.com/docker/docker/integration/system/event_test.go
@@ -2,15 +2,22 @@ package system // import "github.com/docker/docker/integration/system"
 
 import (
 	"context"
+	"encoding/json"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
 	"testing"
-	"time"
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/api/types/strslice"
+	req "github.com/docker/docker/integration-cli/request"
 	"github.com/docker/docker/integration/internal/container"
 	"github.com/docker/docker/integration/internal/request"
+	"github.com/docker/docker/pkg/jsonmessage"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
@@ -58,3 +65,55 @@ func TestEvents(t *testing.T) {
 		}
 	}
 }
+
+// Test case for #18888: Events messages have been switched from generic
+// `JSONMessage` to `events.Message` types. The switch does not break
+// backward compatibility, so the old `JSONMessage` type can still be used.
+// This test verifies that backward compatibility is maintained.
+func TestEventsBackwardsCompatible(t *testing.T) {
+	defer setupTest(t)()
+	ctx := context.Background()
+	client := request.NewAPIClient(t)
+
+	since := request.DaemonTime(ctx, t, client, testEnv)
+	ts := strconv.FormatInt(since.Unix(), 10)
+
+	cID := container.Create(t, ctx, client)
+
+	// In case there are no events, the API should respond immediately (not block);
+	// the test here makes sure the response time is less than 3 sec.
+	expectedTime := time.Now().Add(3 * time.Second)
+	emptyResp, emptyBody, err := req.Get("/events")
+	require.NoError(t, err)
+	defer emptyBody.Close()
+	assert.Equal(t, http.StatusOK, emptyResp.StatusCode)
+	assert.True(t, time.Now().Before(expectedTime), "timeout waiting for events api to respond, should have responded immediately")
+
+	// We also test to make sure the `events.Message` is compatible with `JSONMessage`
+	q := url.Values{}
+	q.Set("since", ts)
+	_, body, err := req.Get("/events?" + q.Encode())
+	require.NoError(t, err)
+	defer body.Close()
+
+	dec := json.NewDecoder(body)
+	var containerCreateEvent *jsonmessage.JSONMessage
+	for {
+		var event jsonmessage.JSONMessage
+		if err := dec.Decode(&event); err != nil {
+			if err == io.EOF {
+				break
+			}
+			t.Fatal(err)
+		}
+		if event.Status == "create" && event.ID == cID {
+			containerCreateEvent = &event
+			break
+		}
+	}
+
+	assert.NotNil(t, containerCreateEvent)
+	assert.Equal(t, "create", containerCreateEvent.Status)
+	assert.Equal(t, cID, containerCreateEvent.ID)
+	assert.Equal(t, "busybox", containerCreateEvent.From)
+}
diff --git a/vendor/github.com/docker/docker/integration/system/info_test.go b/vendor/github.com/docker/docker/integration/system/info_test.go
index 47c46cb87..d04fdcdc8 100644
--- a/vendor/github.com/docker/docker/integration/system/info_test.go
+++ b/vendor/github.com/docker/docker/integration/system/info_test.go
@@ -1,4 +1,4 @@
-package system
+package system // import "github.com/docker/docker/integration/system"
 
 import (
 	"fmt"
diff --git a/vendor/github.com/docker/docker/internal/test/environment/environment.go b/vendor/github.com/docker/docker/internal/test/environment/environment.go
index bde4c0a1b..16f614633 100644
--- a/vendor/github.com/docker/docker/internal/test/environment/environment.go
+++ b/vendor/github.com/docker/docker/internal/test/environment/environment.go
@@ -96,7 +96,7 @@ func toSlash(path string) string {
 }
 
 // IsLocalDaemon is true if the daemon under test is on the same
-// host as the CLI.
+// host as the test process.
 //
 // Deterministically working out the environment in which CI is running
 // to evaluate whether the daemon is local or remote is not possible through
@@ -115,6 +115,12 @@ func (e *Execution) IsLocalDaemon() bool {
 	return os.Getenv("DOCKER_REMOTE_DAEMON") == ""
 }
 
+// IsRemoteDaemon is true if the daemon under test is on a different host
+// from the test process.
+func (e *Execution) IsRemoteDaemon() bool {
+	return !e.IsLocalDaemon()
+}
+
 // Print the execution details to stdout
 // TODO: print everything
 func (e *Execution) Print() {
diff --git a/vendor/github.com/docker/docker/internal/test/environment/protect.go b/vendor/github.com/docker/docker/internal/test/environment/protect.go
index 482dde60b..ffbf985b7 100644
--- a/vendor/github.com/docker/docker/internal/test/environment/protect.go
+++ b/vendor/github.com/docker/docker/internal/test/environment/protect.go
@@ -9,7 +9,7 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-var frozenImages = []string{"busybox:1.27-glibc", "hello-world:frozen", "debian:jessie"}
+var frozenImages = []string{"busybox:latest", "busybox:glibc", "hello-world:frozen", "debian:jessie"}
 
 type protectedElements struct {
 	containers map[string]struct{}
diff --git a/vendor/github.com/docker/docker/layer/filestore.go b/vendor/github.com/docker/docker/layer/filestore.go
index a7454b5e9..b1cbb8016 100644
--- a/vendor/github.com/docker/docker/layer/filestore.go
+++ b/vendor/github.com/docker/docker/layer/filestore.go
@@ -37,10 +37,10 @@ type fileMetadataTransaction struct {
 	ws *ioutils.AtomicWriteSet
}
 
-// NewFSMetadataStore returns an instance of a metadata store
+// newFSMetadataStore returns an instance of a metadata store
 // which is backed by files on disk using the provided root
 // as the root of metadata files.
-func NewFSMetadataStore(root string) (MetadataStore, error) { +func newFSMetadataStore(root string) (*fileMetadataStore, error) { if err := os.MkdirAll(root, 0700); err != nil { return nil, err } @@ -66,7 +66,7 @@ func (fms *fileMetadataStore) getMountFilename(mount, filename string) string { return filepath.Join(fms.getMountDirectory(mount), filename) } -func (fms *fileMetadataStore) StartTransaction() (MetadataTransaction, error) { +func (fms *fileMetadataStore) StartTransaction() (*fileMetadataTransaction, error) { tmpDir := filepath.Join(fms.root, "tmp") if err := os.MkdirAll(tmpDir, 0755); err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/layer/filestore_test.go b/vendor/github.com/docker/docker/layer/filestore_test.go index 8e6dd27f9..498379e37 100644 --- a/vendor/github.com/docker/docker/layer/filestore_test.go +++ b/vendor/github.com/docker/docker/layer/filestore_test.go @@ -24,12 +24,12 @@ func newFileMetadataStore(t *testing.T) (*fileMetadataStore, string, func()) { if err != nil { t.Fatal(err) } - fms, err := NewFSMetadataStore(td) + fms, err := newFSMetadataStore(td) if err != nil { t.Fatal(err) } - return fms.(*fileMetadataStore), td, func() { + return fms, td, func() { if err := os.RemoveAll(td); err != nil { t.Logf("Failed to cleanup %q: %s", td, err) } diff --git a/vendor/github.com/docker/docker/layer/layer.go b/vendor/github.com/docker/docker/layer/layer.go index 48fa7cf3c..d0c7fa860 100644 --- a/vendor/github.com/docker/docker/layer/layer.go +++ b/vendor/github.com/docker/docker/layer/layer.go @@ -201,54 +201,6 @@ type DescribableStore interface { RegisterWithDescriptor(io.Reader, ChainID, distribution.Descriptor) (Layer, error) } -// MetadataTransaction represents functions for setting layer metadata -// with a single transaction. -type MetadataTransaction interface { - SetSize(int64) error - SetParent(parent ChainID) error - SetDiffID(DiffID) error - SetCacheID(string) error - SetDescriptor(distribution.Descriptor) error - setOS(string) error - TarSplitWriter(compressInput bool) (io.WriteCloser, error) - - Commit(ChainID) error - Cancel() error - String() string -} - -// MetadataStore represents a backend for persisting -// metadata about layers and providing the metadata -// for restoring a Store. -type MetadataStore interface { - // StartTransaction starts an update for new metadata - // which will be used to represent an ID on commit. - StartTransaction() (MetadataTransaction, error) - - GetSize(ChainID) (int64, error) - GetParent(ChainID) (ChainID, error) - GetDiffID(ChainID) (DiffID, error) - GetCacheID(ChainID) (string, error) - GetDescriptor(ChainID) (distribution.Descriptor, error) - getOS(ChainID) (string, error) - TarSplitReader(ChainID) (io.ReadCloser, error) - - SetMountID(string, string) error - SetInitID(string, string) error - SetMountParent(string, ChainID) error - - GetMountID(string) (string, error) - GetInitID(string) (string, error) - GetMountParent(string) (ChainID, error) - - // List returns the full list of referenced - // read-only and read-write layers - List() ([]ChainID, []string, error) - - Remove(ChainID) error - RemoveMount(string) error -} - // CreateChainID returns ID for a layerDigest slice func CreateChainID(dgsts []DiffID) ChainID { return createChainIDFromParent("", dgsts...) 
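The layer.go hunk above deletes the exported MetadataStore and MetadataTransaction interfaces: fileMetadataStore is the only implementation left, so the package now hands out concrete types instead. A minimal sketch of that pattern, with illustrative names (store, newStore, path) rather than the real layer package API:

package layer

import (
	"os"
	"path/filepath"
)

// store is the lone concrete implementation; once no second backend
// exists, an exported interface only freezes the method set for callers.
type store struct {
	root string
}

// newStore returns the concrete type. Unexported constructors returning
// concrete types leave the package free to add or change methods (as
// StartTransaction now returns *fileMetadataTransaction) without
// breaking an exported interface contract.
func newStore(root string) (*store, error) {
	if err := os.MkdirAll(root, 0700); err != nil {
		return nil, err
	}
	return &store{root: root}, nil
}

// path joins metadata file names under the store root, the kind of
// helper the concrete type can grow freely.
func (s *store) path(parts ...string) string {
	return filepath.Join(append([]string{s.root}, parts...)...)
}

The layer_store.go hunks that follow thread the concrete type straight through layerStore, which is why applyTar and storeLayer can now take a *fileMetadataTransaction directly.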
diff --git a/vendor/github.com/docker/docker/layer/layer_store.go b/vendor/github.com/docker/docker/layer/layer_store.go index ec8ecd6c1..bf0705afc 100644 --- a/vendor/github.com/docker/docker/layer/layer_store.go +++ b/vendor/github.com/docker/docker/layer/layer_store.go @@ -27,7 +27,7 @@ import ( const maxLayerDepth = 125 type layerStore struct { - store MetadataStore + store *fileMetadataStore driver graphdriver.Driver useTarSplit bool @@ -65,18 +65,15 @@ func NewStoreFromOptions(options StoreOptions) (Store, error) { } logrus.Debugf("Initialized graph driver %s", driver) - fms, err := NewFSMetadataStore(fmt.Sprintf(options.MetadataStorePathTemplate, driver)) - if err != nil { - return nil, err - } + root := fmt.Sprintf(options.MetadataStorePathTemplate, driver) - return NewStoreFromGraphDriver(fms, driver, options.OS) + return newStoreFromGraphDriver(root, driver, options.OS) } -// NewStoreFromGraphDriver creates a new Store instance using the provided +// newStoreFromGraphDriver creates a new Store instance using the provided // metadata store and graph driver. The metadata store will be used to restore // the Store. -func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver, os string) (Store, error) { +func newStoreFromGraphDriver(root string, driver graphdriver.Driver, os string) (Store, error) { if !system.IsOSSupported(os) { return nil, fmt.Errorf("failed to initialize layer store as operating system '%s' is not supported", os) } @@ -85,8 +82,13 @@ func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver, os caps = capDriver.Capabilities() } + ms, err := newFSMetadataStore(root) + if err != nil { + return nil, err + } + ls := &layerStore{ - store: store, + store: ms, driver: driver, layerMap: map[ChainID]*roLayer{}, mounts: map[string]*mountedLayer{}, @@ -94,7 +96,7 @@ func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver, os os: os, } - ids, mounts, err := store.List() + ids, mounts, err := ms.List() if err != nil { return nil, err } @@ -225,7 +227,7 @@ func (ls *layerStore) loadMount(mount string) error { return nil } -func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent string, layer *roLayer) error { +func (ls *layerStore) applyTar(tx *fileMetadataTransaction, ts io.Reader, parent string, layer *roLayer) error { digester := digest.Canonical.Digester() tr := io.TeeReader(ts, digester.Hash()) diff --git a/vendor/github.com/docker/docker/layer/layer_test.go b/vendor/github.com/docker/docker/layer/layer_test.go index 0bf25bc72..5c4e8fab1 100644 --- a/vendor/github.com/docker/docker/layer/layer_test.go +++ b/vendor/github.com/docker/docker/layer/layer_test.go @@ -69,11 +69,8 @@ func newTestStore(t *testing.T) (Store, string, func()) { } graph, graphcleanup := newTestGraphDriver(t) - fms, err := NewFSMetadataStore(td) - if err != nil { - t.Fatal(err) - } - ls, err := NewStoreFromGraphDriver(fms, graph, runtime.GOOS) + + ls, err := newStoreFromGraphDriver(td, graph, runtime.GOOS) if err != nil { t.Fatal(err) } @@ -403,7 +400,7 @@ func TestStoreRestore(t *testing.T) { t.Fatal(err) } - ls2, err := NewStoreFromGraphDriver(ls.(*layerStore).store, ls.(*layerStore).driver, runtime.GOOS) + ls2, err := newStoreFromGraphDriver(ls.(*layerStore).store.root, ls.(*layerStore).driver, runtime.GOOS) if err != nil { t.Fatal(err) } diff --git a/vendor/github.com/docker/docker/layer/migration_test.go b/vendor/github.com/docker/docker/layer/migration_test.go index 2c6800f15..923166371 100644 --- 
a/vendor/github.com/docker/docker/layer/migration_test.go +++ b/vendor/github.com/docker/docker/layer/migration_test.go @@ -90,11 +90,8 @@ func TestLayerMigration(t *testing.T) { t.Fatal(err) } - fms, err := NewFSMetadataStore(filepath.Join(td, "layers")) - if err != nil { - t.Fatal(err) - } - ls, err := NewStoreFromGraphDriver(fms, graph, runtime.GOOS) + root := filepath.Join(td, "layers") + ls, err := newStoreFromGraphDriver(root, graph, runtime.GOOS) if err != nil { t.Fatal(err) } @@ -218,11 +215,8 @@ func TestLayerMigrationNoTarsplit(t *testing.T) { t.Fatal(err) } - fms, err := NewFSMetadataStore(filepath.Join(td, "layers")) - if err != nil { - t.Fatal(err) - } - ls, err := NewStoreFromGraphDriver(fms, graph, runtime.GOOS) + root := filepath.Join(td, "layers") + ls, err := newStoreFromGraphDriver(root, graph, runtime.GOOS) if err != nil { t.Fatal(err) } diff --git a/vendor/github.com/docker/docker/layer/ro_layer.go b/vendor/github.com/docker/docker/layer/ro_layer.go index e1f1b407c..bc0fe1ddd 100644 --- a/vendor/github.com/docker/docker/layer/ro_layer.go +++ b/vendor/github.com/docker/docker/layer/ro_layer.go @@ -121,7 +121,7 @@ func (rl *roLayer) depth() int { return rl.parent.depth() + 1 } -func storeLayer(tx MetadataTransaction, layer *roLayer) error { +func storeLayer(tx *fileMetadataTransaction, layer *roLayer) error { if err := tx.SetDiffID(layer.diffID); err != nil { return err } diff --git a/vendor/github.com/docker/docker/pkg/mount/mount.go b/vendor/github.com/docker/docker/pkg/mount/mount.go index 8ff4925d7..7bdbf8a83 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mount.go +++ b/vendor/github.com/docker/docker/pkg/mount/mount.go @@ -72,7 +72,9 @@ func RecursiveUnmount(target string) error { } // Make the deepest mount be first - sort.Sort(sort.Reverse(byMountpoint(mounts))) + sort.Slice(mounts, func(i, j int) bool { + return len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint) + }) for i, m := range mounts { if !strings.HasPrefix(m.Mountpoint, target) { diff --git a/vendor/github.com/docker/docker/pkg/mount/mount_unix_test.go b/vendor/github.com/docker/docker/pkg/mount/mount_unix_test.go index f3333d5b3..84699eee5 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mount_unix_test.go +++ b/vendor/github.com/docker/docker/pkg/mount/mount_unix_test.go @@ -25,6 +25,10 @@ func TestMountOptionsParsing(t *testing.T) { } func TestMounted(t *testing.T) { + if os.Getuid() != 0 { + t.Skip("root required") + } + tmp := path.Join(os.TempDir(), "mount-tests") if err := os.MkdirAll(tmp, 0777); err != nil { t.Fatal(err) @@ -76,6 +80,10 @@ func TestMounted(t *testing.T) { } func TestMountReadonly(t *testing.T) { + if os.Getuid() != 0 { + t.Skip("root required") + } + tmp := path.Join(os.TempDir(), "mount-tests") if err := os.MkdirAll(tmp, 0777); err != nil { t.Fatal(err) diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_linux_test.go b/vendor/github.com/docker/docker/pkg/mount/mounter_linux_test.go index ac858e269..e5da438ef 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mounter_linux_test.go +++ b/vendor/github.com/docker/docker/pkg/mount/mounter_linux_test.go @@ -14,7 +14,7 @@ import ( func TestMount(t *testing.T) { if os.Getuid() != 0 { - t.Skip("not root tests would fail") + t.Skip("root required") } source, err := ioutil.TempDir("", "mount-test-source-") diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo.go index 05803938a..ecd03fc02 100644 --- 
a/vendor/github.com/docker/docker/pkg/mount/mountinfo.go +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo.go @@ -38,17 +38,3 @@ type Info struct { // VfsOpts represents per super block options. VfsOpts string } - -type byMountpoint []*Info - -func (by byMountpoint) Len() int { - return len(by) -} - -func (by byMountpoint) Less(i, j int) bool { - return by[i].Mountpoint < by[j].Mountpoint -} - -func (by byMountpoint) Swap(i, j int) { - by[i], by[j] = by[j], by[i] -} diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go index 0cb2b959c..019514491 100644 --- a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go +++ b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go @@ -12,6 +12,10 @@ import ( // nothing is propagated in or out func TestSubtreePrivate(t *testing.T) { + if os.Getuid() != 0 { + t.Skip("root required") + } + tmp := path.Join(os.TempDir(), "mount-tests") if err := os.MkdirAll(tmp, 0777); err != nil { t.Fatal(err) @@ -110,6 +114,10 @@ func TestSubtreePrivate(t *testing.T) { // Testing that when a target is a shared mount, // then child mounts propagate to the source func TestSubtreeShared(t *testing.T) { + if os.Getuid() != 0 { + t.Skip("root required") + } + tmp := path.Join(os.TempDir(), "mount-tests") if err := os.MkdirAll(tmp, 0777); err != nil { t.Fatal(err) @@ -178,6 +186,10 @@ func TestSubtreeShared(t *testing.T) { // testing that mounts to a shared source show up in the slave target, // and that mounts into a slave target do _not_ show up in the shared source func TestSubtreeSharedSlave(t *testing.T) { + if os.Getuid() != 0 { + t.Skip("root required") + } + tmp := path.Join(os.TempDir(), "mount-tests") if err := os.MkdirAll(tmp, 0777); err != nil { t.Fatal(err) @@ -282,6 +294,10 @@ func TestSubtreeSharedSlave(t *testing.T) { } func TestSubtreeUnbindable(t *testing.T) { + if os.Getuid() != 0 { + t.Skip("root required") + } + tmp := path.Join(os.TempDir(), "mount-tests") if err := os.MkdirAll(tmp, 0777); err != nil { t.Fatal(err) diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go index 8104bf63c..aec4cce72 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go +++ b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go @@ -1,44 +1,45 @@ package operatingsystem // import "github.com/docker/docker/pkg/parsers/operatingsystem" import ( - "unsafe" + "fmt" - "golang.org/x/sys/windows" + "golang.org/x/sys/windows/registry" ) -// See https://code.google.com/p/go/source/browse/src/pkg/mime/type_windows.go?r=d14520ac25bf6940785aabb71f5be453a286f58c -// for a similar sample - // GetOperatingSystem gets the name of the current operating system. 
func GetOperatingSystem() (string, error) { - var h windows.Handle - // Default return value ret := "Unknown Operating System" - if err := windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE, - windows.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), - 0, - windows.KEY_READ, - &h); err != nil { + k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\WIndows NT\CurrentVersion`, registry.QUERY_VALUE) + if err != nil { return ret, err } - defer windows.RegCloseKey(h) + defer k.Close() - var buf [1 << 10]uint16 - var typ uint32 - n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 - - if err := windows.RegQueryValueEx(h, - windows.StringToUTF16Ptr("ProductName"), - nil, - &typ, - (*byte)(unsafe.Pointer(&buf[0])), - &n); err != nil { + pn, _, err := k.GetStringValue("ProductName") + if err != nil { return ret, err } - ret = windows.UTF16ToString(buf[:]) + ret = pn + + ri, _, err := k.GetStringValue("ReleaseId") + if err != nil { + return ret, err + } + ret = fmt.Sprintf("%s Version %s", ret, ri) + + cbn, _, err := k.GetStringValue("CurrentBuildNumber") + if err != nil { + return ret, err + } + + ubr, _, err := k.GetIntegerValue("UBR") + if err != nil { + return ret, err + } + ret = fmt.Sprintf("%s (OS Build %s.%d)", ret, cbn, ubr) return ret, nil } diff --git a/vendor/github.com/docker/docker/pkg/plugins/client_test.go b/vendor/github.com/docker/docker/pkg/plugins/client_test.go index 44dd0fa81..10c8d8fd5 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/client_test.go +++ b/vendor/github.com/docker/docker/pkg/plugins/client_test.go @@ -2,6 +2,7 @@ package plugins // import "github.com/docker/docker/pkg/plugins" import ( "bytes" + "context" "encoding/json" "io" "net/http" @@ -13,7 +14,9 @@ import ( "github.com/docker/docker/pkg/plugins/transport" "github.com/docker/go-connections/tlsconfig" + "github.com/pkg/errors" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var ( @@ -232,3 +235,43 @@ func TestClientSendFile(t *testing.T) { } assert.Equal(t, m, output) } + +func TestClientWithRequestTimeout(t *testing.T) { + timeout := 1 * time.Millisecond + testHandler := func(w http.ResponseWriter, r *http.Request) { + time.Sleep(timeout + 1*time.Millisecond) + w.WriteHeader(http.StatusOK) + } + + srv := httptest.NewServer(http.HandlerFunc(testHandler)) + defer srv.Close() + + client := &Client{http: srv.Client(), requestFactory: &testRequestWrapper{srv}} + _, err := client.callWithRetry("/Plugin.Hello", nil, false, WithRequestTimeout(timeout)) + require.Error(t, err, "expected error") + + err = errors.Cause(err) + + switch e := err.(type) { + case *url.Error: + err = e.Err + } + require.Equal(t, context.DeadlineExceeded, err) +} + +type testRequestWrapper struct { + *httptest.Server +} + +func (w *testRequestWrapper) NewRequest(path string, data io.Reader) (*http.Request, error) { + req, err := http.NewRequest("POST", path, data) + if err != nil { + return nil, err + } + u, err := url.Parse(w.Server.URL) + if err != nil { + return nil, err + } + req.URL = u + return req, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go index 85e89a7ee..ee7e0256f 100644 --- a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go @@ -1,6 +1,7 @@ package system // import "github.com/docker/docker/pkg/system" import ( + "fmt" "unsafe" "github.com/sirupsen/logrus" @@ 
-53,6 +54,10 @@ func GetOSVersion() OSVersion { return osv } +func (osv OSVersion) ToString() string { + return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build) +} + // IsWindowsClient returns true if the SKU is client // @engine maintainers - this function should not be removed or modified as it // is used to enforce licensing restrictions on Windows. diff --git a/vendor/github.com/docker/docker/plugin/backend_linux.go b/vendor/github.com/docker/docker/plugin/backend_linux.go index c86b11f61..000ee996d 100644 --- a/vendor/github.com/docker/docker/plugin/backend_linux.go +++ b/vendor/github.com/docker/docker/plugin/backend_linux.go @@ -33,6 +33,7 @@ import ( "github.com/docker/docker/plugin/v2" refstore "github.com/docker/docker/reference" digest "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/net/context" @@ -146,10 +147,15 @@ func (s *tempConfigStore) Get(d digest.Digest) ([]byte, error) { return s.config, nil } -func (s *tempConfigStore) RootFSAndOSFromConfig(c []byte) (*image.RootFS, string, error) { +func (s *tempConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) { return configToRootFS(c) } +func (s *tempConfigStore) PlatformFromConfig(c []byte) (*specs.Platform, error) { + // TODO: LCOW/Plugins. This will need revisiting. For now use the runtime OS + return &specs.Platform{OS: runtime.GOOS}, nil +} + func computePrivileges(c types.PluginConfig) types.PluginPrivileges { var privileges types.PluginPrivileges if c.Network.Type != "null" && c.Network.Type != "bridge" && c.Network.Type != "" { @@ -534,10 +540,15 @@ func (s *pluginConfigStore) Get(d digest.Digest) ([]byte, error) { return ioutil.ReadAll(rwc) } -func (s *pluginConfigStore) RootFSAndOSFromConfig(c []byte) (*image.RootFS, string, error) { +func (s *pluginConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) { return configToRootFS(c) } +func (s *pluginConfigStore) PlatformFromConfig(c []byte) (*specs.Platform, error) { + // TODO: LCOW/Plugins. This will need revisiting. For now use the runtime OS + return &specs.Platform{OS: runtime.GOOS}, nil +} + type pluginLayerProvider struct { pm *Manager plugin *v2.Plugin diff --git a/vendor/github.com/docker/docker/plugin/blobstore.go b/vendor/github.com/docker/docker/plugin/blobstore.go index 170222070..82d6ce18e 100644 --- a/vendor/github.com/docker/docker/plugin/blobstore.go +++ b/vendor/github.com/docker/docker/plugin/blobstore.go @@ -6,6 +6,7 @@ import ( "io/ioutil" "os" "path/filepath" + "runtime" "github.com/docker/docker/distribution/xfer" "github.com/docker/docker/image" @@ -14,6 +15,7 @@ import ( "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/progress" "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/net/context" @@ -178,6 +180,10 @@ func (dm *downloadManager) Put(dt []byte) (digest.Digest, error) { func (dm *downloadManager) Get(d digest.Digest) ([]byte, error) { return nil, fmt.Errorf("digest not found") } -func (dm *downloadManager) RootFSAndOSFromConfig(c []byte) (*image.RootFS, string, error) { +func (dm *downloadManager) RootFSFromConfig(c []byte) (*image.RootFS, error) { return configToRootFS(c) } +func (dm *downloadManager) PlatformFromConfig(c []byte) (*specs.Platform, error) { + // TODO: LCOW/Plugins. This will need revisiting. 
For now use the runtime OS + return &specs.Platform{OS: runtime.GOOS}, nil +} diff --git a/vendor/github.com/docker/docker/plugin/manager.go b/vendor/github.com/docker/docker/plugin/manager.go index 1879a7890..7595e7cbc 100644 --- a/vendor/github.com/docker/docker/plugin/manager.go +++ b/vendor/github.com/docker/docker/plugin/manager.go @@ -8,7 +8,6 @@ import ( "path/filepath" "reflect" "regexp" - "runtime" "sort" "strings" "sync" @@ -353,19 +352,17 @@ func isEqualPrivilege(a, b types.PluginPrivilege) bool { return reflect.DeepEqual(a.Value, b.Value) } -func configToRootFS(c []byte) (*image.RootFS, string, error) { - // TODO @jhowardmsft LCOW - Will need to revisit this. - os := runtime.GOOS +func configToRootFS(c []byte) (*image.RootFS, error) { var pluginConfig types.PluginConfig if err := json.Unmarshal(c, &pluginConfig); err != nil { - return nil, "", err + return nil, err } // validation for empty rootfs is in distribution code if pluginConfig.Rootfs == nil { - return nil, os, nil + return nil, nil } - return rootFSFromPlugin(pluginConfig.Rootfs), os, nil + return rootFSFromPlugin(pluginConfig.Rootfs), nil } func rootFSFromPlugin(pluginfs *types.PluginConfigRootfs) *image.RootFS { diff --git a/vendor/github.com/docker/docker/profiles/seccomp/default.json b/vendor/github.com/docker/docker/profiles/seccomp/default.json index 38467c7be..5717c00cd 100755 --- a/vendor/github.com/docker/docker/profiles/seccomp/default.json +++ b/vendor/github.com/docker/docker/profiles/seccomp/default.json @@ -322,6 +322,7 @@ "stat64", "statfs", "statfs64", + "statx", "symlink", "symlinkat", "sync", diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go index 9deab38e1..be29aa4f7 100644 --- a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go +++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go @@ -315,6 +315,7 @@ func DefaultProfile() *types.Seccomp { "stat64", "statfs", "statfs64", + "statx", "symlink", "symlinkat", "sync", diff --git a/vendor/github.com/docker/docker/vendor.conf b/vendor/github.com/docker/docker/vendor.conf index dd641112e..766142e67 100644 --- a/vendor/github.com/docker/docker/vendor.conf +++ b/vendor/github.com/docker/docker/vendor.conf @@ -72,7 +72,7 @@ github.com/pborman/uuid v1.0 google.golang.org/grpc v1.3.0 # When updating, also update RUNC_COMMIT in hack/dockerfile/install/runc accordingly -github.com/opencontainers/runc 6c55f98695e902427906eed2c799e566e3d3dfb5 +github.com/opencontainers/runc 4fc53a81fb7c994640722ac585fa9ca548971871 github.com/opencontainers/runtime-spec v1.0.1 github.com/opencontainers/image-spec v1.0.1 github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0 @@ -114,7 +114,7 @@ github.com/containerd/containerd 3fa104f843ec92328912e042b767d26825f202aa github.com/containerd/fifo fbfb6a11ec671efbe94ad1c12c2e98773f19e1e6 github.com/containerd/continuity d8fb8589b0e8e85b8c8bbaa8840226d0dfeb7371 github.com/containerd/cgroups c0710c92e8b3a44681d1321dcfd1360fc5c6c089 -github.com/containerd/console 84eeaae905fa414d03e07bcd6c8d3f19e7cf180e +github.com/containerd/console 2748ece16665b45a47f884001d5831ec79703880 github.com/containerd/go-runc 4f6e87ae043f859a38255247b49c9abc262d002f github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788 github.com/dmcgowan/go-tar go1.10 diff --git a/vendor/github.com/docker/docker/volume/linux_parser.go b/vendor/github.com/docker/docker/volume/linux_parser.go index 
3d94b789a..6eb796b67 100644 --- a/vendor/github.com/docker/docker/volume/linux_parser.go +++ b/vendor/github.com/docker/docker/volume/linux_parser.go @@ -83,7 +83,7 @@ func (p *linuxParser) validateMountConfigImpl(mnt *mount.Mount, validateBindSour if validateBindSourceExists { exists, _, _ := currentFileInfoProvider.fileInfo(mnt.Source) if !exists { - return &errMountConfig{mnt, errBindNotExist} + return &errMountConfig{mnt, errBindSourceDoesNotExist(mnt.Source)} } } diff --git a/vendor/github.com/docker/docker/volume/validate.go b/vendor/github.com/docker/docker/volume/validate.go index 3ebdcd7d9..6512fb11b 100644 --- a/vendor/github.com/docker/docker/volume/validate.go +++ b/vendor/github.com/docker/docker/volume/validate.go @@ -7,8 +7,6 @@ import ( "github.com/pkg/errors" ) -var errBindNotExist = errors.New("bind source path does not exist") - type errMountConfig struct { mount *mount.Mount err error @@ -18,6 +16,10 @@ func (e *errMountConfig) Error() string { return fmt.Sprintf("invalid mount config for type %q: %v", e.mount.Type, e.err.Error()) } +func errBindSourceDoesNotExist(path string) error { + return errors.Errorf("bind mount source path does not exist: %s", path) +} + func errExtraField(name string) error { return errors.Errorf("field %s must not be specified", name) } diff --git a/vendor/github.com/docker/docker/volume/validate_test.go b/vendor/github.com/docker/docker/volume/validate_test.go index d767b3d7f..d230ef319 100644 --- a/vendor/github.com/docker/docker/volume/validate_test.go +++ b/vendor/github.com/docker/docker/volume/validate_test.go @@ -31,7 +31,7 @@ func TestValidateMount(t *testing.T) { {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath}, nil}, {mount.Mount{Type: "invalid", Target: testDestinationPath}, errors.New("mount type unknown")}, - {mount.Mount{Type: mount.TypeBind, Source: testSourcePath, Target: testDestinationPath}, errBindNotExist}, + {mount.Mount{Type: mount.TypeBind, Source: testSourcePath, Target: testDestinationPath}, errBindSourceDoesNotExist(testSourcePath)}, } lcowCases := []struct { @@ -44,7 +44,7 @@ func TestValidateMount(t *testing.T) { {mount.Mount{Type: mount.TypeBind}, errMissingField("Target")}, {mount.Mount{Type: mount.TypeBind, Target: "/foo"}, errMissingField("Source")}, {mount.Mount{Type: mount.TypeBind, Target: "/foo", Source: "c:\\foo", VolumeOptions: &mount.VolumeOptions{}}, errExtraField("VolumeOptions")}, - {mount.Mount{Type: mount.TypeBind, Source: "c:\\foo", Target: "/foo"}, errBindNotExist}, + {mount.Mount{Type: mount.TypeBind, Source: "c:\\foo", Target: "/foo"}, errBindSourceDoesNotExist("c:\\foo")}, {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: "/foo"}, nil}, {mount.Mount{Type: "invalid", Target: "/foo"}, errors.New("mount type unknown")}, } diff --git a/vendor/github.com/docker/docker/volume/volume_test.go b/vendor/github.com/docker/docker/volume/volume_test.go index 3ff2861ba..f5bc1b0f5 100644 --- a/vendor/github.com/docker/docker/volume/volume_test.go +++ b/vendor/github.com/docker/docker/volume/volume_test.go @@ -120,7 +120,7 @@ func TestParseMountRaw(t *testing.T) { `c:\:d:\:xyzzy`: "invalid volume specification: ", `c:`: "cannot be `c:`", `c:\`: "cannot be `c:`", - `c:\notexist:d:`: `source path does not exist`, + `c:\notexist:d:`: `bind mount source path does not exist: c:\notexist`, `c:\windows\system32\ntdll.dll:d:`: `source path must be a directory`, `name<:d:`: `invalid volume specification`, `name>:d:`: `invalid volume specification`, @@ -189,7 +189,7 @@ func 
TestParseMountRaw(t *testing.T) { `c:\:/foo:xyzzy`: "invalid volume specification: ", `/`: "destination can't be '/'", `/..`: "destination can't be '/'", - `c:\notexist:/foo`: `source path does not exist`, + `c:\notexist:/foo`: `bind mount source path does not exist: c:\notexist`, `c:\windows\system32\ntdll.dll:/foo`: `source path must be a directory`, `name<:/foo`: `invalid volume specification`, `name>:/foo`: `invalid volume specification`, diff --git a/vendor/github.com/docker/docker/volume/windows_parser.go b/vendor/github.com/docker/docker/volume/windows_parser.go index ad4e6fc28..84b6717c9 100644 --- a/vendor/github.com/docker/docker/volume/windows_parser.go +++ b/vendor/github.com/docker/docker/volume/windows_parser.go @@ -252,7 +252,7 @@ func (p *windowsParser) validateMountConfigReg(mnt *mount.Mount, destRegex strin return &errMountConfig{mnt, err} } if !exists { - return &errMountConfig{mnt, errBindNotExist} + return &errMountConfig{mnt, errBindSourceDoesNotExist(mnt.Source)} } if !isdir { return &errMountConfig{mnt, fmt.Errorf("source path must be a directory")} diff --git a/vendor/github.com/fnproject/fn_go/VERSION b/vendor/github.com/fnproject/fn_go/VERSION index 373f8c6f0..72f9fa820 100644 --- a/vendor/github.com/fnproject/fn_go/VERSION +++ b/vendor/github.com/fnproject/fn_go/VERSION @@ -1 +1 @@ -0.2.3 \ No newline at end of file +0.2.4 \ No newline at end of file diff --git a/vendor/github.com/fnproject/fn_go/client/fn_client.go b/vendor/github.com/fnproject/fn_go/client/fn_client.go index 22e6eee3f..f643d9da7 100644 --- a/vendor/github.com/fnproject/fn_go/client/fn_client.go +++ b/vendor/github.com/fnproject/fn_go/client/fn_client.go @@ -41,9 +41,6 @@ func NewHTTPClient(formats strfmt.Registry) *Fn { // using a customizable transport config. 
func NewHTTPClientWithConfig(formats strfmt.Registry, cfg *TransportConfig) *Fn { // ensure nullable parameters have default - if formats == nil { - formats = strfmt.Default - } if cfg == nil { cfg = DefaultTransportConfig() } @@ -55,6 +52,11 @@ func NewHTTPClientWithConfig(formats strfmt.Registry, cfg *TransportConfig) *Fn // New creates a new fn client func New(transport runtime.ClientTransport, formats strfmt.Registry) *Fn { + // ensure nullable parameters have default + if formats == nil { + formats = strfmt.Default + } + cli := new(Fn) cli.Transport = transport diff --git a/vendor/github.com/fnproject/fn_go/client/operations/get_apps_app_calls_call_log_responses.go b/vendor/github.com/fnproject/fn_go/client/operations/get_apps_app_calls_call_log_responses.go index da4cff8b6..f1c96f9ad 100644 --- a/vendor/github.com/fnproject/fn_go/client/operations/get_apps_app_calls_call_log_responses.go +++ b/vendor/github.com/fnproject/fn_go/client/operations/get_apps_app_calls_call_log_responses.go @@ -54,7 +54,7 @@ func NewGetAppsAppCallsCallLogOK() *GetAppsAppCallsCallLogOK { Log found */ type GetAppsAppCallsCallLogOK struct { - Payload string + Payload *models.LogWrapper } func (o *GetAppsAppCallsCallLogOK) Error() string { @@ -63,8 +63,10 @@ func (o *GetAppsAppCallsCallLogOK) Error() string { func (o *GetAppsAppCallsCallLogOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + o.Payload = new(models.LogWrapper) + // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { return err } diff --git a/vendor/github.com/fnproject/fn_go/client/operations/operations_client.go b/vendor/github.com/fnproject/fn_go/client/operations/operations_client.go index 3b40c362a..56923b5ab 100644 --- a/vendor/github.com/fnproject/fn_go/client/operations/operations_client.go +++ b/vendor/github.com/fnproject/fn_go/client/operations/operations_client.go @@ -39,7 +39,7 @@ func (a *Client) GetAppsAppCallsCallLog(params *GetAppsAppCallsCallLogParams) (* ID: "GetAppsAppCallsCallLog", Method: "GET", PathPattern: "/apps/{app}/calls/{call}/log", - ProducesMediaTypes: []string{"text/plain"}, + ProducesMediaTypes: []string{"application/json"}, ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http", "https"}, Params: params, diff --git a/vendor/github.com/fnproject/fn_go/models/app.go b/vendor/github.com/fnproject/fn_go/models/app.go index fdb669e0a..cb4a42773 100644 --- a/vendor/github.com/fnproject/fn_go/models/app.go +++ b/vendor/github.com/fnproject/fn_go/models/app.go @@ -10,6 +10,7 @@ import ( "github.com/go-openapi/errors" "github.com/go-openapi/swag" + "github.com/go-openapi/validate" ) // App app @@ -19,21 +20,65 @@ type App struct { // Application configuration, applied to all routes. Config map[string]string `json:"config,omitempty"` + // Time when app was created. Always in UTC. + // Read Only: true + CreatedAt strfmt.DateTime `json:"created_at,omitempty"` + // Name of this app. Must be different than the image name. Can ony contain alphanumeric, -, and _. // Read Only: true Name string `json:"name,omitempty"` + + // Most recent time that app was updated. Always in UTC. 
+ // Read Only: true + UpdatedAt strfmt.DateTime `json:"updated_at,omitempty"` } // Validate validates this app func (m *App) Validate(formats strfmt.Registry) error { var res []error + if err := m.validateCreatedAt(formats); err != nil { + // prop + res = append(res, err) + } + + if err := m.validateUpdatedAt(formats); err != nil { + // prop + res = append(res, err) + } + if len(res) > 0 { return errors.CompositeValidationError(res...) } return nil } +func (m *App) validateCreatedAt(formats strfmt.Registry) error { + + if swag.IsZero(m.CreatedAt) { // not required + return nil + } + + if err := validate.FormatOf("created_at", "body", "date-time", m.CreatedAt.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *App) validateUpdatedAt(formats strfmt.Registry) error { + + if swag.IsZero(m.UpdatedAt) { // not required + return nil + } + + if err := validate.FormatOf("updated_at", "body", "date-time", m.UpdatedAt.String(), formats); err != nil { + return err + } + + return nil +} + // MarshalBinary interface implementation func (m *App) MarshalBinary() ([]byte, error) { if m == nil { diff --git a/vendor/github.com/fnproject/fn_go/models/app_wrapper.go b/vendor/github.com/fnproject/fn_go/models/app_wrapper.go index 441f52879..b28e785d2 100644 --- a/vendor/github.com/fnproject/fn_go/models/app_wrapper.go +++ b/vendor/github.com/fnproject/fn_go/models/app_wrapper.go @@ -59,6 +59,7 @@ func (m *AppWrapper) validateApp(formats strfmt.Registry) error { } return err } + } return nil @@ -78,6 +79,7 @@ func (m *AppWrapper) validateError(formats strfmt.Registry) error { } return err } + } return nil diff --git a/vendor/github.com/fnproject/fn_go/models/apps_wrapper.go b/vendor/github.com/fnproject/fn_go/models/apps_wrapper.go index fb7fe7234..5d03c4e03 100644 --- a/vendor/github.com/fnproject/fn_go/models/apps_wrapper.go +++ b/vendor/github.com/fnproject/fn_go/models/apps_wrapper.go @@ -6,6 +6,8 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( + "strconv" + strfmt "github.com/go-openapi/strfmt" "github.com/go-openapi/errors" @@ -19,7 +21,7 @@ type AppsWrapper struct { // apps // Required: true - Apps AppsWrapperApps `json:"apps"` + Apps []*App `json:"apps"` // error Error *ErrorBody `json:"error,omitempty"` @@ -55,11 +57,23 @@ func (m *AppsWrapper) validateApps(formats strfmt.Registry) error { return err } - if err := m.Apps.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("apps") + for i := 0; i < len(m.Apps); i++ { + + if swag.IsZero(m.Apps[i]) { // not required + continue } - return err + + if m.Apps[i] != nil { + + if err := m.Apps[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("apps" + "." 
+ strconv.Itoa(i)) + } + return err + } + + } + } return nil @@ -79,6 +93,7 @@ func (m *AppsWrapper) validateError(formats strfmt.Registry) error { } return err } + } return nil diff --git a/vendor/github.com/fnproject/fn_go/models/call.go b/vendor/github.com/fnproject/fn_go/models/call.go index e296ee452..cfe439a9c 100644 --- a/vendor/github.com/fnproject/fn_go/models/call.go +++ b/vendor/github.com/fnproject/fn_go/models/call.go @@ -6,10 +6,13 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( + "strconv" + strfmt "github.com/go-openapi/strfmt" "github.com/go-openapi/errors" "github.com/go-openapi/swag" + "github.com/go-openapi/validate" ) // Call call @@ -44,8 +47,9 @@ type Call struct { // Read Only: true StartedAt strfmt.DateTime `json:"started_at,omitempty"` - // stats - Stats CallStats `json:"stats"` + // A histogram of stats for a call, each is a snapshot of a calls state at the timestamp. + // Read Only: true + Stats []*Stat `json:"stats"` // Call execution status. // Read Only: true @@ -56,12 +60,99 @@ type Call struct { func (m *Call) Validate(formats strfmt.Registry) error { var res []error + if err := m.validateCompletedAt(formats); err != nil { + // prop + res = append(res, err) + } + + if err := m.validateCreatedAt(formats); err != nil { + // prop + res = append(res, err) + } + + if err := m.validateStartedAt(formats); err != nil { + // prop + res = append(res, err) + } + + if err := m.validateStats(formats); err != nil { + // prop + res = append(res, err) + } + if len(res) > 0 { return errors.CompositeValidationError(res...) } return nil } +func (m *Call) validateCompletedAt(formats strfmt.Registry) error { + + if swag.IsZero(m.CompletedAt) { // not required + return nil + } + + if err := validate.FormatOf("completed_at", "body", "date-time", m.CompletedAt.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *Call) validateCreatedAt(formats strfmt.Registry) error { + + if swag.IsZero(m.CreatedAt) { // not required + return nil + } + + if err := validate.FormatOf("created_at", "body", "date-time", m.CreatedAt.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *Call) validateStartedAt(formats strfmt.Registry) error { + + if swag.IsZero(m.StartedAt) { // not required + return nil + } + + if err := validate.FormatOf("started_at", "body", "date-time", m.StartedAt.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *Call) validateStats(formats strfmt.Registry) error { + + if swag.IsZero(m.Stats) { // not required + return nil + } + + for i := 0; i < len(m.Stats); i++ { + + if swag.IsZero(m.Stats[i]) { // not required + continue + } + + if m.Stats[i] != nil { + + if err := m.Stats[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("stats" + "." 
+ strconv.Itoa(i)) + } + return err + } + + } + + } + + return nil +} + // MarshalBinary interface implementation func (m *Call) MarshalBinary() ([]byte, error) { if m == nil { diff --git a/vendor/github.com/fnproject/fn_go/models/call_wrapper.go b/vendor/github.com/fnproject/fn_go/models/call_wrapper.go index 48594aeb2..f3d420d40 100644 --- a/vendor/github.com/fnproject/fn_go/models/call_wrapper.go +++ b/vendor/github.com/fnproject/fn_go/models/call_wrapper.go @@ -51,6 +51,7 @@ func (m *CallWrapper) validateCall(formats strfmt.Registry) error { } return err } + } return nil diff --git a/vendor/github.com/fnproject/fn_go/models/calls_wrapper.go b/vendor/github.com/fnproject/fn_go/models/calls_wrapper.go index 37af1670c..3f21011f1 100644 --- a/vendor/github.com/fnproject/fn_go/models/calls_wrapper.go +++ b/vendor/github.com/fnproject/fn_go/models/calls_wrapper.go @@ -6,6 +6,8 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( + "strconv" + strfmt "github.com/go-openapi/strfmt" "github.com/go-openapi/errors" @@ -19,7 +21,7 @@ type CallsWrapper struct { // calls // Required: true - Calls CallsWrapperCalls `json:"calls"` + Calls []*Call `json:"calls"` // error Error *ErrorBody `json:"error,omitempty"` @@ -55,11 +57,23 @@ func (m *CallsWrapper) validateCalls(formats strfmt.Registry) error { return err } - if err := m.Calls.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("calls") + for i := 0; i < len(m.Calls); i++ { + + if swag.IsZero(m.Calls[i]) { // not required + continue } - return err + + if m.Calls[i] != nil { + + if err := m.Calls[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("calls" + "." + strconv.Itoa(i)) + } + return err + } + + } + } return nil @@ -79,6 +93,7 @@ func (m *CallsWrapper) validateError(formats strfmt.Registry) error { } return err } + } return nil diff --git a/vendor/github.com/fnproject/fn_go/models/error.go b/vendor/github.com/fnproject/fn_go/models/error.go index 00080eb53..7959433fb 100644 --- a/vendor/github.com/fnproject/fn_go/models/error.go +++ b/vendor/github.com/fnproject/fn_go/models/error.go @@ -49,6 +49,7 @@ func (m *Error) validateError(formats strfmt.Registry) error { } return err } + } return nil diff --git a/vendor/github.com/fnproject/fn_go/models/log_wrapper.go b/vendor/github.com/fnproject/fn_go/models/log_wrapper.go index dbc1c8e21..b0143fdc3 100644 --- a/vendor/github.com/fnproject/fn_go/models/log_wrapper.go +++ b/vendor/github.com/fnproject/fn_go/models/log_wrapper.go @@ -51,6 +51,7 @@ func (m *LogWrapper) validateLog(formats strfmt.Registry) error { } return err } + } return nil diff --git a/vendor/github.com/fnproject/fn_go/models/route.go b/vendor/github.com/fnproject/fn_go/models/route.go index 4aa3b430f..89fd8995d 100644 --- a/vendor/github.com/fnproject/fn_go/models/route.go +++ b/vendor/github.com/fnproject/fn_go/models/route.go @@ -22,6 +22,13 @@ type Route struct { // Route configuration - overrides application configuration Config map[string]string `json:"config,omitempty"` + // Max usable CPU cores for this route. Value in MilliCPUs (eg. 500m) or as floating-point (eg. 0.5) + Cpus string `json:"cpus,omitempty"` + + // Time when route was created. Always in UTC. + // Read Only: true + CreatedAt strfmt.DateTime `json:"created_at,omitempty"` + // Payload format sent into function. 
Format string `json:"format,omitempty"` @@ -46,12 +53,21 @@ type Route struct { // Route type Type string `json:"type,omitempty"` + + // Most recent time that route was updated. Always in UTC. + // Read Only: true + UpdatedAt strfmt.DateTime `json:"updated_at,omitempty"` } // Validate validates this route func (m *Route) Validate(formats strfmt.Registry) error { var res []error + if err := m.validateCreatedAt(formats); err != nil { + // prop + res = append(res, err) + } + if err := m.validateFormat(formats); err != nil { // prop res = append(res, err) @@ -67,12 +83,30 @@ func (m *Route) Validate(formats strfmt.Registry) error { res = append(res, err) } + if err := m.validateUpdatedAt(formats); err != nil { + // prop + res = append(res, err) + } + if len(res) > 0 { return errors.CompositeValidationError(res...) } return nil } +func (m *Route) validateCreatedAt(formats strfmt.Registry) error { + + if swag.IsZero(m.CreatedAt) { // not required + return nil + } + + if err := validate.FormatOf("created_at", "body", "date-time", m.CreatedAt.String(), formats); err != nil { + return err + } + + return nil +} + var routeTypeFormatPropEnum []interface{} func init() { @@ -86,10 +120,13 @@ func init() { } const ( + // RouteFormatDefault captures enum value "default" RouteFormatDefault string = "default" + // RouteFormatHTTP captures enum value "http" RouteFormatHTTP string = "http" + // RouteFormatJSON captures enum value "json" RouteFormatJSON string = "json" ) @@ -122,10 +159,6 @@ func (m *Route) validateHeaders(formats strfmt.Registry) error { return nil } - if swag.IsZero(m.Headers) { // not required - return nil - } - return nil } @@ -142,8 +175,10 @@ func init() { } const ( + // RouteTypeSync captures enum value "sync" RouteTypeSync string = "sync" + // RouteTypeAsync captures enum value "async" RouteTypeAsync string = "async" ) @@ -170,6 +205,19 @@ func (m *Route) validateType(formats strfmt.Registry) error { return nil } +func (m *Route) validateUpdatedAt(formats strfmt.Registry) error { + + if swag.IsZero(m.UpdatedAt) { // not required + return nil + } + + if err := validate.FormatOf("updated_at", "body", "date-time", m.UpdatedAt.String(), formats); err != nil { + return err + } + + return nil +} + // MarshalBinary interface implementation func (m *Route) MarshalBinary() ([]byte, error) { if m == nil { diff --git a/vendor/github.com/fnproject/fn_go/models/route_wrapper.go b/vendor/github.com/fnproject/fn_go/models/route_wrapper.go index 1de7cd4cf..2f2c38190 100644 --- a/vendor/github.com/fnproject/fn_go/models/route_wrapper.go +++ b/vendor/github.com/fnproject/fn_go/models/route_wrapper.go @@ -62,6 +62,7 @@ func (m *RouteWrapper) validateError(formats strfmt.Registry) error { } return err } + } return nil @@ -81,6 +82,7 @@ func (m *RouteWrapper) validateRoute(formats strfmt.Registry) error { } return err } + } return nil diff --git a/vendor/github.com/fnproject/fn_go/models/routes_wrapper.go b/vendor/github.com/fnproject/fn_go/models/routes_wrapper.go index f54507e02..5a0cfdaae 100644 --- a/vendor/github.com/fnproject/fn_go/models/routes_wrapper.go +++ b/vendor/github.com/fnproject/fn_go/models/routes_wrapper.go @@ -6,6 +6,8 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( + "strconv" + strfmt "github.com/go-openapi/strfmt" "github.com/go-openapi/errors" @@ -26,7 +28,7 @@ type RoutesWrapper struct { // routes // Required: true - Routes RoutesWrapperRoutes `json:"routes"` + Routes []*Route `json:"routes"` } // Validate validates 
this routes wrapper @@ -63,6 +65,7 @@ func (m *RoutesWrapper) validateError(formats strfmt.Registry) error { } return err } + } return nil @@ -74,11 +77,23 @@ func (m *RoutesWrapper) validateRoutes(formats strfmt.Registry) error { return err } - if err := m.Routes.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("routes") + for i := 0; i < len(m.Routes); i++ { + + if swag.IsZero(m.Routes[i]) { // not required + continue } - return err + + if m.Routes[i] != nil { + + if err := m.Routes[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("routes" + "." + strconv.Itoa(i)) + } + return err + } + + } + } return nil diff --git a/vendor/github.com/fnproject/fn_go/models/stat.go b/vendor/github.com/fnproject/fn_go/models/stat.go index cabd5d8d1..f20958848 100644 --- a/vendor/github.com/fnproject/fn_go/models/stat.go +++ b/vendor/github.com/fnproject/fn_go/models/stat.go @@ -10,6 +10,7 @@ import ( "github.com/go-openapi/errors" "github.com/go-openapi/swag" + "github.com/go-openapi/validate" ) // Stat stat @@ -32,6 +33,11 @@ func (m *Stat) Validate(formats strfmt.Registry) error { res = append(res, err) } + if err := m.validateTimestamp(formats); err != nil { + // prop + res = append(res, err) + } + if len(res) > 0 { return errors.CompositeValidationError(res...) } @@ -52,6 +58,20 @@ func (m *Stat) validateMetrics(formats strfmt.Registry) error { } return err } + + } + + return nil +} + +func (m *Stat) validateTimestamp(formats strfmt.Registry) error { + + if swag.IsZero(m.Timestamp) { // not required + return nil + } + + if err := validate.FormatOf("timestamp", "body", "date-time", m.Timestamp.String(), formats); err != nil { + return err } return nil diff --git a/vendor/github.com/garyburd/redigo/.travis.yml b/vendor/github.com/garyburd/redigo/.travis.yml index 158f16142..25d626529 100644 --- a/vendor/github.com/garyburd/redigo/.travis.yml +++ b/vendor/github.com/garyburd/redigo/.travis.yml @@ -5,11 +5,12 @@ services: go: - 1.4 - - 1.5 - - 1.6 - - 1.7 - - 1.8 - - 1.9 + - 1.5.x + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x - tip script: diff --git a/vendor/github.com/garyburd/redigo/redis/list_test.go b/vendor/github.com/garyburd/redigo/redis/list_test.go new file mode 100644 index 000000000..a1e9d3378 --- /dev/null +++ b/vendor/github.com/garyburd/redigo/redis/list_test.go @@ -0,0 +1,85 @@ +// Copyright 2018 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +// +build go1.9 + +package redis + +import "testing" + +func TestPoolList(t *testing.T) { + var idle idleList + var a, b, c idleConn + + check := func(ics ...*idleConn) { + if idle.count != len(ics) { + t.Fatal("idle.count != len(ics)") + } + if len(ics) == 0 { + if idle.front != nil { + t.Fatalf("front not nil") + } + if idle.back != nil { + t.Fatalf("back not nil") + } + return + } + if idle.front != ics[0] { + t.Fatal("front != ics[0]") + } + if idle.back != ics[len(ics)-1] { + t.Fatal("back != ics[len(ics)-1]") + } + if idle.front.prev != nil { + t.Fatal("front.prev != nil") + } + if idle.back.next != nil { + t.Fatal("back.next != nil") + } + for i := 1; i < len(ics)-1; i++ { + if ics[i-1].next != ics[i] { + t.Fatal("ics[i-1].next != ics[i]") + } + if ics[i+1].prev != ics[i] { + t.Fatal("ics[i+1].prev != ics[i]") + } + } + } + + idle.pushFront(&c) + check(&c) + idle.pushFront(&b) + check(&b, &c) + idle.pushFront(&a) + check(&a, &b, &c) + idle.popFront() + check(&b, &c) + idle.popFront() + check(&c) + idle.popFront() + check() + + idle.pushFront(&c) + check(&c) + idle.pushFront(&b) + check(&b, &c) + idle.pushFront(&a) + check(&a, &b, &c) + idle.popBack() + check(&a, &b) + idle.popBack() + check(&a) + idle.popBack() + check() +} diff --git a/vendor/github.com/garyburd/redigo/redis/pool.go b/vendor/github.com/garyburd/redigo/redis/pool.go index 5af97a058..3e6f4260a 100644 --- a/vendor/github.com/garyburd/redigo/redis/pool.go +++ b/vendor/github.com/garyburd/redigo/redis/pool.go @@ -16,13 +16,13 @@ package redis import ( "bytes" - "container/list" "crypto/rand" "crypto/sha1" "errors" "io" "strconv" "sync" + "sync/atomic" "time" "github.com/garyburd/redigo/internal" @@ -150,19 +150,13 @@ type Pool struct { // for a connection to be returned to the pool before returning. Wait bool - // mu protects fields defined below. - mu sync.Mutex - cond *sync.Cond - closed bool - active int + chInitialized uint32 // set to 1 when field ch is initialized - // Stack of idleConn with most recently used at the front. - idle list.List -} - -type idleConn struct { - c Conn - t time.Time + mu sync.Mutex // mu protects the following fields + closed bool // set to true when the pool is closed. + active int // the number of open connections in the pool + ch chan struct{} // limits open connections when p.Wait is true + idle idleList // idle connections } // NewPool creates a new pool. @@ -178,7 +172,7 @@ func NewPool(newFn func() (Conn, error), maxIdle int) *Pool { // getting an underlying connection, then the connection Err, Do, Send, Flush // and Receive methods return that error. func (p *Pool) Get() Conn { - c, err := p.get() + c, err := p.get(nil) if err != nil { return errorConnection{err} } @@ -187,7 +181,8 @@ func (p *Pool) Get() Conn { // PoolStats contains pool statistics. type PoolStats struct { - // ActiveCount is the number of connections in the pool. The count includes idle connections and connections in use. + // ActiveCount is the number of connections in the pool. The count includes + // idle connections and connections in use. ActiveCount int // IdleCount is the number of idle connections in the pool. IdleCount int @@ -198,14 +193,15 @@ func (p *Pool) Stats() PoolStats { p.mu.Lock() stats := PoolStats{ ActiveCount: p.active, - IdleCount: p.idle.Len(), + IdleCount: p.idle.count, } p.mu.Unlock() return stats } -// ActiveCount returns the number of connections in the pool. The count includes idle connections and connections in use. +// ActiveCount returns the number of connections in the pool. 
The count +// includes idle connections and connections in use. func (p *Pool) ActiveCount() int { p.mu.Lock() active := p.active @@ -216,7 +212,7 @@ func (p *Pool) ActiveCount() int { // IdleCount returns the number of idle connections in the pool. func (p *Pool) IdleCount() int { p.mu.Lock() - idle := p.idle.Len() + idle := p.idle.count p.mu.Unlock() return idle } @@ -224,132 +220,146 @@ func (p *Pool) IdleCount() int { // Close releases the resources used by the pool. func (p *Pool) Close() error { p.mu.Lock() - idle := p.idle - p.idle.Init() + if p.closed { + p.mu.Unlock() + return nil + } p.closed = true - p.active -= idle.Len() - if p.cond != nil { - p.cond.Broadcast() + p.active -= p.idle.count + ic := p.idle.front + p.idle.count = 0 + p.idle.front, p.idle.back = nil, nil + if p.ch != nil { + close(p.ch) } p.mu.Unlock() - for e := idle.Front(); e != nil; e = e.Next() { - e.Value.(idleConn).c.Close() + for ; ic != nil; ic = ic.next { + ic.c.Close() } return nil } -// release decrements the active count and signals waiters. The caller must -// hold p.mu during the call. -func (p *Pool) release() { - p.active -= 1 - if p.cond != nil { - p.cond.Signal() +func (p *Pool) lazyInit() { + // Fast path. + if atomic.LoadUint32(&p.chInitialized) == 1 { + return } + // Slow path. + p.mu.Lock() + if p.chInitialized == 0 { + p.ch = make(chan struct{}, p.MaxActive) + if p.closed { + close(p.ch) + } else { + for i := 0; i < p.MaxActive; i++ { + p.ch <- struct{}{} + } + } + atomic.StoreUint32(&p.chInitialized, 1) + } + p.mu.Unlock() } // get prunes stale connections and returns a connection from the idle list or // creates a new connection. -func (p *Pool) get() (Conn, error) { +func (p *Pool) get(ctx interface { + Done() <-chan struct{} + Err() error +}) (Conn, error) { + + // Handle limit for p.Wait == true. + if p.Wait && p.MaxActive > 0 { + p.lazyInit() + if ctx == nil { + <-p.ch + } else { + select { + case <-p.ch: + case <-ctx.Done(): + return nil, ctx.Err() + } + } + } + p.mu.Lock() - // Prune stale connections. - - if timeout := p.IdleTimeout; timeout > 0 { - for i, n := 0, p.idle.Len(); i < n; i++ { - e := p.idle.Back() - if e == nil { - break - } - ic := e.Value.(idleConn) - if ic.t.Add(timeout).After(nowFunc()) { - break - } - p.idle.Remove(e) - p.release() + // Prune stale connections at the back of the idle list. + if p.IdleTimeout > 0 { + n := p.idle.count + for i := 0; i < n && p.idle.back != nil && p.idle.back.t.Add(p.IdleTimeout).Before(nowFunc()); i++ { + c := p.idle.back.c + p.idle.popBack() p.mu.Unlock() - ic.c.Close() + c.Close() p.mu.Lock() + p.active-- } } - for { - // Get idle connection. - - for i, n := 0, p.idle.Len(); i < n; i++ { - e := p.idle.Front() - if e == nil { - break - } - ic := e.Value.(idleConn) - p.idle.Remove(e) - test := p.TestOnBorrow - p.mu.Unlock() - if test == nil || test(ic.c, ic.t) == nil { - return ic.c, nil - } - ic.c.Close() - p.mu.Lock() - p.release() + // Get idle connection from the front of idle list. + for p.idle.front != nil { + ic := p.idle.front + p.idle.popFront() + p.mu.Unlock() + if p.TestOnBorrow == nil || p.TestOnBorrow(ic.c, ic.t) == nil { + return ic.c, nil } - - // Check for pool closed before dialing a new connection. - - if p.closed { - p.mu.Unlock() - return nil, errors.New("redigo: get on closed pool") - } - - // Dial new connection if under limit. 
- - if p.MaxActive == 0 || p.active < p.MaxActive { - dial := p.Dial - p.active += 1 - p.mu.Unlock() - c, err := dial() - if err != nil { - p.mu.Lock() - p.release() - p.mu.Unlock() - c = nil - } - return c, err - } - - if !p.Wait { - p.mu.Unlock() - return nil, ErrPoolExhausted - } - - if p.cond == nil { - p.cond = sync.NewCond(&p.mu) - } - p.cond.Wait() + ic.c.Close() + p.mu.Lock() + p.active-- } + + // Check for pool closed before dialing a new connection. + if p.closed { + p.mu.Unlock() + return nil, errors.New("redigo: get on closed pool") + } + + // Handle limit for p.Wait == false. + if !p.Wait && p.MaxActive > 0 && p.active >= p.MaxActive { + p.mu.Unlock() + return nil, ErrPoolExhausted + } + + p.active++ + p.mu.Unlock() + c, err := p.Dial() + if err != nil { + c = nil + p.mu.Lock() + p.active-- + if p.ch != nil && !p.closed { + p.ch <- struct{}{} + } + p.mu.Unlock() + } + return c, err } func (p *Pool) put(c Conn, forceClose bool) error { - err := c.Err() p.mu.Lock() - if !p.closed && err == nil && !forceClose { - p.idle.PushFront(idleConn{t: nowFunc(), c: c}) - if p.idle.Len() > p.MaxIdle { - c = p.idle.Remove(p.idle.Back()).(idleConn).c + if !p.closed && !forceClose { + p.idle.pushFront(&idleConn{t: nowFunc(), c: c}) + if p.idle.count > p.MaxIdle { + c = p.idle.back.c + p.idle.popBack() } else { c = nil } } - if c == nil { - if p.cond != nil { - p.cond.Signal() - } + if c != nil { p.mu.Unlock() - return nil + c.Close() + p.mu.Lock() + p.active-- } - p.release() + if p.ch != nil && !p.closed { + p.ch <- struct{}{} + } p.mu.Unlock() - return c.Close() + return nil } type pooledConnection struct { @@ -409,7 +419,7 @@ func (pc *pooledConnection) Close() error { } } c.Do("") - pc.p.put(c, pc.state != 0) + pc.p.put(c, pc.state != 0 || c.Err() != nil) return nil } @@ -467,3 +477,51 @@ func (ec errorConnection) Close() error func (ec errorConnection) Flush() error { return ec.err } func (ec errorConnection) Receive() (interface{}, error) { return nil, ec.err } func (ec errorConnection) ReceiveWithTimeout(time.Duration) (interface{}, error) { return nil, ec.err } + +type idleList struct { + count int + front, back *idleConn +} + +type idleConn struct { + c Conn + t time.Time + next, prev *idleConn +} + +func (l *idleList) pushFront(ic *idleConn) { + ic.next = l.front + ic.prev = nil + if l.count == 0 { + l.back = ic + } else { + l.front.prev = ic + } + l.front = ic + l.count++ + return +} + +func (l *idleList) popFront() { + ic := l.front + l.count-- + if l.count == 0 { + l.front, l.back = nil, nil + } else { + ic.next.prev = nil + l.front = ic.next + } + ic.next, ic.prev = nil, nil +} + +func (l *idleList) popBack() { + ic := l.back + l.count-- + if l.count == 0 { + l.front, l.back = nil, nil + } else { + ic.prev.next = nil + l.back = ic.prev + } + ic.next, ic.prev = nil, nil +} diff --git a/vendor/github.com/garyburd/redigo/redis/pool17.go b/vendor/github.com/garyburd/redigo/redis/pool17.go new file mode 100644 index 000000000..57a22644f --- /dev/null +++ b/vendor/github.com/garyburd/redigo/redis/pool17.go @@ -0,0 +1,35 @@ +// Copyright 2018 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// +build go1.7 + +package redis + +import "context" + +// GetContext gets a connection using the provided context. +// +// The provided Context must be non-nil. If the context expires before the +// connection is complete, an error is returned. Any expiration on the context +// will not affect the returned connection. +// +// If the function completes without error, then the application must close the +// returned connection. +func (p *Pool) GetContext(ctx context.Context) (Conn, error) { + c, err := p.get(ctx) + if err != nil { + return errorConnection{err}, err + } + return &pooledConnection{p: p, c: c}, nil +} diff --git a/vendor/github.com/garyburd/redigo/redis/pool17_test.go b/vendor/github.com/garyburd/redigo/redis/pool17_test.go new file mode 100644 index 000000000..51ef4b643 --- /dev/null +++ b/vendor/github.com/garyburd/redigo/redis/pool17_test.go @@ -0,0 +1,58 @@ +// Copyright 2018 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// +build go1.7 + +package redis_test + +import ( + "context" + "testing" + + "github.com/garyburd/redigo/redis" +) + +func TestWaitPoolGetAfterClose(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 1, + MaxActive: 1, + Dial: d.dial, + Wait: true, + } + p.Close() + _, err := p.GetContext(context.Background()) + if err == nil { + t.Fatal("expected error") + } +} + +func TestWaitPoolGetCanceledContext(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 1, + MaxActive: 1, + Dial: d.dial, + Wait: true, + } + defer p.Close() + ctx, f := context.WithCancel(context.Background()) + f() + c := p.Get() + defer c.Close() + _, err := p.GetContext(ctx) + if err != context.Canceled { + t.Fatalf("got error %v, want %v", err, context.Canceled) + } +} diff --git a/vendor/github.com/garyburd/redigo/redis/pool_test.go b/vendor/github.com/garyburd/redigo/redis/pool_test.go index b1c510679..e8b55781e 100644 --- a/vendor/github.com/garyburd/redigo/redis/pool_test.go +++ b/vendor/github.com/garyburd/redigo/redis/pool_test.go @@ -231,7 +231,7 @@ func TestPoolTimeout(t *testing.T) { d.check("1", p, 1, 1, 0) - now = now.Add(p.IdleTimeout) + now = now.Add(p.IdleTimeout + 1) c = p.Get() c.Do("PING") @@ -445,9 +445,6 @@ func startGoroutines(p *redis.Pool, cmd string, args ...interface{}) chan error }() } - // Wait for goroutines to block. 
- time.Sleep(time.Second / 4) - return errs } diff --git a/vendor/github.com/garyburd/redigo/redis/scan_test.go b/vendor/github.com/garyburd/redigo/redis/scan_test.go index 0c1956185..7930c27ce 100644 --- a/vendor/github.com/garyburd/redigo/redis/scan_test.go +++ b/vendor/github.com/garyburd/redigo/redis/scan_test.go @@ -84,8 +84,8 @@ var scanConversionTests = []struct { {"1m", durationScan{Duration: time.Minute}}, {[]byte("1m"), durationScan{Duration: time.Minute}}, {time.Minute.Nanoseconds(), durationScan{Duration: time.Minute}}, - {[]interface{}{[]byte("1m")}, []durationScan{durationScan{Duration: time.Minute}}}, - {[]interface{}{[]byte("1m")}, []*durationScan{&durationScan{Duration: time.Minute}}}, + {[]interface{}{[]byte("1m")}, []durationScan{{Duration: time.Minute}}}, + {[]interface{}{[]byte("1m")}, []*durationScan{{Duration: time.Minute}}}, } func TestScanConversion(t *testing.T) { @@ -318,7 +318,7 @@ var scanSliceTests = []struct { []interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")}, nil, true, - []*struct{ A, B string }{{"a1", "b1"}, {"a2", "b2"}}, + []*struct{ A, B string }{{A: "a1", B: "b1"}, {A: "a2", B: "b2"}}, }, { []interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")}, diff --git a/vendor/github.com/go-ini/ini/.travis.yml b/vendor/github.com/go-ini/ini/.travis.yml index b097527e1..75fe7b74b 100644 --- a/vendor/github.com/go-ini/ini/.travis.yml +++ b/vendor/github.com/go-ini/ini/.travis.yml @@ -6,8 +6,9 @@ go: - 1.7.x - 1.8.x - 1.9.x + - 1.10.x -script: +script: - go get golang.org/x/tools/cmd/cover - go get github.com/smartystreets/goconvey - mkdir -p $HOME/gopath/src/gopkg.in diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile index 1316911d2..af27ff076 100644 --- a/vendor/github.com/go-ini/ini/Makefile +++ b/vendor/github.com/go-ini/ini/Makefile @@ -12,4 +12,4 @@ vet: go vet coverage: - go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out \ No newline at end of file + go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md index f4ff27cd3..6b7b73fed 100644 --- a/vendor/github.com/go-ini/ini/README.md +++ b/vendor/github.com/go-ini/ini/README.md @@ -87,7 +87,7 @@ key1, err := sec1.GetKey("Key") key2, err := sec2.GetKey("KeY") ``` -#### MySQL-like boolean key +#### MySQL-like boolean key MySQL's configuration allows a key without value as follows: @@ -320,7 +320,7 @@ cfg, err := ini.LoadSources(ini.LoadOptions{ }, "filename") ``` -Holy crap! +Holy crap! Note that single quotes around values will be stripped: @@ -339,7 +339,7 @@ How do you transform this to regular format automatically? ```go cfg, err := ini.LoadSources(ini.LoadOptions{UnescapeValueDoubleQuotes: true}, "en-US.ini")) -cfg.Section("").Key("create_repo").String() +cfg.Section("").Key("create_repo").String() // You got: created repository %s ``` @@ -440,7 +440,7 @@ By default, spaces are used to align "=" sign between key and values, to disable ```go ini.PrettyFormat = false -``` +``` ## Advanced Usage @@ -489,6 +489,33 @@ cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"] ``` +### Same Key with Multiple Values + +Do you ever have a configuration file like this? 
+
+```ini
+[remote "origin"]
+url = https://github.com/Antergone/test1.git
+url = https://github.com/Antergone/test2.git
+fetch = +refs/heads/*:refs/remotes/origin/*
+```
+
+By default, only the last read value will be kept for the key `url`. If you want to keep every value of this key, you can use `ShadowLoad`:
+
+```go
+f, err := ini.ShadowLoad(".gitconfig")
+// ...
+
+f.Section(`remote "origin"`).Key("url").String()
+// Result: https://github.com/Antergone/test1.git
+
+f.Section(`remote "origin"`).Key("url").ValueWithShadows()
+// Result: []string{
+//   "https://github.com/Antergone/test1.git",
+//   "https://github.com/Antergone/test2.git",
+// }
+```
+
 ### Unparseable Sections
 
 Sometimes, you have sections that do not contain key-value pairs but raw content; to handle such cases, you can use `LoadOptions.UnparsableSections`:
diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md
index 69aefef12..67a536302 100644
--- a/vendor/github.com/go-ini/ini/README_ZH.md
+++ b/vendor/github.com/go-ini/ini/README_ZH.md
@@ -332,7 +332,7 @@ create_repo="创建了仓库 %s"
 
 ```go
 cfg, err := ini.LoadSources(ini.LoadOptions{UnescapeValueDoubleQuotes: true}, "en-US.ini"))
-cfg.Section("").Key("create_repo").String() 
+cfg.Section("").Key("create_repo").String()
 // You got: 创建了仓库 %s
 ```
 
@@ -433,7 +433,7 @@ cfg.WriteToIndent(writer, "\t")
 
 ```go
 ini.PrettyFormat = false
-``` 
+```
 
 ## Advanced Usage
 
@@ -482,6 +482,33 @@ cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
 cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
 ```
 
+### Same Key with Multiple Values
+
+Have you ever been troubled by a configuration file like the one below?
+
+```ini
+[remote "origin"]
+url = https://github.com/Antergone/test1.git
+url = https://github.com/Antergone/test2.git
+fetch = +refs/heads/*:refs/remotes/origin/*
+```
+
+That's right: by default, only the last occurrence of the value is kept for `url`. If you want to keep all of the values, `ShadowLoad` solves this easily:
+
+```go
+f, err := ini.ShadowLoad(".gitconfig")
+// ...
+
+f.Section(`remote "origin"`).Key("url").String()
+// Result: https://github.com/Antergone/test1.git
+
+f.Section(`remote "origin"`).Key("url").ValueWithShadows()
+// Result: []string{
+//   "https://github.com/Antergone/test1.git",
+//   "https://github.com/Antergone/test2.git",
+// }
+```
+
 ### Unparseable Sections
 
 If you run into special sections that contain no regular key-value pairs but only free-form text, you can handle them with `LoadOptions.UnparsableSections`:
diff --git a/vendor/github.com/go-ini/ini/file.go b/vendor/github.com/go-ini/ini/file.go
index ce26c3b31..ae6264acf 100644
--- a/vendor/github.com/go-ini/ini/file.go
+++ b/vendor/github.com/go-ini/ini/file.go
@@ -140,9 +140,14 @@ func (f *File) Section(name string) *Section {
 
 // Sections returns the list of Section.
 func (f *File) Sections() []*Section {
+	if f.BlockMode {
+		f.lock.RLock()
+		defer f.lock.RUnlock()
+	}
+
 	sections := make([]*Section, len(f.sectionList))
-	for i := range f.sectionList {
-		sections[i] = f.Section(f.sectionList[i])
+	for i, name := range f.sectionList {
+		sections[i] = f.sections[name]
 	}
 	return sections
 }
diff --git a/vendor/github.com/go-ini/ini/file_test.go b/vendor/github.com/go-ini/ini/file_test.go
index 11612eb9a..593224b58 100644
--- a/vendor/github.com/go-ini/ini/file_test.go
+++ b/vendor/github.com/go-ini/ini/file_test.go
@@ -16,6 +16,7 @@ package ini_test
 
 import (
 	"bytes"
+	"io/ioutil"
 	"testing"
 
 	.
"github.com/smartystreets/goconvey/convey" @@ -253,93 +254,15 @@ func TestFile_WriteTo(t *testing.T) { var buf bytes.Buffer _, err = f.WriteTo(&buf) So(err, ShouldBeNil) - So(buf.String(), ShouldEqual, `; Package name -NAME = ini -; Package version -VERSION = v1 -; Package import path -IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s -; Information about package author -# Bio can be written in multiple lines. -[author] -; This is author name -NAME = Unknwon -E-MAIL = u@gogs.io -GITHUB = https://github.com/%(NAME)s -# Succeeding comment -BIO = """Gopher. -Coding addict. -Good man. -""" + golden := "testdata/TestFile_WriteTo.golden" + if *update { + ioutil.WriteFile(golden, buf.Bytes(), 0644) + } -[package] -CLONE_URL = https://%(IMPORT_PATH)s - -[package.sub] -UNUSED_KEY = should be deleted - -[features] -- = Support read/write comments of keys and sections -- = Support auto-increment of key names -- = Support load multiple files to overwrite key values - -[types] -STRING = str -BOOL = true -BOOL_FALSE = false -FLOAT64 = 1.25 -INT = 10 -TIME = 2015-01-01T20:17:05Z -DURATION = 2h45m -UINT = 3 - -[array] -STRINGS = en, zh, de -FLOAT64S = 1.1, 2.2, 3.3 -INTS = 1, 2, 3 -UINTS = 1, 2, 3 -TIMES = 2015-01-01T20:17:05Z,2015-01-01T20:17:05Z,2015-01-01T20:17:05Z - -[note] -empty_lines = next line is empty -boolean_key -more = notes - -; Comment before the section -; This is a comment for the section too -[comments] -; Comment before key -key = value -; This is a comment for key2 -key2 = value2 -key3 = "one", "two", "three" - -[string escapes] -key1 = value1, value2, value3 -key2 = value1\, value2 -key3 = val\ue1, value2 -key4 = value1\\, value\\\\2 -key5 = value1\,, value2 -key6 = aaa bbb\ and\ space ccc - -[advance] -value with quotes = some value -value quote2 again = some value -includes comment sign = `+"`"+"my#password"+"`"+` -includes comment sign2 = `+"`"+"my;password"+"`"+` -true = 2+3=5 -`+"`"+`1+1=2`+"`"+` = true -`+"`"+`6+1=7`+"`"+` = true -"""`+"`"+`5+5`+"`"+`""" = 10 -`+"`"+`"6+6"`+"`"+` = 12 -`+"`"+`7-2=4`+"`"+` = false -ADDRESS = """404 road, -NotFound, State, 50000""" -two_lines = how about continuation lines? -lots_of_lines = 1 2 3 4 - -`) + expected, err := ioutil.ReadFile(golden) + So(err, ShouldBeNil) + So(buf.String(), ShouldEqual, string(expected)) }) } diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go index 535d3588a..9f6ea3b41 100644 --- a/vendor/github.com/go-ini/ini/ini.go +++ b/vendor/github.com/go-ini/ini/ini.go @@ -32,7 +32,7 @@ const ( // Maximum allowed depth when recursively substituing variable names. _DEPTH_VALUES = 99 - _VERSION = "1.32.0" + _VERSION = "1.33.0" ) // Version returns current package version literal. 
diff --git a/vendor/github.com/go-ini/ini/ini_test.go b/vendor/github.com/go-ini/ini/ini_test.go index 3e6992d3f..df5fbe384 100644 --- a/vendor/github.com/go-ini/ini/ini_test.go +++ b/vendor/github.com/go-ini/ini/ini_test.go @@ -16,6 +16,7 @@ package ini_test import ( "bytes" + "flag" "io/ioutil" "testing" @@ -47,6 +48,8 @@ const ( _NOT_FOUND_CONF = "testdata/404.ini" ) +var update = flag.Bool("update", false, "Update .golden files") + func TestLoad(t *testing.T) { Convey("Load from good data sources", t, func() { f, err := ini.Load([]byte(` diff --git a/vendor/github.com/go-ini/ini/struct_test.go b/vendor/github.com/go-ini/ini/struct_test.go index 75987ea99..0dbbef22d 100644 --- a/vendor/github.com/go-ini/ini/struct_test.go +++ b/vendor/github.com/go-ini/ini/struct_test.go @@ -37,7 +37,7 @@ type testNested struct { Unused int `ini:"-"` } -type testEmbeded struct { +type TestEmbeded struct { GPA float64 } @@ -49,7 +49,7 @@ type testStruct struct { Born time.Time Time time.Duration `ini:"Duration"` Others testNested - *testEmbeded `ini:"grade"` + *TestEmbeded `ini:"grade"` Unused int `ini:"-"` Unsigned uint Omitted bool `ini:"omitthis,omitempty"` @@ -97,12 +97,12 @@ type unsupport2 struct { } } -type unsupport3 struct { +type Unsupport3 struct { Cities byte } type unsupport4 struct { - *unsupport3 `ini:"Others"` + *Unsupport3 `ini:"Others"` } type defaultValue struct { @@ -155,7 +155,7 @@ func Test_MapToStruct(t *testing.T) { So(fmt.Sprint(ts.Others.Populations), ShouldEqual, "[12345678 98765432]") So(fmt.Sprint(ts.Others.Coordinates), ShouldEqual, "[192.168 10.11]") So(ts.Others.Note, ShouldEqual, "Hello world!") - So(ts.testEmbeded.GPA, ShouldEqual, 2.8) + So(ts.TestEmbeded.GPA, ShouldEqual, 2.8) }) Convey("Map section to struct", func() { diff --git a/vendor/github.com/go-ini/ini/testdata/TestFile_WriteTo.golden b/vendor/github.com/go-ini/ini/testdata/TestFile_WriteTo.golden new file mode 100644 index 000000000..03409d86d --- /dev/null +++ b/vendor/github.com/go-ini/ini/testdata/TestFile_WriteTo.golden @@ -0,0 +1,86 @@ +; Package name +NAME = ini +; Package version +VERSION = v1 +; Package import path +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +; Information about package author +# Bio can be written in multiple lines. +[author] +; This is author name +NAME = Unknwon +E-MAIL = u@gogs.io +GITHUB = https://github.com/%(NAME)s +# Succeeding comment +BIO = """Gopher. +Coding addict. +Good man. 
+""" + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +UNUSED_KEY = should be deleted + +[features] +- = Support read/write comments of keys and sections +- = Support auto-increment of key names +- = Support load multiple files to overwrite key values + +[types] +STRING = str +BOOL = true +BOOL_FALSE = false +FLOAT64 = 1.25 +INT = 10 +TIME = 2015-01-01T20:17:05Z +DURATION = 2h45m +UINT = 3 + +[array] +STRINGS = en, zh, de +FLOAT64S = 1.1, 2.2, 3.3 +INTS = 1, 2, 3 +UINTS = 1, 2, 3 +TIMES = 2015-01-01T20:17:05Z,2015-01-01T20:17:05Z,2015-01-01T20:17:05Z + +[note] +empty_lines = next line is empty +boolean_key +more = notes + +; Comment before the section +; This is a comment for the section too +[comments] +; Comment before key +key = value +; This is a comment for key2 +key2 = value2 +key3 = "one", "two", "three" + +[string escapes] +key1 = value1, value2, value3 +key2 = value1\, value2 +key3 = val\ue1, value2 +key4 = value1\\, value\\\\2 +key5 = value1\,, value2 +key6 = aaa bbb\ and\ space ccc + +[advance] +value with quotes = some value +value quote2 again = some value +includes comment sign = `my#password` +includes comment sign2 = `my;password` +true = 2+3=5 +`1+1=2` = true +`6+1=7` = true +"""`5+5`""" = 10 +`"6+6"` = 12 +`7-2=4` = false +ADDRESS = """404 road, +NotFound, State, 50000""" +two_lines = how about continuation lines? +lots_of_lines = 1 2 3 4 + diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go index 5054a7be6..cebb5cc59 100644 --- a/vendor/github.com/go-openapi/spec/expander.go +++ b/vendor/github.com/go-openapi/spec/expander.go @@ -336,12 +336,17 @@ func normalizeAbsPath(path string) string { // base could be a directory or a full file path func normalizePaths(refPath, base string) string { refURL, _ := url.Parse(refPath) - if path.IsAbs(refURL.Path) { + if path.IsAbs(refURL.Path) || filepath.IsAbs(refPath) { // refPath is actually absolute if refURL.Host != "" { return refPath } - return filepath.FromSlash(refPath) + parts := strings.Split(refPath, "#") + result := filepath.FromSlash(parts[0]) + if len(parts) == 2 { + result += "#" + parts[1] + } + return result } // relative refPath @@ -430,9 +435,11 @@ func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) toFetch := *refURL toFetch.Fragment = "" - data, fromCache := r.cache.Get(toFetch.String()) + normalized := normalizeAbsPath(toFetch.String()) + + data, fromCache := r.cache.Get(normalized) if !fromCache { - b, err := r.loadDoc(toFetch.String()) + b, err := r.loadDoc(normalized) if err != nil { return nil, url.URL{}, false, err } @@ -440,7 +447,7 @@ func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) if err := json.Unmarshal(b, &data); err != nil { return nil, url.URL{}, false, err } - r.cache.Set(toFetch.String(), data) + r.cache.Set(normalized, data) } return data, toFetch, fromCache, nil diff --git a/vendor/github.com/go-openapi/spec/expander_test.go b/vendor/github.com/go-openapi/spec/expander_test.go index b648a0cf0..82a6affe8 100644 --- a/vendor/github.com/go-openapi/spec/expander_test.go +++ b/vendor/github.com/go-openapi/spec/expander_test.go @@ -20,6 +20,7 @@ import ( "log" "net/http" "net/http/httptest" + "runtime" "testing" "github.com/go-openapi/jsonpointer" @@ -37,36 +38,72 @@ func jsonDoc(path string) (json.RawMessage, error) { // tests that paths are normalized correctly func TestNormalizePaths(t *testing.T) { - testCases := []struct { + type testNormalizePathsTestCases 
[]struct { refPath string base string expOutput string - }{ - { - // file basePath, absolute refPath - refPath: "/another/base/path.json#/definitions/Pet", - base: "/base/path.json", - expOutput: "/another/base/path.json#/definitions/Pet", - }, - { - // file basePath, relative refPath - refPath: "another/base/path.json#/definitions/Pet", - base: "/base/path.json", - expOutput: "/base/another/base/path.json#/definitions/Pet", - }, - { - // http basePath, absolute refPath - refPath: "http://www.anotherexample.com/another/base/path/swagger.json#/definitions/Pet", - base: "http://www.example.com/base/path/swagger.json", - expOutput: "http://www.anotherexample.com/another/base/path/swagger.json#/definitions/Pet", - }, - { - // http basePath, relative refPath - refPath: "another/base/path/swagger.json#/definitions/Pet", - base: "http://www.example.com/base/path/swagger.json", - expOutput: "http://www.example.com/base/path/another/base/path/swagger.json#/definitions/Pet", - }, } + testCases := func() testNormalizePathsTestCases { + testCases := testNormalizePathsTestCases{ + { + // http basePath, absolute refPath + refPath: "http://www.anotherexample.com/another/base/path/swagger.json#/definitions/Pet", + base: "http://www.example.com/base/path/swagger.json", + expOutput: "http://www.anotherexample.com/another/base/path/swagger.json#/definitions/Pet", + }, + { + // http basePath, relative refPath + refPath: "another/base/path/swagger.json#/definitions/Pet", + base: "http://www.example.com/base/path/swagger.json", + expOutput: "http://www.example.com/base/path/another/base/path/swagger.json#/definitions/Pet", + }, + } + if runtime.GOOS == "windows" { + testCases = append(testCases, testNormalizePathsTestCases{ + { + // file basePath, absolute refPath, no fragment + refPath: `C:\another\base\path.json`, + base: `C:\base\path.json`, + expOutput: `C:\another\base\path.json`, + }, + { + // file basePath, absolute refPath + refPath: `C:\another\base\path.json#/definitions/Pet`, + base: `C:\base\path.json`, + expOutput: `C:\another\base\path.json#/definitions/Pet`, + }, + { + // file basePath, relative refPath + refPath: `another\base\path.json#/definitions/Pet`, + base: `C:\base\path.json`, + expOutput: `C:\base\another\base\path.json#/definitions/Pet`, + }, + }...) + return testCases + } + // linux case + testCases = append(testCases, testNormalizePathsTestCases{ + { + // file basePath, absolute refPath, no fragment + refPath: "/another/base/path.json", + base: "/base/path.json", + expOutput: "/another/base/path.json", + }, + { + // file basePath, absolute refPath + refPath: "/another/base/path.json#/definitions/Pet", + base: "/base/path.json", + expOutput: "/another/base/path.json#/definitions/Pet", + }, + { + // file basePath, relative refPath + refPath: "another/base/path.json#/definitions/Pet", + base: "/base/path.json", + expOutput: "/base/another/base/path.json#/definitions/Pet", + }, + }...) 
+ return testCases + }() for _, tcase := range testCases { out := normalizePaths(tcase.refPath, tcase.base) diff --git a/vendor/github.com/go-openapi/swag/json.go b/vendor/github.com/go-openapi/swag/json.go index cb20a6a0f..17ab0f616 100644 --- a/vendor/github.com/go-openapi/swag/json.go +++ b/vendor/github.com/go-openapi/swag/json.go @@ -96,8 +96,7 @@ func ConcatJSON(blobs ...[]byte) []byte { last := len(blobs) - 1 var opening, closing byte - a := 0 - idx := 0 + var idx, a int buf := bytes.NewBuffer(nil) for i, b := range blobs { diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go index 8a2c85434..70f4fb361 100644 --- a/vendor/github.com/go-openapi/swag/loading.go +++ b/vendor/github.com/go-openapi/swag/loading.go @@ -19,7 +19,6 @@ import ( "io/ioutil" "log" "net/http" - "net/url" "path/filepath" "strings" "time" @@ -45,7 +44,10 @@ func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func( return remote } return func(pth string) ([]byte, error) { - upth, _ := url.PathUnescape(pth) + upth, err := pathUnescape(pth) + if err != nil { + return nil, err + } return local(filepath.FromSlash(upth)) } } diff --git a/vendor/github.com/go-openapi/swag/post_go18.go b/vendor/github.com/go-openapi/swag/post_go18.go new file mode 100644 index 000000000..ef48086db --- /dev/null +++ b/vendor/github.com/go-openapi/swag/post_go18.go @@ -0,0 +1,9 @@ +// +build go1.8 + +package swag + +import "net/url" + +func pathUnescape(path string) (string, error) { + return url.PathUnescape(path) +} diff --git a/vendor/github.com/go-openapi/swag/pre_go18.go b/vendor/github.com/go-openapi/swag/pre_go18.go new file mode 100644 index 000000000..860bb2bbb --- /dev/null +++ b/vendor/github.com/go-openapi/swag/pre_go18.go @@ -0,0 +1,9 @@ +// +build !go1.8 + +package swag + +import "net/url" + +func pathUnescape(path string) (string, error) { + return url.QueryUnescape(path) +} diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go index 26502f21d..e2eff7568 100644 --- a/vendor/github.com/go-openapi/swag/yaml.go +++ b/vendor/github.com/go-openapi/swag/yaml.go @@ -42,6 +42,7 @@ func YAMLToJSON(data interface{}) (json.RawMessage, error) { return json.RawMessage(b), err } +// BytesToYAMLDoc converts a byte slice into a YAML document func BytesToYAMLDoc(data []byte) (interface{}, error) { var canary map[interface{}]interface{} // validate this is an object and not a different type if err := yaml.Unmarshal(data, &canary); err != nil { diff --git a/vendor/github.com/jmoiron/sqlx/.travis.yml b/vendor/github.com/jmoiron/sqlx/.travis.yml new file mode 100644 index 000000000..6bc68d67f --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/.travis.yml @@ -0,0 +1,27 @@ +# vim: ft=yaml sw=2 ts=2 + +language: go + +# enable database services +services: + - mysql + - postgresql + +# create test database +before_install: + - mysql -e 'CREATE DATABASE IF NOT EXISTS sqlxtest;' + - psql -c 'create database sqlxtest;' -U postgres + - go get github.com/mattn/goveralls + - export SQLX_MYSQL_DSN="travis:@/sqlxtest?parseTime=true" + - export SQLX_POSTGRES_DSN="postgres://postgres:@localhost/sqlxtest?sslmode=disable" + - export SQLX_SQLITE_DSN="$HOME/sqlxtest.db" + +# go versions to test +go: + - "1.8" + - "1.9" + - "1.10.x" + +# run tests w/ coverage +script: + - travis_retry $GOPATH/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/jmoiron/sqlx/README.md b/vendor/github.com/jmoiron/sqlx/README.md index d2d125896..c0db7f78b 
100644 --- a/vendor/github.com/jmoiron/sqlx/README.md +++ b/vendor/github.com/jmoiron/sqlx/README.md @@ -1,6 +1,6 @@ # sqlx -[![Build Status](https://drone.io/github.com/jmoiron/sqlx/status.png)](https://drone.io/github.com/jmoiron/sqlx/latest) [![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/jmoiron/sqlx) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/jmoiron/sqlx/master/LICENSE) +[![Build Status](https://travis-ci.org/jmoiron/sqlx.svg?branch=master)](https://travis-ci.org/jmoiron/sqlx) [![Coverage Status](https://coveralls.io/repos/github/jmoiron/sqlx/badge.svg?branch=master)](https://coveralls.io/github/jmoiron/sqlx?branch=master) [![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/jmoiron/sqlx) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/jmoiron/sqlx/master/LICENSE) sqlx is a library which provides a set of extensions on go's standard `database/sql` library. The sqlx versions of `sql.DB`, `sql.TX`, `sql.Stmt`, diff --git a/vendor/github.com/jmoiron/sqlx/bind.go b/vendor/github.com/jmoiron/sqlx/bind.go index b81e6fc64..0fdc44355 100644 --- a/vendor/github.com/jmoiron/sqlx/bind.go +++ b/vendor/github.com/jmoiron/sqlx/bind.go @@ -21,7 +21,7 @@ const ( // BindType returns the bindtype for a given database given a drivername. func BindType(driverName string) int { switch driverName { - case "postgres", "pgx", "pq-timeouts": + case "postgres", "pgx", "pq-timeouts", "cloudsqlpostgres": return DOLLAR case "mysql": return QUESTION @@ -113,7 +113,8 @@ func In(query string, args ...interface{}) (string, []interface{}, error) { v := reflect.ValueOf(arg) t := reflectx.Deref(v.Type()) - if t.Kind() == reflect.Slice { + // []byte is a driver.Value type so it should not be expanded + if t.Kind() == reflect.Slice && t != reflect.TypeOf([]byte{}) { meta[i].length = v.Len() meta[i].v = v diff --git a/vendor/github.com/jmoiron/sqlx/sqlx.go b/vendor/github.com/jmoiron/sqlx/sqlx.go index e95f23ff2..e64947393 100644 --- a/vendor/github.com/jmoiron/sqlx/sqlx.go +++ b/vendor/github.com/jmoiron/sqlx/sqlx.go @@ -228,6 +228,14 @@ func (r *Row) Columns() ([]string, error) { return r.rows.Columns() } +// ColumnTypes returns the underlying sql.Rows.ColumnTypes(), or the deferred error +func (r *Row) ColumnTypes() ([]*sql.ColumnType, error) { + if r.err != nil { + return []*sql.ColumnType{}, r.err + } + return r.rows.ColumnTypes() +} + // Err returns the error encountered while scanning. func (r *Row) Err() error { return r.err diff --git a/vendor/github.com/jmoiron/sqlx/sqlx_context.go b/vendor/github.com/jmoiron/sqlx/sqlx_context.go index 0b1714514..d58ff337a 100644 --- a/vendor/github.com/jmoiron/sqlx/sqlx_context.go +++ b/vendor/github.com/jmoiron/sqlx/sqlx_context.go @@ -237,6 +237,19 @@ func (tx *Tx) NamedStmtContext(ctx context.Context, stmt *NamedStmt) *NamedStmt } } +// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt. +// +// The provided context is used for the preparation of the statement, not for +// the execution of the statement. 
+func (tx *Tx) PreparexContext(ctx context.Context, query string) (*Stmt, error) {
+	return PreparexContext(ctx, tx, query)
+}
+
+// PrepareNamedContext returns an sqlx.NamedStmt
+func (tx *Tx) PrepareNamedContext(ctx context.Context, query string) (*NamedStmt, error) {
+	return prepareNamedContext(ctx, tx, query)
+}
+
 // MustExecContext runs MustExecContext within a transaction.
 // Any placeholder parameters are replaced with supplied args.
 func (tx *Tx) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result {
diff --git a/vendor/github.com/jmoiron/sqlx/sqlx_test.go b/vendor/github.com/jmoiron/sqlx/sqlx_test.go
index 9502c13e1..e26c98074 100644
--- a/vendor/github.com/jmoiron/sqlx/sqlx_test.go
+++ b/vendor/github.com/jmoiron/sqlx/sqlx_test.go
@@ -1387,13 +1387,17 @@ func (p PropertyMap) Value() (driver.Value, error) {
 
 func (p PropertyMap) Scan(src interface{}) error {
 	v := reflect.ValueOf(src)
-	if !v.IsValid() || v.IsNil() {
+	if !v.IsValid() || v.CanAddr() && v.IsNil() {
 		return nil
 	}
-	if data, ok := src.([]byte); ok {
-		return json.Unmarshal(data, &p)
+	switch ts := src.(type) {
+	case []byte:
+		return json.Unmarshal(ts, &p)
+	case string:
+		return json.Unmarshal([]byte(ts), &p)
+	default:
+		return fmt.Errorf("could not decode type %T -> %T", src, p)
 	}
-	return fmt.Errorf("Could not not decode type %T -> %T", src, p)
 }
 
 func TestEmbeddedMaps(t *testing.T) {
@@ -1493,6 +1497,9 @@ func TestIn(t *testing.T) {
 		{"SELECT * FROM foo WHERE x in (?)",
 			[]interface{}{[]int{1, 2, 3, 4, 5, 6, 7, 8}},
 			8},
+		{"SELECT * FROM foo WHERE x = ? AND y in (?)",
+			[]interface{}{[]byte("foo"), []int{0, 5, 3}},
+			4},
 	}
 	for _, test := range tests {
 		q, a, err := In(test.q, test.args...)
diff --git a/vendor/github.com/mailru/easyjson/Makefile b/vendor/github.com/mailru/easyjson/Makefile
index f877ab269..49c80f3bd 100644
--- a/vendor/github.com/mailru/easyjson/Makefile
+++ b/vendor/github.com/mailru/easyjson/Makefile
@@ -23,7 +23,9 @@ generate: root build
 		.root/src/$(PKG)/tests/data.go \
 		.root/src/$(PKG)/tests/omitempty.go \
 		.root/src/$(PKG)/tests/nothing.go \
-		.root/src/$(PKG)/tests/named_type.go
+		.root/src/$(PKG)/tests/named_type.go \
+		.root/src/$(PKG)/tests/custom_map_key_type.go \
+		.root/src/$(PKG)/tests/embedded_type.go
 
 	.root/bin/easyjson -all .root/src/$(PKG)/tests/data.go
 	.root/bin/easyjson -all .root/src/$(PKG)/tests/nothing.go
@@ -33,6 +35,8 @@ generate: root build
 	.root/bin/easyjson -build_tags=use_easyjson .root/src/$(PKG)/benchmark/data.go
 	.root/bin/easyjson .root/src/$(PKG)/tests/nested_easy.go
 	.root/bin/easyjson .root/src/$(PKG)/tests/named_type.go
+	.root/bin/easyjson .root/src/$(PKG)/tests/custom_map_key_type.go
+	.root/bin/easyjson .root/src/$(PKG)/tests/embedded_type.go
 
 test: generate root
 	go test \
diff --git a/vendor/github.com/mailru/easyjson/gen/decoder.go b/vendor/github.com/mailru/easyjson/gen/decoder.go
index 021933ac8..b24b11ce2 100644
--- a/vendor/github.com/mailru/easyjson/gen/decoder.go
+++ b/vendor/github.com/mailru/easyjson/gen/decoder.go
@@ -48,10 +48,12 @@ var primitiveStringDecoders = map[reflect.Kind]string{
 	reflect.Uint32:  "in.Uint32Str()",
 	reflect.Uint64:  "in.Uint64Str()",
 	reflect.Uintptr: "in.UintptrStr()",
+	reflect.Float32: "in.Float32Str()",
+	reflect.Float64: "in.Float64Str()",
 }
 
 var customDecoders = map[string]string{
-	"json.Number":   "in.JsonNumber()",
+	"json.Number": "in.JsonNumber()",
 }
 
 // genTypeDecoder generates decoding code for the type t, but uses unmarshaler interface if implemented by t.
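The two table entries just added route float fields tagged `json:",string"` through the `Float32Str`/`Float64Str` helpers introduced in jlexer and jwriter further down in this diff. The quoted-number convention is the same one the standard library implements, so a stdlib sketch (with an illustrative type, not from this repository) shows the wire format easyjson's generated code now matches:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Price mirrors the new Float64String-style test field: the `,string`
// option quotes the float on the wire.
type Price struct {
	Amount float64 `json:"amount,string"`
}

func main() {
	b, _ := json.Marshal(Price{Amount: 19.99})
	fmt.Println(string(b)) // {"amount":"19.99"}

	var p Price
	_ = json.Unmarshal([]byte(`{"amount":"19.99"}`), &p)
	fmt.Println(p.Amount) // 19.99
}
```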
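Looking back at the sqlx hunks above: `In()` now leaves `[]byte` arguments alone, since `[]byte` is already a valid `driver.Value` and must reach the driver as a single bind parameter. The new `TestIn` case encodes exactly this; a small standalone sketch of the observable behavior (table and values are placeholders):

```go
package main

import (
	"fmt"

	"github.com/jmoiron/sqlx"
)

func main() {
	// The []byte stays one bind value; only the []int is expanded.
	q, args, err := sqlx.In(
		"SELECT * FROM foo WHERE x = ? AND y IN (?)",
		[]byte("foo"), []int{0, 5, 3},
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(q)         // SELECT * FROM foo WHERE x = ? AND y IN (?, ?, ?)
	fmt.Println(len(args)) // 4 — one []byte plus three ints
}
```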
@@ -84,11 +86,19 @@ func (g *Generator) genTypeDecoder(t reflect.Type, out string, tags fieldTags, i
 	return err
 }
 
+// returns true if the type t implements one of the custom unmarshaler interfaces
+func hasCustomUnmarshaler(t reflect.Type) bool {
+	t = reflect.PtrTo(t)
+	return t.Implements(reflect.TypeOf((*easyjson.Unmarshaler)(nil)).Elem()) ||
+		t.Implements(reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()) ||
+		t.Implements(reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem())
+}
+
 // genTypeDecoderNoCheck generates decoding code for the type t.
 func (g *Generator) genTypeDecoderNoCheck(t reflect.Type, out string, tags fieldTags, indent int) error {
 	ws := strings.Repeat("  ", indent)
 	// Check whether type is primitive, needs to be done after interface check.
-	if dec := customDecoders[t.String()]; dec != "" {
+	if dec := customDecoders[t.String()]; dec != "" {
 		fmt.Fprintln(g.out, ws+out+" = "+dec)
 		return nil
 	} else if dec := primitiveStringDecoders[t.Kind()]; dec != "" && tags.asString {
@@ -208,9 +218,9 @@ func (g *Generator) genTypeDecoderNoCheck(t reflect.Type, out string, tags field
 	case reflect.Map:
 		key := t.Key()
 		keyDec, ok := primitiveStringDecoders[key.Kind()]
-		if !ok {
-			return fmt.Errorf("map type %v not supported: only string and integer keys are allowed", key)
-		}
+		if !ok && !hasCustomUnmarshaler(key) {
+			return fmt.Errorf("map type %v not supported: only string and integer keys and types implementing json.Unmarshaler are allowed", key)
+		} // else assume the caller knows what they are doing and that the custom unmarshaler performs the translation from string or integer keys to the key type
 
 		elem := t.Elem()
 		tmpVar := g.uniqueVarName()
@@ -225,7 +235,15 @@ func (g *Generator) genTypeDecoderNoCheck(t reflect.Type, out string, tags field
 		fmt.Fprintln(g.out, ws+"  }")
 		fmt.Fprintln(g.out, ws+"  for !in.IsDelim('}') {")
-		fmt.Fprintln(g.out, ws+"    key := "+g.getType(key)+"("+keyDec+")")
+		if keyDec != "" {
+			fmt.Fprintln(g.out, ws+"    key := "+g.getType(key)+"("+keyDec+")")
+		} else {
+			fmt.Fprintln(g.out, ws+"    var key "+g.getType(key))
+			if err := g.genTypeDecoder(key, "key", tags, indent+2); err != nil {
+				return err
+			}
+		}
+
 		fmt.Fprintln(g.out, ws+"    in.WantColon()")
 		fmt.Fprintln(g.out, ws+"    var "+tmpVar+" "+g.getType(elem))
diff --git a/vendor/github.com/mailru/easyjson/gen/encoder.go b/vendor/github.com/mailru/easyjson/gen/encoder.go
index 48cba15d4..2775647b2 100644
--- a/vendor/github.com/mailru/easyjson/gen/encoder.go
+++ b/vendor/github.com/mailru/easyjson/gen/encoder.go
@@ -45,6 +45,8 @@ var primitiveStringEncoders = map[reflect.Kind]string{
 	reflect.Uint32:  "out.Uint32Str(uint32(%v))",
 	reflect.Uint64:  "out.Uint64Str(uint64(%v))",
 	reflect.Uintptr: "out.UintptrStr(uintptr(%v))",
+	reflect.Float32: "out.Float32Str(float32(%v))",
+	reflect.Float64: "out.Float64Str(float64(%v))",
 }
 
 // fieldTags contains parsed version of json struct field tags.
@@ -108,6 +110,14 @@ func (g *Generator) genTypeEncoder(t reflect.Type, in string, tags fieldTags, in
 	return err
 }
 
+// returns true if the type t implements one of the custom marshaler interfaces
+func hasCustomMarshaler(t reflect.Type) bool {
+	t = reflect.PtrTo(t)
+	return t.Implements(reflect.TypeOf((*easyjson.Marshaler)(nil)).Elem()) ||
+		t.Implements(reflect.TypeOf((*json.Marshaler)(nil)).Elem()) ||
+		t.Implements(reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem())
+}
+
 // genTypeEncoderNoCheck generates code that encodes in of type t into the writer.
func (g *Generator) genTypeEncoderNoCheck(t reflect.Type, in string, tags fieldTags, indent int, assumeNonEmpty bool) error { ws := strings.Repeat(" ", indent) @@ -197,9 +207,9 @@ func (g *Generator) genTypeEncoderNoCheck(t reflect.Type, in string, tags fieldT case reflect.Map: key := t.Key() keyEnc, ok := primitiveStringEncoders[key.Kind()] - if !ok { - return fmt.Errorf("map key type %v not supported: only string and integer keys are allowed", key) - } + if !ok && !hasCustomMarshaler(key) { + return fmt.Errorf("map key type %v not supported: only string and integer keys and types implementing Marshaler interfaces are allowed", key) + } // else assume the caller knows what they are doing and that the custom marshaler performs the translation from the key type to a string or integer tmpVar := g.uniqueVarName() if !assumeNonEmpty { @@ -213,7 +223,14 @@ func (g *Generator) genTypeEncoderNoCheck(t reflect.Type, in string, tags fieldT fmt.Fprintln(g.out, ws+" "+tmpVar+"First := true") fmt.Fprintln(g.out, ws+" for "+tmpVar+"Name, "+tmpVar+"Value := range "+in+" {") fmt.Fprintln(g.out, ws+" if "+tmpVar+"First { "+tmpVar+"First = false } else { out.RawByte(',') }") - fmt.Fprintln(g.out, ws+" "+fmt.Sprintf(keyEnc, tmpVar+"Name")) + if keyEnc != "" { + fmt.Fprintln(g.out, ws+" "+fmt.Sprintf(keyEnc, tmpVar+"Name")) + } else { + if err := g.genTypeEncoder(key, tmpVar+"Name", tags, indent+2, false); err != nil { + return err + } + } + fmt.Fprintln(g.out, ws+" out.RawByte(':')") if err := g.genTypeEncoder(t.Elem(), tmpVar+"Value", tags, indent+2, false); err != nil { diff --git a/vendor/github.com/mailru/easyjson/gen/generator.go b/vendor/github.com/mailru/easyjson/gen/generator.go index eb0d70ba2..4f1eb0489 100644 --- a/vendor/github.com/mailru/easyjson/gen/generator.go +++ b/vendor/github.com/mailru/easyjson/gen/generator.go @@ -284,7 +284,11 @@ func (g *Generator) getType(t reflect.Type) string { lines := make([]string, 0, nf) for i := 0; i < nf; i++ { f := t.Field(i) - line := f.Name + " " + g.getType(f.Type) + var line string + if !f.Anonymous { + line = f.Name + " " + } // else the field is anonymous (an embedded type) + line += g.getType(f.Type) t := f.Tag if t != "" { line += " " + escapeTag(t) diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go index e5558ae39..18d65cd5a 100644 --- a/vendor/github.com/mailru/easyjson/jlexer/lexer.go +++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go @@ -997,6 +997,22 @@ func (r *Lexer) Float32() float32 { return float32(n) } +func (r *Lexer) Float32Str() float32 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + n, err := strconv.ParseFloat(s, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return float32(n) +} + func (r *Lexer) Float64() float64 { s := r.number() if !r.Ok() { @@ -1014,6 +1030,22 @@ func (r *Lexer) Float64() float64 { return n } +func (r *Lexer) Float64Str() float64 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + n, err := strconv.ParseFloat(s, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return n +} + func (r *Lexer) Error() error { return r.fatalError } diff --git a/vendor/github.com/mailru/easyjson/jwriter/writer.go b/vendor/github.com/mailru/easyjson/jwriter/writer.go index e5a5ddfdb..b9ed7ccaa 100644 --- a/vendor/github.com/mailru/easyjson/jwriter/writer.go +++ 
b/vendor/github.com/mailru/easyjson/jwriter/writer.go @@ -240,11 +240,25 @@ func (w *Writer) Float32(n float32) { w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) } +func (w *Writer) Float32Str(n float32) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + func (w *Writer) Float64(n float64) { w.Buffer.EnsureSpace(20) w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64) } +func (w *Writer) Float64Str(n float64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 64) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + func (w *Writer) Bool(v bool) { w.Buffer.EnsureSpace(5) if v { @@ -340,12 +354,11 @@ func (w *Writer) base64(in []byte) { return } - w.Buffer.EnsureSpace(((len(in) - 1) / 3 + 1) * 4) + w.Buffer.EnsureSpace(((len(in)-1)/3 + 1) * 4) si := 0 n := (len(in) / 3) * 3 - for si < n { // Convert 3x 8bit source bytes into 4 bytes val := uint(in[si+0])<<16 | uint(in[si+1])<<8 | uint(in[si+2]) diff --git a/vendor/github.com/mailru/easyjson/tests/basic_test.go b/vendor/github.com/mailru/easyjson/tests/basic_test.go index 018678402..64649c47c 100644 --- a/vendor/github.com/mailru/easyjson/tests/basic_test.go +++ b/vendor/github.com/mailru/easyjson/tests/basic_test.go @@ -38,6 +38,8 @@ var testCases = []struct { {&IntsValue, IntsString}, {&mapStringStringValue, mapStringStringString}, {&namedTypeValue, namedTypeValueString}, + {&customMapKeyTypeValue, customMapKeyTypeValueString}, + {&embeddedTypeValue, embeddedTypeValueString}, {&mapMyIntStringValue, mapMyIntStringValueString}, {&mapIntStringValue, mapIntStringValueString}, {&mapInt32StringValue, mapInt32StringValueString}, diff --git a/vendor/github.com/mailru/easyjson/tests/custom_map_key_type.go b/vendor/github.com/mailru/easyjson/tests/custom_map_key_type.go new file mode 100644 index 000000000..099bd06dc --- /dev/null +++ b/vendor/github.com/mailru/easyjson/tests/custom_map_key_type.go @@ -0,0 +1,29 @@ +package tests + +import fmt "fmt" + +//easyjson:json +type CustomMapKeyType struct { + Map map[customKeyType]int +} + +type customKeyType [2]byte + +func (k customKeyType) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%02x"`, k)), nil +} + +func (k *customKeyType) UnmarshalJSON(b []byte) error { + _, err := fmt.Sscanf(string(b), `"%02x%02x"`, &k[0], &k[1]) + return err +} + +var customMapKeyTypeValue CustomMapKeyType + +func init() { + customMapKeyTypeValue.Map = map[customKeyType]int{ + customKeyType{0x01, 0x02}: 3, + } +} + +var customMapKeyTypeValueString = `{"Map":{"0102":3}}` diff --git a/vendor/github.com/mailru/easyjson/tests/data.go b/vendor/github.com/mailru/easyjson/tests/data.go index 145f093d6..f6d6653c0 100644 --- a/vendor/github.com/mailru/easyjson/tests/data.go +++ b/vendor/github.com/mailru/easyjson/tests/data.go @@ -41,6 +41,9 @@ type PrimitiveTypes struct { Float32 float32 Float64 float64 + Float32String float32 `json:",string"` + Float64String float64 `json:",string"` + Ptr *string PtrNil *string } @@ -77,6 +80,9 @@ var primitiveTypesValue = PrimitiveTypes{ Float32: 1.5, Float64: math.MaxFloat64, + Float32String: 1.5, + Float64String: math.MaxFloat64, + Ptr: &str, } @@ -110,6 +116,9 @@ var primitiveTypesString = "{" + `"Float32":` + fmt.Sprint(1.5) + `,` + `"Float64":` + fmt.Sprint(math.MaxFloat64) + `,` + + `"Float32String":"` + 
fmt.Sprint(1.5) + `",` + + `"Float64String":"` + fmt.Sprint(math.MaxFloat64) + `",` + + `"Ptr":"bla",` + `"PtrNil":null` + diff --git a/vendor/github.com/mailru/easyjson/tests/embedded_type.go b/vendor/github.com/mailru/easyjson/tests/embedded_type.go new file mode 100644 index 000000000..66470b6ef --- /dev/null +++ b/vendor/github.com/mailru/easyjson/tests/embedded_type.go @@ -0,0 +1,24 @@ +package tests + +//easyjson:json +type EmbeddedType struct { + EmbeddedInnerType + Inner struct { + EmbeddedInnerType + } + Field2 int +} + +type EmbeddedInnerType struct { + Field1 int +} + +var embeddedTypeValue EmbeddedType + +func init() { + embeddedTypeValue.Field1 = 1 + embeddedTypeValue.Field2 = 2 + embeddedTypeValue.Inner.Field1 = 3 +} + +var embeddedTypeValueString = `{"Inner":{"Field1":3},"Field2":2,"Field1":1}` diff --git a/vendor/github.com/mattes/migrate/.gitignore b/vendor/github.com/mattes/migrate/.gitignore deleted file mode 100644 index 938901207..000000000 --- a/vendor/github.com/mattes/migrate/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -.DS_Store -cli/build -cli/cli -cli/migrate -.coverage -.godoc.pid diff --git a/vendor/github.com/mattes/migrate/.travis.yml b/vendor/github.com/mattes/migrate/.travis.yml deleted file mode 100644 index 6581f6826..000000000 --- a/vendor/github.com/mattes/migrate/.travis.yml +++ /dev/null @@ -1,61 +0,0 @@ -language: go -sudo: required - -go: - - 1.7 - - 1.8 - -env: - - MIGRATE_TEST_CONTAINER_BOOT_DELAY=10 - -# TODO: https://docs.docker.com/engine/installation/linux/ubuntu/ -# pre-provision with travis docker setup and pin down docker version in install step -services: - - docker - -install: - - make deps - - (cd $GOPATH/src/github.com/docker/docker && git fetch --all --tags --prune && git checkout v1.13.0) - - sudo apt-get update && sudo apt-get install docker-engine=1.13.0* - - go get github.com/mattn/goveralls - -script: - - make test - -after_success: - - goveralls -service=travis-ci -coverprofile .coverage/combined.txt - - make list-external-deps > dependency_tree.txt && cat dependency_tree.txt - -before_deploy: - - make build-cli - - gem install --no-ri --no-rdoc fpm - - fpm -s dir -t deb -n migrate -v "$(git describe --tags 2>/dev/null | cut -c 2-)" --license MIT -m matthias.kadenbach@gmail.com --url https://github.com/mattes/migrate --description='Database migrations' -a amd64 -p migrate.$(git describe --tags 2>/dev/null | cut -c 2-).deb --deb-no-default-config-files -f -C cli/build migrate.linux-amd64=/usr/bin/migrate - -deploy: - - provider: releases - api_key: - secure: EFow50BI448HVb/uQ1Kk2Kq0xzmwIYq3V67YyymXIuqSCodvXEsMiBPUoLrxEknpPEIc67LEQTNdfHBgvyHk6oRINWAfie+7pr5tKrpOTF9ghyxoN1PlO8WKQCqwCvGMBCnc5ur5rvzp0bqfpV2rs5q9/nngy3kBuEvs12V7iho= - skip_cleanup: true - on: - go: 1.8 - repo: mattes/migrate - tags: true - file: - - cli/build/migrate.linux-amd64.tar.gz - - cli/build/migrate.darwin-amd64.tar.gz - - cli/build/migrate.windows-amd64.exe.tar.gz - - cli/build/sha256sum.txt - - dependency_tree.txt - - provider: packagecloud - repository: migrate - username: mattes - token: - secure: RiHJ/+J9DvXUah/APYdWySWZ5uOOISYJ0wS7xddc7/BNStRVjzFzvJ9zmb67RkyZZrvGuVjPiL4T8mtDyCJCj47RmU/56wPdEHbar/FjsiUCgwvR19RlulkgbV4okBCePbwzMw6HNHRp14TzfQCPtnN4kef0lOI4gZJkImN7rtQ= - dist: ubuntu/xenial - package_glob: '*.deb' - skip_cleanup: true - on: - go: 1.8 - repo: mattes/migrate - tags: true - diff --git a/vendor/github.com/mattes/migrate/CONTRIBUTING.md b/vendor/github.com/mattes/migrate/CONTRIBUTING.md deleted file mode 100644 index fcf82a42e..000000000 --- 
a/vendor/github.com/mattes/migrate/CONTRIBUTING.md +++ /dev/null @@ -1,22 +0,0 @@ -# Development, Testing and Contributing - - 1. Make sure you have a running Docker daemon - (Install for [MacOS](https://docs.docker.com/docker-for-mac/)) - 2. Fork this repo and `git clone` somewhere to `$GOPATH/src/github.com/%you%/migrate` - 3. `make rewrite-import-paths` to update imports to your local fork - 4. Confirm tests are working: `make test-short` - 5. Write awesome code ... - 6. `make test` to run all tests against all database versions - 7. `make restore-import-paths` to restore import paths - 8. Push code and open Pull Request - -Some more helpful commands: - - * You can specify which database/ source tests to run: - `make test-short SOURCE='file go-bindata' DATABASE='postgres cassandra'` - * After `make test`, run `make html-coverage` which opens a shiny test coverage overview. - * Missing imports? `make deps` - * `make build-cli` builds the CLI in directory `cli/build/`. - * `make list-external-deps` lists all external dependencies for each package - * `make docs && make open-docs` opens godoc in your browser, `make kill-docs` kills the godoc server. - Repeatedly call `make docs` to refresh the server. diff --git a/vendor/github.com/mattes/migrate/FAQ.md b/vendor/github.com/mattes/migrate/FAQ.md deleted file mode 100644 index e5a52bd1c..000000000 --- a/vendor/github.com/mattes/migrate/FAQ.md +++ /dev/null @@ -1,67 +0,0 @@ -# FAQ - -#### How is the code base structured? - ``` - / package migrate (the heart of everything) - /cli the CLI wrapper - /database database driver and sub directories have the actual driver implementations - /source source driver and sub directories have the actual driver implementations - ``` - -#### Why is there no `source/driver.go:Last()`? - It's not needed. And unless the source has a "native" way to read a directory in reversed order, - it might be expensive to do a full directory scan in order to get the last element. - -#### What is a NilMigration? NilVersion? - NilMigration defines a migration without a body. NilVersion is defined as const -1. - -#### What is the difference between uint(version) and int(targetVersion)? - version refers to an existing migration version coming from a source and therefor can never be negative. - targetVersion can either be a version OR represent a NilVersion, which equals -1. - -#### What's the difference between Next/Previous and Up/Down? - ``` - 1_first_migration.up next -> 2_second_migration.up ... - 1_first_migration.down <- previous 2_second_migration.down ... - ``` - -#### Why two separate files (up and down) for a migration? - It makes all of our lives easier. No new markup/syntax to learn for users - and existing database utility tools continue to work as expected. - -#### How many migrations can migrate handle? - Whatever the maximum positive signed integer value is for your platform. - For 32bit it would be 2,147,483,647 migrations. Migrate only keeps references to - the currently run and pre-fetched migrations in memory. Please note that some - source drivers need to do build a full "directory" tree first, which puts some - heat on the memory consumption. - -#### Are the table tests in migrate_test.go bloated? - Yes and no. There are duplicate test cases for sure but they don't hurt here. In fact - the tests are very visual now and might help new users understand expected behaviors quickly. - Migrate from version x to y and y is the last migration? 
Just check out the test for - that particular case and know what's going on instantly. - -#### What is Docker being used for? - Only for testing. See [testing/docker.go](testing/docker.go) - -#### Why not just use docker-compose? - It doesn't give us enough runtime control for testing. We want to be able to bring up containers fast - and whenever we want, not just once at the beginning of all tests. - -#### Can I maintain my driver in my own repository? - Yes, technically thats possible. We want to encourage you to contribute your driver to this respository though. - The driver's functionality is dictated by migrate's interfaces. That means there should really - just be one driver for a database/ source. We want to prevent a future where several drivers doing the exact same thing, - just implemented a bit differently, co-exist somewhere on Github. If users have to do research first to find the - "best" available driver for a database in order to get started, we would have failed as an open source community. - -#### Can I mix multiple sources during a batch of migrations? - No. - -#### What does "dirty" database mean? - Before a migration runs, each database sets a dirty flag. Execution stops if a migration fails and the dirty state persists, - which prevents attempts to run more migrations on top of a failed migration. You need to manually fix the error - and then "force" the expected version. - - diff --git a/vendor/github.com/mattes/migrate/LICENSE b/vendor/github.com/mattes/migrate/LICENSE deleted file mode 100644 index 62efa3670..000000000 --- a/vendor/github.com/mattes/migrate/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Matthias Kadenbach - -https://github.com/mattes/migrate - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mattes/migrate/MIGRATIONS.md b/vendor/github.com/mattes/migrate/MIGRATIONS.md deleted file mode 100644 index 797fe44c5..000000000 --- a/vendor/github.com/mattes/migrate/MIGRATIONS.md +++ /dev/null @@ -1,5 +0,0 @@ -# Migrations - -## Best practices: How to write migrations. - -@TODO diff --git a/vendor/github.com/mattes/migrate/Makefile b/vendor/github.com/mattes/migrate/Makefile deleted file mode 100644 index 34e737ef6..000000000 --- a/vendor/github.com/mattes/migrate/Makefile +++ /dev/null @@ -1,123 +0,0 @@ -SOURCE ?= file go-bindata github aws-s3 google-cloud-storage -DATABASE ?= postgres mysql redshift -VERSION ?= $(shell git describe --tags 2>/dev/null | cut -c 2-) -TEST_FLAGS ?= -REPO_OWNER ?= $(shell cd .. 
&& basename "$$(pwd)") - - -build-cli: clean - -mkdir ./cli/build - cd ./cli && GOOS=linux GOARCH=amd64 go build -a -o build/migrate.linux-amd64 -ldflags='-X main.Version=$(VERSION)' -tags '$(DATABASE) $(SOURCE)' . - cd ./cli && GOOS=darwin GOARCH=amd64 go build -a -o build/migrate.darwin-amd64 -ldflags='-X main.Version=$(VERSION)' -tags '$(DATABASE) $(SOURCE)' . - cd ./cli && GOOS=windows GOARCH=amd64 go build -a -o build/migrate.windows-amd64.exe -ldflags='-X main.Version=$(VERSION)' -tags '$(DATABASE) $(SOURCE)' . - cd ./cli/build && find . -name 'migrate*' | xargs -I{} tar czf {}.tar.gz {} - cd ./cli/build && shasum -a 256 * > sha256sum.txt - cat ./cli/build/sha256sum.txt - - -clean: - -rm -r ./cli/build - - -test-short: - make test-with-flags --ignore-errors TEST_FLAGS='-short' - - -test: - @-rm -r .coverage - @mkdir .coverage - make test-with-flags TEST_FLAGS='-v -race -covermode atomic -coverprofile .coverage/_$$(RAND).txt -bench=. -benchmem' - @echo 'mode: atomic' > .coverage/combined.txt - @cat .coverage/*.txt | grep -v 'mode: atomic' >> .coverage/combined.txt - - -test-with-flags: - @echo SOURCE: $(SOURCE) - @echo DATABASE: $(DATABASE) - - @go test $(TEST_FLAGS) . - @go test $(TEST_FLAGS) ./cli/... - @go test $(TEST_FLAGS) ./testing/... - - @echo -n '$(SOURCE)' | tr -s ' ' '\n' | xargs -I{} go test $(TEST_FLAGS) ./source/{} - @go test $(TEST_FLAGS) ./source/testing/... - @go test $(TEST_FLAGS) ./source/stub/... - - @echo -n '$(DATABASE)' | tr -s ' ' '\n' | xargs -I{} go test $(TEST_FLAGS) ./database/{} - @go test $(TEST_FLAGS) ./database/testing/... - @go test $(TEST_FLAGS) ./database/stub/... - - -kill-orphaned-docker-containers: - docker rm -f $(shell docker ps -aq --filter label=migrate_test) - - -html-coverage: - go tool cover -html=.coverage/combined.txt - - -deps: - -go get -v -u ./... - -go test -v -i ./... - # TODO: why is this not being fetched with the command above? - -go get -u github.com/fsouza/fake-gcs-server/fakestorage - - -list-external-deps: - $(call external_deps,'.') - $(call external_deps,'./cli/...') - $(call external_deps,'./testing/...') - - $(foreach v, $(SOURCE), $(call external_deps,'./source/$(v)/...')) - $(call external_deps,'./source/testing/...') - $(call external_deps,'./source/stub/...') - - $(foreach v, $(DATABASE), $(call external_deps,'./database/$(v)/...')) - $(call external_deps,'./database/testing/...') - $(call external_deps,'./database/stub/...') - - -restore-import-paths: - find . -name '*.go' -type f -execdir sed -i '' s%\"github.com/$(REPO_OWNER)/migrate%\"github.com/mattes/migrate%g '{}' \; - - -rewrite-import-paths: - find . -name '*.go' -type f -execdir sed -i '' s%\"github.com/mattes/migrate%\"github.com/$(REPO_OWNER)/migrate%g '{}' \; - - -# example: fswatch -0 --exclude .godoc.pid --event Updated . | xargs -0 -n1 -I{} make docs -docs: - -make kill-docs - nohup godoc -play -http=127.0.0.1:6064 /dev/null 2>&1 & echo $$! > .godoc.pid - cat .godoc.pid - - -kill-docs: - @cat .godoc.pid - kill -9 $$(cat .godoc.pid) - rm .godoc.pid - - -open-docs: - open http://localhost:6064/pkg/github.com/$(REPO_OWNER)/migrate - - -# example: make release V=0.0.0 -release: - git tag v$(V) - @read -p "Press enter to confirm and push to origin ..." 
&& git push origin v$(V) - - -define external_deps - @echo '-- $(1)'; go list -f '{{join .Deps "\n"}}' $(1) | grep -v github.com/$(REPO_OWNER)/migrate | xargs go list -f '{{if not .Standard}}{{.ImportPath}}{{end}}' - -endef - - -.PHONY: build-cli clean test-short test test-with-flags deps html-coverage \ - restore-import-paths rewrite-import-paths list-external-deps release \ - docs kill-docs open-docs kill-orphaned-docker-containers - -SHELL = /bin/bash -RAND = $(shell echo $$RANDOM) - diff --git a/vendor/github.com/mattes/migrate/README.md b/vendor/github.com/mattes/migrate/README.md deleted file mode 100644 index 6475adb5d..000000000 --- a/vendor/github.com/mattes/migrate/README.md +++ /dev/null @@ -1,143 +0,0 @@ -[![Build Status](https://travis-ci.org/mattes/migrate.svg?branch=master)](https://travis-ci.org/mattes/migrate) -[![GoDoc](https://godoc.org/github.com/mattes/migrate?status.svg)](https://godoc.org/github.com/mattes/migrate) -[![Coverage Status](https://coveralls.io/repos/github/mattes/migrate/badge.svg?branch=v3.0-prev)](https://coveralls.io/github/mattes/migrate?branch=v3.0-prev) -[![packagecloud.io](https://img.shields.io/badge/deb-packagecloud.io-844fec.svg)](https://packagecloud.io/mattes/migrate?filter=debs) - -# migrate - -__Database migrations written in Go. Use as [CLI](#cli-usage) or import as [library](#use-in-your-go-project).__ - - * Migrate reads migrations from [sources](#migration-sources) - and applies them in correct order to a [database](#databases). - * Drivers are "dumb", migrate glues everything together and makes sure the logic is bulletproof. - (Keeps the drivers lightweight, too.) - * Database drivers don't assume things or try to correct user input. When in doubt, fail. - - -Looking for [v1](https://github.com/mattes/migrate/tree/v1)? - - -## Databases - -Database drivers run migrations. [Add a new database?](database/driver.go) - - * [PostgreSQL](database/postgres) - * [Redshift](database/redshift) - * [Ql](database/ql) - * [Cassandra](database/cassandra) ([todo #164](https://github.com/mattes/migrate/issues/164)) - * [SQLite](database/sqlite) ([todo #165](https://github.com/mattes/migrate/issues/165)) - * [MySQL/ MariaDB](database/mysql) - * [Neo4j](database/neo4j) ([todo #167](https://github.com/mattes/migrate/issues/167)) - * [MongoDB](database/mongodb) ([todo #169](https://github.com/mattes/migrate/issues/169)) - * [CrateDB](database/crate) ([todo #170](https://github.com/mattes/migrate/issues/170)) - * [Shell](database/shell) ([todo #171](https://github.com/mattes/migrate/issues/171)) - * [Google Cloud Spanner](database/spanner) ([todo #172](https://github.com/mattes/migrate/issues/172)) - - - -## Migration Sources - -Source drivers read migrations from local or remote sources. [Add a new source?](source/driver.go) - - * [Filesystem](source/file) - read from fileystem (always included) - * [Go-Bindata](source/go-bindata) - read from embedded binary data ([jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata)) - * [Github](source/github) - read from remote Github repositories - * [AWS S3](source/aws-s3) - read from Amazon Web Services S3 - * [Google Cloud Storage](source/google-cloud-storage) - read from Google Cloud Platform Storage - - - -## CLI usage - - * Simple wrapper around this library. - * Handles ctrl+c (SIGINT) gracefully. - * No config search paths, no config files, no magic ENV var injections. 
- -__[CLI Documentation](cli)__ - -([brew todo #156](https://github.com/mattes/migrate/issues/156)) - -``` -$ brew install migrate --with-postgres -$ migrate -database postgres://localhost:5432/database up 2 -``` - - -## Use in your Go project - - * API is stable and frozen for this release (v3.x). - * Package migrate has no external dependencies. - * Only import the drivers you need. - (check [dependency_tree.txt](https://github.com/mattes/migrate/releases) for each driver) - * To help prevent database corruptions, it supports graceful stops via `GracefulStop chan bool`. - * Bring your own logger. - * Uses `io.Reader` streams internally for low memory overhead. - * Thread-safe and no goroutine leaks. - -__[Go Documentation](https://godoc.org/github.com/mattes/migrate)__ - -```go -import ( - "github.com/mattes/migrate" - _ "github.com/mattes/migrate/database/postgres" - _ "github.com/mattes/migrate/source/github" -) - -func main() { - m, err := migrate.New( - "github://mattes:personal-access-token@mattes/migrate_test", - "postgres://localhost:5432/database?sslmode=enable") - m.Steps(2) -} -``` - -Want to use an existing database client? - -```go -import ( - "database/sql" - _ "github.com/lib/pq" - "github.com/mattes/migrate" - "github.com/mattes/migrate/database/postgres" - _ "github.com/mattes/migrate/source/file" -) - -func main() { - db, err := sql.Open("postgres", "postgres://localhost:5432/database?sslmode=enable") - driver, err := postgres.WithInstance(db, &postgres.Config{}) - m, err := migrate.NewWithDatabaseInstance( - "file:///migrations", - "postgres", driver) - m.Steps(2) -} -``` - -## Migration files - -Each migration has an up and down migration. [Why?](FAQ.md#why-two-separate-files-up-and-down-for-a-migration) - -``` -1481574547_create_users_table.up.sql -1481574547_create_users_table.down.sql -``` - -[Best practices: How to write migrations.](MIGRATIONS.md) - - - -## Development and Contributing - -Yes, please! [`Makefile`](Makefile) is your friend, -read the [development guide](CONTRIBUTING.md). - -Also have a look at the [FAQ](FAQ.md). - - - ---- - -__Alternatives__ - -https://bitbucket.org/liamstask/goose, https://github.com/tanel/dbmigrate, -https://github.com/BurntSushi/migration, https://github.com/DavidHuie/gomigrate, -https://github.com/rubenv/sql-migrate diff --git a/vendor/github.com/mattes/migrate/cli/README.md b/vendor/github.com/mattes/migrate/cli/README.md deleted file mode 100644 index b8f685cd1..000000000 --- a/vendor/github.com/mattes/migrate/cli/README.md +++ /dev/null @@ -1,105 +0,0 @@ -# migrate CLI - -## Installation - -#### With Go toolchain - -``` -$ go get -u -d github.com/mattes/migrate/cli -$ go build -tags 'postgres' -o /usr/local/bin/migrate github.com/mattes/migrate/cli -``` - -#### MacOS - -([todo #156](https://github.com/mattes/migrate/issues/156)) - -``` -$ brew install migrate --with-postgres -``` - -#### Linux (*.deb package) - -``` -$ curl -L https://packagecloud.io/mattes/migrate/gpgkey | apt-key add - -$ echo "deb https://packagecloud.io/mattes/migrate/ubuntu/ xenial main" > /etc/apt/sources.list.d/migrate.list -$ apt-get update -$ apt-get install -y migrate -``` - -#### Download pre-build binary (Windows, MacOS, or Linux) - -[Release Downloads](https://github.com/mattes/migrate/releases) - -``` -$ curl -L https://github.com/mattes/migrate/releases/download/$version/migrate.$platform-amd64.tar.gz | tar xvz -``` - - - -## Usage - -``` -$ migrate -help -Usage: migrate OPTIONS COMMAND [arg...] 
- migrate [ -version | -help ] - -Options: - -source Location of the migrations (driver://url) - -path Shorthand for -source=file://path - -database Run migrations against this database (driver://url) - -prefetch N Number of migrations to load in advance before executing (default 10) - -lock-timeout N Allow N seconds to acquire database lock (default 15) - -verbose Print verbose logging - -version Print version - -help Print usage - -Commands: - goto V Migrate to version V - up [N] Apply all or N up migrations - down [N] Apply all or N down migrations - drop Drop everyting inside database - force V Set version V but don't run migration (ignores dirty state) - version Print current migration version -``` - - -So let's say you want to run the first two migrations - -``` -$ migrate -database postgres://localhost:5432/database up 2 -``` - -If your migrations are hosted on github - -``` -$ migrate -source github://mattes:personal-access-token@mattes/migrate_test \ - -database postgres://localhost:5432/database down 2 -``` - -The CLI will gracefully stop at a safe point when SIGINT (ctrl+c) is received. -Send SIGKILL for immediate halt. - - - -## Reading CLI arguments from somewhere else - -##### ENV variables - -``` -$ migrate -database "$MY_MIGRATE_DATABASE" -``` - -##### JSON files - -Check out https://stedolan.github.io/jq/ - -``` -$ migrate -database "$(cat config.json | jq '.database')" -``` - -##### YAML files - -```` -$ migrate -database "$(cat config/database.yml | ruby -ryaml -e "print YAML.load(STDIN.read)['database']")" -$ migrate -database "$(cat config/database.yml | python -c 'import yaml,sys;print yaml.safe_load(sys.stdin)["database"]')" -``` diff --git a/vendor/github.com/mattes/migrate/cli/build_aws-s3.go b/vendor/github.com/mattes/migrate/cli/build_aws-s3.go deleted file mode 100644 index 766fd5663..000000000 --- a/vendor/github.com/mattes/migrate/cli/build_aws-s3.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build aws-s3 - -package main - -import ( - _ "github.com/mattes/migrate/source/aws-s3" -) diff --git a/vendor/github.com/mattes/migrate/cli/build_github.go b/vendor/github.com/mattes/migrate/cli/build_github.go deleted file mode 100644 index 9c813b46c..000000000 --- a/vendor/github.com/mattes/migrate/cli/build_github.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build github - -package main - -import ( - _ "github.com/mattes/migrate/source/github" -) diff --git a/vendor/github.com/mattes/migrate/cli/build_go-bindata.go b/vendor/github.com/mattes/migrate/cli/build_go-bindata.go deleted file mode 100644 index 8a6a89349..000000000 --- a/vendor/github.com/mattes/migrate/cli/build_go-bindata.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build go-bindata - -package main - -import ( - _ "github.com/mattes/migrate/source/go-bindata" -) diff --git a/vendor/github.com/mattes/migrate/cli/build_google-cloud-storage.go b/vendor/github.com/mattes/migrate/cli/build_google-cloud-storage.go deleted file mode 100644 index 04f314338..000000000 --- a/vendor/github.com/mattes/migrate/cli/build_google-cloud-storage.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build google-cloud-storage - -package main - -import ( - _ "github.com/mattes/migrate/source/google-cloud-storage" -) diff --git a/vendor/github.com/mattes/migrate/cli/build_mysql.go b/vendor/github.com/mattes/migrate/cli/build_mysql.go deleted file mode 100644 index 177766f5e..000000000 --- a/vendor/github.com/mattes/migrate/cli/build_mysql.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build mysql - -package main - -import ( - _ "github.com/mattes/migrate/database/mysql" -) 
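Each of the deleted cli/build_*.go files above is a one-line build-tag shim; since the pattern is easy to miss in a wall of deletions, here is its general shape as a hedged sketch (the driver import path below is a placeholder; the real shims blank-imported github.com/mattes/migrate/database/postgres and friends):

```go
// +build postgres

// This file compiles only when built with `go build -tags 'postgres'`.
// Its sole job is a blank import whose init() registers the driver,
// so unused drivers never bloat the binary.
package main

import (
	_ "example.com/yourapp/database/postgres" // hypothetical driver package
)
```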
diff --git a/vendor/github.com/mattes/migrate/cli/build_postgres.go b/vendor/github.com/mattes/migrate/cli/build_postgres.go deleted file mode 100644 index 87f6be757..000000000 --- a/vendor/github.com/mattes/migrate/cli/build_postgres.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build postgres - -package main - -import ( - _ "github.com/mattes/migrate/database/postgres" -) diff --git a/vendor/github.com/mattes/migrate/cli/build_ql.go b/vendor/github.com/mattes/migrate/cli/build_ql.go deleted file mode 100644 index cd56ef958..000000000 --- a/vendor/github.com/mattes/migrate/cli/build_ql.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build ql - -package main - -import ( - _ "github.com/mattes/migrate/database/ql" -) diff --git a/vendor/github.com/mattes/migrate/cli/build_redshift.go b/vendor/github.com/mattes/migrate/cli/build_redshift.go deleted file mode 100644 index 8153d0aa3..000000000 --- a/vendor/github.com/mattes/migrate/cli/build_redshift.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build redshift - -package main - -import ( - _ "github.com/mattes/migrate/database/redshift" -) diff --git a/vendor/github.com/mattes/migrate/cli/commands.go b/vendor/github.com/mattes/migrate/cli/commands.go deleted file mode 100644 index e26462164..000000000 --- a/vendor/github.com/mattes/migrate/cli/commands.go +++ /dev/null @@ -1,81 +0,0 @@ -package main - -import ( - "github.com/mattes/migrate" - _ "github.com/mattes/migrate/database/stub" // TODO remove again - _ "github.com/mattes/migrate/source/file" -) - -func gotoCmd(m *migrate.Migrate, v uint) { - if err := m.Migrate(v); err != nil { - if err != migrate.ErrNoChange { - log.fatalErr(err) - } else { - log.Println(err) - } - } -} - -func upCmd(m *migrate.Migrate, limit int) { - if limit >= 0 { - if err := m.Steps(limit); err != nil { - if err != migrate.ErrNoChange { - log.fatalErr(err) - } else { - log.Println(err) - } - } - } else { - if err := m.Up(); err != nil { - if err != migrate.ErrNoChange { - log.fatalErr(err) - } else { - log.Println(err) - } - } - } -} - -func downCmd(m *migrate.Migrate, limit int) { - if limit >= 0 { - if err := m.Steps(-limit); err != nil { - if err != migrate.ErrNoChange { - log.fatalErr(err) - } else { - log.Println(err) - } - } - } else { - if err := m.Down(); err != nil { - if err != migrate.ErrNoChange { - log.fatalErr(err) - } else { - log.Println(err) - } - } - } -} - -func dropCmd(m *migrate.Migrate) { - if err := m.Drop(); err != nil { - log.fatalErr(err) - } -} - -func forceCmd(m *migrate.Migrate, v int) { - if err := m.Force(v); err != nil { - log.fatalErr(err) - } -} - -func versionCmd(m *migrate.Migrate) { - v, dirty, err := m.Version() - if err != nil { - log.fatalErr(err) - } - if dirty { - log.Printf("%v (dirty)\n", v) - } else { - log.Println(v) - } -} diff --git a/vendor/github.com/mattes/migrate/cli/examples/Dockerfile b/vendor/github.com/mattes/migrate/cli/examples/Dockerfile deleted file mode 100644 index 740f951f8..000000000 --- a/vendor/github.com/mattes/migrate/cli/examples/Dockerfile +++ /dev/null @@ -1,12 +0,0 @@ -FROM ubuntu:xenial - -RUN apt-get update && \ - apt-get install -y curl apt-transport-https - -RUN curl -L https://packagecloud.io/mattes/migrate/gpgkey | apt-key add - && \ - echo "deb https://packagecloud.io/mattes/migrate/ubuntu/ xenial main" > /etc/apt/sources.list.d/migrate.list && \ - apt-get update && \ - apt-get install -y migrate - -RUN migrate -version - diff --git a/vendor/github.com/mattes/migrate/cli/log.go b/vendor/github.com/mattes/migrate/cli/log.go deleted file mode 100644 index 
a119d3481..000000000 --- a/vendor/github.com/mattes/migrate/cli/log.go +++ /dev/null @@ -1,45 +0,0 @@ -package main - -import ( - "fmt" - logpkg "log" - "os" -) - -type Log struct { - verbose bool -} - -func (l *Log) Printf(format string, v ...interface{}) { - if l.verbose { - logpkg.Printf(format, v...) - } else { - fmt.Fprintf(os.Stderr, format, v...) - } -} - -func (l *Log) Println(args ...interface{}) { - if l.verbose { - logpkg.Println(args...) - } else { - fmt.Fprintln(os.Stderr, args...) - } -} - -func (l *Log) Verbose() bool { - return l.verbose -} - -func (l *Log) fatalf(format string, v ...interface{}) { - l.Printf(format, v...) - os.Exit(1) -} - -func (l *Log) fatal(args ...interface{}) { - l.Println(args...) - os.Exit(1) -} - -func (l *Log) fatalErr(err error) { - l.fatal("error:", err) -} diff --git a/vendor/github.com/mattes/migrate/cli/main.go b/vendor/github.com/mattes/migrate/cli/main.go deleted file mode 100644 index 58b17c155..000000000 --- a/vendor/github.com/mattes/migrate/cli/main.go +++ /dev/null @@ -1,210 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "os" - "os/signal" - "strconv" - "syscall" - "time" - - "github.com/mattes/migrate" -) - -// set main log -var log = &Log{} - -func main() { - helpPtr := flag.Bool("help", false, "") - versionPtr := flag.Bool("version", false, "") - verbosePtr := flag.Bool("verbose", false, "") - prefetchPtr := flag.Uint("prefetch", 10, "") - lockTimeoutPtr := flag.Uint("lock-timeout", 15, "") - pathPtr := flag.String("path", "", "") - databasePtr := flag.String("database", "", "") - sourcePtr := flag.String("source", "", "") - - flag.Usage = func() { - fmt.Fprint(os.Stderr, - `Usage: migrate OPTIONS COMMAND [arg...] - migrate [ -version | -help ] - -Options: - -source Location of the migrations (driver://url) - -path Shorthand for -source=file://path - -database Run migrations against this database (driver://url) - -prefetch N Number of migrations to load in advance before executing (default 10) - -lock-timeout N Allow N seconds to acquire database lock (default 15) - -verbose Print verbose logging - -version Print version - -help Print usage - -Commands: - goto V Migrate to version V - up [N] Apply all or N up migrations - down [N] Apply all or N down migrations - drop Drop everything inside database - force V Set version V but don't run migration (ignores dirty state) - version Print current migration version -`) - } - - flag.Parse() - - // initialize logger - log.verbose = *verbosePtr - - // show cli version - if *versionPtr { - fmt.Fprintln(os.Stderr, Version) - os.Exit(0) - } - - // show help - if *helpPtr { - flag.Usage() - os.Exit(0) - } - - // translate -path into -source if given - if *sourcePtr == "" && *pathPtr != "" { - *sourcePtr = fmt.Sprintf("file://%v", *pathPtr) - } - - // initialize migrate - // don't catch migraterErr here and let each command decide - // how it wants to handle the error - migrater, migraterErr := migrate.New(*sourcePtr, *databasePtr) - defer func() { - if migraterErr == nil { - migrater.Close() - } - }() - if migraterErr == nil { - migrater.Log = log - migrater.PrefetchMigrations = *prefetchPtr - migrater.LockTimeout = time.Duration(int64(*lockTimeoutPtr)) * time.Second - - // handle Ctrl+c - signals := make(chan os.Signal, 1) - signal.Notify(signals, syscall.SIGINT) - go func() { - for range signals { - log.Println("Stopping after this running migration ...") - migrater.GracefulStop <- true - return - } - }() - } - - startTime := time.Now() - - switch flag.Arg(0) { - case "goto": - if
migraterErr != nil { - log.fatalErr(migraterErr) - } - - if flag.Arg(1) == "" { - log.fatal("error: please specify version argument V") - } - - v, err := strconv.ParseUint(flag.Arg(1), 10, 64) - if err != nil { - log.fatal("error: can't read version argument V") - } - - gotoCmd(migrater, uint(v)) - - if log.verbose { - log.Println("Finished after", time.Now().Sub(startTime)) - } - - case "up": - if migraterErr != nil { - log.fatalErr(migraterErr) - } - - limit := -1 - if flag.Arg(1) != "" { - n, err := strconv.ParseUint(flag.Arg(1), 10, 64) - if err != nil { - log.fatal("error: can't read limit argument N") - } - limit = int(n) - } - - upCmd(migrater, limit) - - if log.verbose { - log.Println("Finished after", time.Now().Sub(startTime)) - } - - case "down": - if migraterErr != nil { - log.fatalErr(migraterErr) - } - - limit := -1 - if flag.Arg(1) != "" { - n, err := strconv.ParseUint(flag.Arg(1), 10, 64) - if err != nil { - log.fatal("error: can't read limit argument N") - } - limit = int(n) - } - - downCmd(migrater, limit) - - if log.verbose { - log.Println("Finished after", time.Now().Sub(startTime)) - } - - case "drop": - if migraterErr != nil { - log.fatalErr(migraterErr) - } - - dropCmd(migrater) - - if log.verbose { - log.Println("Finished after", time.Now().Sub(startTime)) - } - - case "force": - if migraterErr != nil { - log.fatalErr(migraterErr) - } - - if flag.Arg(1) == "" { - log.fatal("error: please specify version argument V") - } - - v, err := strconv.ParseInt(flag.Arg(1), 10, 64) - if err != nil { - log.fatal("error: can't read version argument V") - } - - if v < -1 { - log.fatal("error: argument V must be >= -1") - } - - forceCmd(migrater, int(v)) - - if log.verbose { - log.Println("Finished after", time.Now().Sub(startTime)) - } - - case "version": - if migraterErr != nil { - log.fatalErr(migraterErr) - } - - versionCmd(migrater) - - default: - flag.Usage() - os.Exit(0) - } -} diff --git a/vendor/github.com/mattes/migrate/cli/version.go b/vendor/github.com/mattes/migrate/cli/version.go deleted file mode 100644 index 6c3ec49fe..000000000 --- a/vendor/github.com/mattes/migrate/cli/version.go +++ /dev/null @@ -1,4 +0,0 @@ -package main - -// Version is set in Makefile with build flags -var Version = "dev" diff --git a/vendor/github.com/mattes/migrate/database/cassandra/README.md b/vendor/github.com/mattes/migrate/database/cassandra/README.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/mattes/migrate/database/crate/README.md b/vendor/github.com/mattes/migrate/database/crate/README.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/mattes/migrate/database/driver.go b/vendor/github.com/mattes/migrate/database/driver.go deleted file mode 100644 index 53d524070..000000000 --- a/vendor/github.com/mattes/migrate/database/driver.go +++ /dev/null @@ -1,110 +0,0 @@ -// Package database provides the Database interface. -// All database drivers must implement this interface, register themselves, -// optionally provide a `WithInstance` function and pass the tests -// in package database/testing. -package database - -import ( - "fmt" - "io" - nurl "net/url" - "sync" -) - -var ( - ErrLocked = fmt.Errorf("can't acquire lock") -) - -const NilVersion int = -1 - -var driversMu sync.RWMutex -var drivers = make(map[string]Driver) - -// Driver is the interface every database driver must implement. -// -// How to implement a database driver? -// 1. Implement this interface. -// 2. 
Optionally, add a function named `WithInstance`. -// This function should accept an existing DB instance and a Config{} struct -// and return a driver instance. -// 3. Add a test that calls database/testing/testing.go:Test() -// 4. Add your own tests for Open(), WithInstance() (when provided) and Close(). -// All other functions are tested by tests in database/testing. -// Saves you some time and makes sure all database drivers behave the same way. -// 5. Call Register in init(). -// -// Guidelines: -// * Don't try to correct user input. Don't assume things. -// When in doubt, return an error and explain the situation to the user. -// * All configuration input must come from the URL string in func Open() -// or the Config{} struct in WithInstance. Don't os.Getenv(). -type Driver interface { - // Open returns a new driver instance configured with parameters - // coming from the URL string. Migrate will call this function - // only once per instance. - Open(url string) (Driver, error) - - // Close closes the underlying database instance managed by the driver. - // Migrate will call this function only once per instance. - Close() error - - // Lock should acquire a database lock so that only one migration process - // can run at a time. Migrate will call this function before Run is called. - // If the implementation can't provide this functionality, return nil. - // Return database.ErrLocked if database is already locked. - Lock() error - - // Unlock should release the lock. Migrate will call this function after - // all migrations have been run. - Unlock() error - - // Run applies a migration to the database. migration is guaranteed to be non-nil. - Run(migration io.Reader) error - - // SetVersion saves version and dirty state. - // Migrate will call this function before and after each call to Run. - // version must be >= -1. -1 means NilVersion. - SetVersion(version int, dirty bool) error - - // Version returns the currently active version and if the database is dirty. - // When no migration has been applied, it must return version -1. - // Dirty means a previous migration failed and user interaction is required. - Version() (version int, dirty bool, err error) - - // Drop deletes everything in the database. - Drop() error -} - -// Open returns a new driver instance. -func Open(url string) (Driver, error) { - u, err := nurl.Parse(url) - if err != nil { - return nil, err - } - - if u.Scheme == "" { - return nil, fmt.Errorf("database driver: invalid URL scheme") - } - - driversMu.RLock() - d, ok := drivers[u.Scheme] - driversMu.RUnlock() - if !ok { - return nil, fmt.Errorf("database driver: unknown driver %v (forgotten import?)", u.Scheme) - } - - return d.Open(url) -} - -// Register globally registers a driver.
-func Register(name string, driver Driver) { - driversMu.Lock() - defer driversMu.Unlock() - if driver == nil { - panic("Register driver is nil") - } - if _, dup := drivers[name]; dup { - panic("Register called twice for driver " + name) - } - drivers[name] = driver -} diff --git a/vendor/github.com/mattes/migrate/database/driver_test.go b/vendor/github.com/mattes/migrate/database/driver_test.go deleted file mode 100644 index c0a29304f..000000000 --- a/vendor/github.com/mattes/migrate/database/driver_test.go +++ /dev/null @@ -1,8 +0,0 @@ -package database - -func ExampleDriver() { - // see database/stub for an example - - // database/stub/stub.go has the driver implementation - // database/stub/stub_test.go runs database/testing/testing.go:Test -} diff --git a/vendor/github.com/mattes/migrate/database/error.go b/vendor/github.com/mattes/migrate/database/error.go deleted file mode 100644 index eb802c753..000000000 --- a/vendor/github.com/mattes/migrate/database/error.go +++ /dev/null @@ -1,27 +0,0 @@ -package database - -import ( - "fmt" -) - -// Error should be used for errors involving queries run against the database -type Error struct { - // Optional: the line number - Line uint - - // Query is a query excerpt - Query []byte - - // Err is a useful/helping error message for humans - Err string - - // OrigErr is the underlying error - OrigErr error -} - -func (e Error) Error() string { - if len(e.Err) == 0 { - return fmt.Sprintf("%v in line %v: %s", e.OrigErr, e.Line, e.Query) - } - return fmt.Sprintf("%v in line %v: %s (details: %v)", e.Err, e.Line, e.Query, e.OrigErr) -} diff --git a/vendor/github.com/mattes/migrate/database/mongodb/README.md b/vendor/github.com/mattes/migrate/database/mongodb/README.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/mattes/migrate/database/mysql/README.md b/vendor/github.com/mattes/migrate/database/mysql/README.md deleted file mode 100644 index fc79c98a1..000000000 --- a/vendor/github.com/mattes/migrate/database/mysql/README.md +++ /dev/null @@ -1,24 +0,0 @@ -# mysql - -`mysql://user:password@tcp(host:port)/dbname?query` - -| URL Query | WithInstance Config | Description | -|------------|---------------------|-------------| -| `x-migrations-table` | `MigrationsTable` | Name of the migrations table | -| `dbname` | `DatabaseName` | The name of the database to connect to | -| `user` | | The user to sign in as | -| `password` | | The user's password | -| `host` | | The host to connect to. | -| `port` | | The port to bind to. | -| `x-tls-ca` | | The location of the root certificate file. | -| `x-tls-cert` | | Cert file location. | -| `x-tls-key` | | Key file location. | -| `x-tls-insecure-skip-verify` | | Whether to skip certificate verification (true\|false) | - -## Upgrading from v1 - -1. Write down the current migration version from schema_migrations -2. `DROP TABLE schema_migrations` -3. Wrap your existing migrations in transactions ([BEGIN/COMMIT](https://dev.mysql.com/doc/refman/5.7/en/commit.html)) if you use multiple statements within one migration. -4. Download and install the latest migrate version. -5. Force the current migration version with `migrate force `.
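The table above maps URL query parameters onto the driver's `WithInstance` config. For callers that already hold a `*sql.DB`, here is a minimal sketch of the `WithInstance` route (DSN, table name, and migration path are placeholders; `multiStatements=true` mirrors the requirement noted in the driver source below):

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
	"github.com/mattes/migrate"
	"github.com/mattes/migrate/database/mysql"
	_ "github.com/mattes/migrate/source/file"
)

func main() {
	// multiStatements=true lets a single migration file carry several statements.
	db, err := sql.Open("mysql", "user:password@tcp(localhost:3306)/dbname?multiStatements=true")
	if err != nil {
		log.Fatal(err)
	}

	// MigrationsTable corresponds to the x-migrations-table URL query above.
	driver, err := mysql.WithInstance(db, &mysql.Config{MigrationsTable: "schema_migrations"})
	if err != nil {
		log.Fatal(err)
	}

	m, err := migrate.NewWithDatabaseInstance("file:///migrations", "mysql", driver)
	if err != nil {
		log.Fatal(err)
	}
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		log.Fatal(err)
	}
}
```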
diff --git a/vendor/github.com/mattes/migrate/database/mysql/mysql.go b/vendor/github.com/mattes/migrate/database/mysql/mysql.go deleted file mode 100644 index a559e113c..000000000 --- a/vendor/github.com/mattes/migrate/database/mysql/mysql.go +++ /dev/null @@ -1,327 +0,0 @@ -package mysql - -import ( - "crypto/tls" - "crypto/x509" - "database/sql" - "fmt" - "io" - "io/ioutil" - nurl "net/url" - "strconv" - "strings" - - "github.com/go-sql-driver/mysql" - "github.com/mattes/migrate" - "github.com/mattes/migrate/database" -) - -func init() { - database.Register("mysql", &Mysql{}) -} - -var DefaultMigrationsTable = "schema_migrations" - -var ( - ErrDatabaseDirty = fmt.Errorf("database is dirty") - ErrNilConfig = fmt.Errorf("no config") - ErrNoDatabaseName = fmt.Errorf("no database name") - ErrAppendPEM = fmt.Errorf("failed to append PEM") -) - -type Config struct { - MigrationsTable string - DatabaseName string -} - -type Mysql struct { - db *sql.DB - isLocked bool - - config *Config -} - -// instance must have `multiStatements` set to true -func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) { - if config == nil { - return nil, ErrNilConfig - } - - if err := instance.Ping(); err != nil { - return nil, err - } - - query := `SELECT DATABASE()` - var databaseName sql.NullString - if err := instance.QueryRow(query).Scan(&databaseName); err != nil { - return nil, &database.Error{OrigErr: err, Query: []byte(query)} - } - - if len(databaseName.String) == 0 { - return nil, ErrNoDatabaseName - } - - config.DatabaseName = databaseName.String - - if len(config.MigrationsTable) == 0 { - config.MigrationsTable = DefaultMigrationsTable - } - - mx := &Mysql{ - db: instance, - config: config, - } - - if err := mx.ensureVersionTable(); err != nil { - return nil, err - } - - return mx, nil -} - -func (m *Mysql) Open(url string) (database.Driver, error) { - purl, err := nurl.Parse(url) - if err != nil { - return nil, err - } - - q := purl.Query() - q.Set("multiStatements", "true") - purl.RawQuery = q.Encode() - - db, err := sql.Open("mysql", strings.Replace( - migrate.FilterCustomQuery(purl).String(), "mysql://", "", 1)) - if err != nil { - return nil, err - } - - migrationsTable := purl.Query().Get("x-migrations-table") - if len(migrationsTable) == 0 { - migrationsTable = DefaultMigrationsTable - } - - // use custom TLS? 
- ctls := purl.Query().Get("tls") - if len(ctls) > 0 { - if _, isBool := readBool(ctls); !isBool && strings.ToLower(ctls) != "skip-verify" { - rootCertPool := x509.NewCertPool() - pem, err := ioutil.ReadFile(purl.Query().Get("x-tls-ca")) - if err != nil { - return nil, err - } - - if ok := rootCertPool.AppendCertsFromPEM(pem); !ok { - return nil, ErrAppendPEM - } - - certs, err := tls.LoadX509KeyPair(purl.Query().Get("x-tls-cert"), purl.Query().Get("x-tls-key")) - if err != nil { - return nil, err - } - - insecureSkipVerify := false - if len(purl.Query().Get("x-tls-insecure-skip-verify")) > 0 { - x, err := strconv.ParseBool(purl.Query().Get("x-tls-insecure-skip-verify")) - if err != nil { - return nil, err - } - insecureSkipVerify = x - } - - mysql.RegisterTLSConfig(ctls, &tls.Config{ - RootCAs: rootCertPool, - Certificates: []tls.Certificate{certs}, - InsecureSkipVerify: insecureSkipVerify, - }) - } - } - - mx, err := WithInstance(db, &Config{ - DatabaseName: purl.Path, - MigrationsTable: migrationsTable, - }) - if err != nil { - return nil, err - } - - return mx, nil -} - -func (m *Mysql) Close() error { - return m.db.Close() -} - -func (m *Mysql) Lock() error { - if m.isLocked { - return database.ErrLocked - } - - aid, err := database.GenerateAdvisoryLockId(m.config.DatabaseName) - if err != nil { - return err - } - - query := "SELECT GET_LOCK(?, 1)" - var success bool - if err := m.db.QueryRow(query, aid).Scan(&success); err != nil { - return &database.Error{OrigErr: err, Err: "try lock failed", Query: []byte(query)} - } - - if success { - m.isLocked = true - return nil - } - - return database.ErrLocked -} - -func (m *Mysql) Unlock() error { - if !m.isLocked { - return nil - } - - aid, err := database.GenerateAdvisoryLockId(m.config.DatabaseName) - if err != nil { - return err - } - - query := `SELECT RELEASE_LOCK(?)` - if _, err := m.db.Exec(query, aid); err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - - m.isLocked = false - return nil -} - -func (m *Mysql) Run(migration io.Reader) error { - migr, err := ioutil.ReadAll(migration) - if err != nil { - return err - } - - query := string(migr[:]) - if _, err := m.db.Exec(query); err != nil { - return database.Error{OrigErr: err, Err: "migration failed", Query: migr} - } - - return nil -} - -func (m *Mysql) SetVersion(version int, dirty bool) error { - tx, err := m.db.Begin() - if err != nil { - return &database.Error{OrigErr: err, Err: "transaction start failed"} - } - - query := "TRUNCATE `" + m.config.MigrationsTable + "`" - if _, err := tx.Exec(query); err != nil { - tx.Rollback() - return &database.Error{OrigErr: err, Query: []byte(query)} - } - - if version >= 0 { - query := "INSERT INTO `" + m.config.MigrationsTable + "` (version, dirty) VALUES (?, ?)" - if _, err := tx.Exec(query, version, dirty); err != nil { - tx.Rollback() - return &database.Error{OrigErr: err, Query: []byte(query)} - } - } - - if err := tx.Commit(); err != nil { - return &database.Error{OrigErr: err, Err: "transaction commit failed"} - } - - return nil -} - -func (m *Mysql) Version() (version int, dirty bool, err error) { - query := "SELECT version, dirty FROM `" + m.config.MigrationsTable + "` LIMIT 1" - err = m.db.QueryRow(query).Scan(&version, &dirty) - switch { - case err == sql.ErrNoRows: - return database.NilVersion, false, nil - - case err != nil: - if e, ok := err.(*mysql.MySQLError); ok { - if e.Number == 0 { - return database.NilVersion, false, nil - } - } - return 0, false, &database.Error{OrigErr: err, Query: []byte(query)} - -
default: - return version, dirty, nil - } -} - -func (m *Mysql) Drop() error { - // select all tables - query := `SHOW TABLES LIKE '%'` - tables, err := m.db.Query(query) - if err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - defer tables.Close() - - // delete one table after another - tableNames := make([]string, 0) - for tables.Next() { - var tableName string - if err := tables.Scan(&tableName); err != nil { - return err - } - if len(tableName) > 0 { - tableNames = append(tableNames, tableName) - } - } - - if len(tableNames) > 0 { - // delete one by one ... - for _, t := range tableNames { - query = "DROP TABLE IF EXISTS `" + t + "` CASCADE" - if _, err := m.db.Exec(query); err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - } - if err := m.ensureVersionTable(); err != nil { - return err - } - } - - return nil -} - -func (m *Mysql) ensureVersionTable() error { - // check if migration table exists - var result string - query := `SHOW TABLES LIKE "` + m.config.MigrationsTable + `"` - if err := m.db.QueryRow(query).Scan(&result); err != nil { - if err != sql.ErrNoRows { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - } else { - return nil - } - - // if not, create the empty migration table - query = "CREATE TABLE `" + m.config.MigrationsTable + "` (version bigint not null primary key, dirty boolean not null)" - if _, err := m.db.Exec(query); err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - return nil -} - -// Returns the bool value of the input. -// The 2nd return value indicates if the input was a valid bool value -// See https://github.com/go-sql-driver/mysql/blob/a059889267dc7170331388008528b3b44479bffb/utils.go#L71 -func readBool(input string) (value bool, valid bool) { - switch input { - case "1", "true", "TRUE", "True": - return true, true - case "0", "false", "FALSE", "False": - return false, true - } - - // Not a valid bool value - return -} diff --git a/vendor/github.com/mattes/migrate/database/mysql/mysql_test.go b/vendor/github.com/mattes/migrate/database/mysql/mysql_test.go deleted file mode 100644 index 3eb22ef03..000000000 --- a/vendor/github.com/mattes/migrate/database/mysql/mysql_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package mysql - -import ( - "database/sql" - sqldriver "database/sql/driver" - "fmt" - // "io/ioutil" - // "log" - "testing" - - // "github.com/go-sql-driver/mysql" - dt "github.com/mattes/migrate/database/testing" - mt "github.com/mattes/migrate/testing" -) - -var versions = []mt.Version{ - {"mysql:8", []string{"MYSQL_ROOT_PASSWORD=root", "MYSQL_DATABASE=public"}}, - {"mysql:5.7", []string{"MYSQL_ROOT_PASSWORD=root", "MYSQL_DATABASE=public"}}, - {"mysql:5.6", []string{"MYSQL_ROOT_PASSWORD=root", "MYSQL_DATABASE=public"}}, - {"mysql:5.5", []string{"MYSQL_ROOT_PASSWORD=root", "MYSQL_DATABASE=public"}}, -} - -func isReady(i mt.Instance) bool { - db, err := sql.Open("mysql", fmt.Sprintf("root:root@tcp(%v:%v)/public", i.Host(), i.Port())) - if err != nil { - return false - } - defer db.Close() - err = db.Ping() - - if err == sqldriver.ErrBadConn { - return false - } - - return true -} - -func Test(t *testing.T) { - // mysql.SetLogger(mysql.Logger(log.New(ioutil.Discard, "", log.Ltime))) - - mt.ParallelTest(t, versions, isReady, - func(t *testing.T, i mt.Instance) { - p := &Mysql{} - addr := fmt.Sprintf("mysql://root:root@tcp(%v:%v)/public", i.Host(), i.Port()) - d, err := p.Open(addr) - if err != nil { - t.Fatalf("%v", err) - } - dt.Test(t, d, []byte("SELECT 
1")) - - // check ensureVersionTable - if err := d.(*Mysql).ensureVersionTable(); err != nil { - t.Fatal(err) - } - // check again - if err := d.(*Mysql).ensureVersionTable(); err != nil { - t.Fatal(err) - } - }) -} diff --git a/vendor/github.com/mattes/migrate/database/neo4j/README.md b/vendor/github.com/mattes/migrate/database/neo4j/README.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/mattes/migrate/database/postgres/README.md b/vendor/github.com/mattes/migrate/database/postgres/README.md deleted file mode 100644 index f6312392b..000000000 --- a/vendor/github.com/mattes/migrate/database/postgres/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# postgres - -`postgres://user:password@host:port/dbname?query` (`postgresql://` works, too) - -| URL Query | WithInstance Config | Description | -|------------|---------------------|-------------| -| `x-migrations-table` | `MigrationsTable` | Name of the migrations table | -| `dbname` | `DatabaseName` | The name of the database to connect to | -| `search_path` | | This variable specifies the order in which schemas are searched when an object is referenced by a simple name with no schema specified. | -| `user` | | The user to sign in as | -| `password` | | The user's password | -| `host` | | The host to connect to. Values that start with / are for unix domain sockets. (default is localhost) | -| `port` | | The port to bind to. (default is 5432) | -| `fallback_application_name` | | An application_name to fall back to if one isn't provided. | -| `connect_timeout` | | Maximum wait for connection, in seconds. Zero or not specified means wait indefinitely. | -| `sslcert` | | Cert file location. The file must contain PEM encoded data. | -| `sslkey` | | Key file location. The file must contain PEM encoded data. | -| `sslrootcert` | | The location of the root certificate file. The file must contain PEM encoded data. | -| `sslmode` | | Whether or not to use SSL (disable\|require\|verify-ca\|verify-full) | - - -## Upgrading from v1 - -1. Write down the current migration version from schema_migrations -2. `DROP TABLE schema_migrations` -3. Wrap your existing migrations in transactions ([BEGIN/COMMIT](https://www.postgresql.org/docs/current/static/transaction-iso.html)) if you use multiple statements within one migration. -4. Download and install the latest migrate version. -5. Force the current migration version with `migrate force `.
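As with the mysql driver, the URL query route is the quickest way in; a short sketch using parameters from the table above (credentials, schema name, and paths are placeholders):

```go
package main

import (
	"log"

	"github.com/mattes/migrate"
	_ "github.com/mattes/migrate/database/postgres"
	_ "github.com/mattes/migrate/source/file"
)

func main() {
	// x-migrations-table is consumed by the driver itself; search_path and
	// sslmode are passed through to lib/pq unchanged.
	m, err := migrate.New(
		"file:///migrations",
		"postgres://user:password@localhost:5432/dbname?sslmode=disable&x-migrations-table=my_migrations&search_path=myschema")
	if err != nil {
		log.Fatal(err)
	}
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		log.Fatal(err)
	}
}
```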
diff --git a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1085649617_create_users_table.down.sql b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1085649617_create_users_table.down.sql deleted file mode 100644 index c99ddcdc8..000000000 --- a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1085649617_create_users_table.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE IF EXISTS users; diff --git a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1085649617_create_users_table.up.sql b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1085649617_create_users_table.up.sql deleted file mode 100644 index 92897dcab..000000000 --- a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1085649617_create_users_table.up.sql +++ /dev/null @@ -1,5 +0,0 @@ -CREATE TABLE users ( - user_id integer unique, - name varchar(40), - email varchar(40) -); diff --git a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.down.sql b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.down.sql deleted file mode 100644 index 940c60712..000000000 --- a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.down.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE users DROP COLUMN IF EXISTS city; diff --git a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.up.sql b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.up.sql deleted file mode 100644 index 67823edc9..000000000 --- a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.up.sql +++ /dev/null @@ -1,3 +0,0 @@ -ALTER TABLE users ADD COLUMN city varchar(100); - - diff --git a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.down.sql b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.down.sql deleted file mode 100644 index 3e87dd229..000000000 --- a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP INDEX IF EXISTS users_email_index; diff --git a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.up.sql b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.up.sql deleted file mode 100644 index fbeb4ab4e..000000000 --- a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.up.sql +++ /dev/null @@ -1,3 +0,0 @@ -CREATE UNIQUE INDEX CONCURRENTLY users_email_index ON users (email); - --- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. 
diff --git a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1385949617_create_books_table.down.sql b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1385949617_create_books_table.down.sql deleted file mode 100644 index 1a0b1a214..000000000 --- a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1385949617_create_books_table.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE IF EXISTS books; diff --git a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1385949617_create_books_table.up.sql b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1385949617_create_books_table.up.sql deleted file mode 100644 index f1503b518..000000000 --- a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1385949617_create_books_table.up.sql +++ /dev/null @@ -1,5 +0,0 @@ -CREATE TABLE books ( - user_id integer, - name varchar(40), - author varchar(40) -); diff --git a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.down.sql b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.down.sql deleted file mode 100644 index 3a5187689..000000000 --- a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE IF EXISTS movies; diff --git a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.up.sql b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.up.sql deleted file mode 100644 index f0ef5943b..000000000 --- a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.up.sql +++ /dev/null @@ -1,5 +0,0 @@ -CREATE TABLE movies ( - user_id integer, - name varchar(40), - director varchar(40) -); diff --git a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1585849751_just_a_comment.up.sql b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1585849751_just_a_comment.up.sql deleted file mode 100644 index 9b6b57a61..000000000 --- a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1585849751_just_a_comment.up.sql +++ /dev/null @@ -1 +0,0 @@ --- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1685849751_another_comment.up.sql b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1685849751_another_comment.up.sql deleted file mode 100644 index 9b6b57a61..000000000 --- a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1685849751_another_comment.up.sql +++ /dev/null @@ -1 +0,0 @@ --- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. 
diff --git a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1785849751_another_comment.up.sql b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1785849751_another_comment.up.sql deleted file mode 100644 index 9b6b57a61..000000000 --- a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1785849751_another_comment.up.sql +++ /dev/null @@ -1 +0,0 @@ --- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1885849751_another_comment.up.sql b/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1885849751_another_comment.up.sql deleted file mode 100644 index 9b6b57a61..000000000 --- a/vendor/github.com/mattes/migrate/database/postgres/examples/migrations/1885849751_another_comment.up.sql +++ /dev/null @@ -1 +0,0 @@ --- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/mattes/migrate/database/postgres/postgres.go b/vendor/github.com/mattes/migrate/database/postgres/postgres.go deleted file mode 100644 index 3358b4a0a..000000000 --- a/vendor/github.com/mattes/migrate/database/postgres/postgres.go +++ /dev/null @@ -1,273 +0,0 @@ -package postgres - -import ( - "database/sql" - "fmt" - "io" - "io/ioutil" - nurl "net/url" - - "github.com/lib/pq" - "github.com/mattes/migrate" - "github.com/mattes/migrate/database" -) - -func init() { - db := Postgres{} - database.Register("postgres", &db) - database.Register("postgresql", &db) -} - -var DefaultMigrationsTable = "schema_migrations" - -var ( - ErrNilConfig = fmt.Errorf("no config") - ErrNoDatabaseName = fmt.Errorf("no database name") - ErrNoSchema = fmt.Errorf("no schema") - ErrDatabaseDirty = fmt.Errorf("database is dirty") -) - -type Config struct { - MigrationsTable string - DatabaseName string -} - -type Postgres struct { - db *sql.DB - isLocked bool - - // Open and WithInstance need to guarantee that config is never nil - config *Config -} - -func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) { - if config == nil { - return nil, ErrNilConfig - } - - if err := instance.Ping(); err != nil { - return nil, err - } - - query := `SELECT CURRENT_DATABASE()` - var databaseName string - if err := instance.QueryRow(query).Scan(&databaseName); err != nil { - return nil, &database.Error{OrigErr: err, Query: []byte(query)} - } - - if len(databaseName) == 0 { - return nil, ErrNoDatabaseName - } - - config.DatabaseName = databaseName - - if len(config.MigrationsTable) == 0 { - config.MigrationsTable = DefaultMigrationsTable - } - - px := &Postgres{ - db: instance, - config: config, - } - - if err := px.ensureVersionTable(); err != nil { - return nil, err - } - - return px, nil -} - -func (p *Postgres) Open(url string) (database.Driver, error) { - purl, err := nurl.Parse(url) - if err != nil { - return nil, err - } - - db, err := sql.Open("postgres", migrate.FilterCustomQuery(purl).String()) - if err != nil { - return nil, err - } - - migrationsTable := purl.Query().Get("x-migrations-table") - if len(migrationsTable) == 0 { - migrationsTable = DefaultMigrationsTable - } - - px, err := WithInstance(db, &Config{ - DatabaseName:
purl.Path, - MigrationsTable: migrationsTable, - }) - if err != nil { - return nil, err - } - - return px, nil -} - -func (p *Postgres) Close() error { - return p.db.Close() -} - -// https://www.postgresql.org/docs/9.6/static/explicit-locking.html#ADVISORY-LOCKS -func (p *Postgres) Lock() error { - if p.isLocked { - return database.ErrLocked - } - - aid, err := database.GenerateAdvisoryLockId(p.config.DatabaseName) - if err != nil { - return err - } - - // This will either obtain the lock immediately and return true, - // or return false if the lock cannot be acquired immediately. - query := `SELECT pg_try_advisory_lock($1)` - var success bool - if err := p.db.QueryRow(query, aid).Scan(&success); err != nil { - return &database.Error{OrigErr: err, Err: "try lock failed", Query: []byte(query)} - } - - if success { - p.isLocked = true - return nil - } - - return database.ErrLocked -} - -func (p *Postgres) Unlock() error { - if !p.isLocked { - return nil - } - - aid, err := database.GenerateAdvisoryLockId(p.config.DatabaseName) - if err != nil { - return err - } - - query := `SELECT pg_advisory_unlock($1)` - if _, err := p.db.Exec(query, aid); err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - p.isLocked = false - return nil -} - -func (p *Postgres) Run(migration io.Reader) error { - migr, err := ioutil.ReadAll(migration) - if err != nil { - return err - } - - // run migration - query := string(migr[:]) - if _, err := p.db.Exec(query); err != nil { - // TODO: cast to postgres error and get line number - return database.Error{OrigErr: err, Err: "migration failed", Query: migr} - } - - return nil -} - -func (p *Postgres) SetVersion(version int, dirty bool) error { - tx, err := p.db.Begin() - if err != nil { - return &database.Error{OrigErr: err, Err: "transaction start failed"} - } - - query := `TRUNCATE "` + p.config.MigrationsTable + `"` - if _, err := tx.Exec(query); err != nil { - tx.Rollback() - return &database.Error{OrigErr: err, Query: []byte(query)} - } - - if version >= 0 { - query = `INSERT INTO "` + p.config.MigrationsTable + `" (version, dirty) VALUES ($1, $2)` - if _, err := tx.Exec(query, version, dirty); err != nil { - tx.Rollback() - return &database.Error{OrigErr: err, Query: []byte(query)} - } - } - - if err := tx.Commit(); err != nil { - return &database.Error{OrigErr: err, Err: "transaction commit failed"} - } - - return nil -} - -func (p *Postgres) Version() (version int, dirty bool, err error) { - query := `SELECT version, dirty FROM "` + p.config.MigrationsTable + `" LIMIT 1` - err = p.db.QueryRow(query).Scan(&version, &dirty) - switch { - case err == sql.ErrNoRows: - return database.NilVersion, false, nil - - case err != nil: - if e, ok := err.(*pq.Error); ok { - if e.Code.Name() == "undefined_table" { - return database.NilVersion, false, nil - } - } - return 0, false, &database.Error{OrigErr: err, Query: []byte(query)} - - default: - return version, dirty, nil - } -} - -func (p *Postgres) Drop() error { - // select all tables in current schema - query := `SELECT table_name FROM information_schema.tables WHERE table_schema=(SELECT current_schema())` - tables, err := p.db.Query(query) - if err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - defer tables.Close() - - // delete one table after another - tableNames := make([]string, 0) - for tables.Next() { - var tableName string - if err := tables.Scan(&tableName); err != nil { - return err - } - if len(tableName) > 0 { - tableNames = append(tableNames,
tableName) - } - } - - if len(tableNames) > 0 { - // delete one by one ... - for _, t := range tableNames { - query = `DROP TABLE IF EXISTS ` + t + ` CASCADE` - if _, err := p.db.Exec(query); err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - } - if err := p.ensureVersionTable(); err != nil { - return err - } - } - - return nil -} - -func (p *Postgres) ensureVersionTable() error { - // check if migration table exists - var count int - query := `SELECT COUNT(1) FROM information_schema.tables WHERE table_name = $1 AND table_schema = (SELECT current_schema()) LIMIT 1` - if err := p.db.QueryRow(query, p.config.MigrationsTable).Scan(&count); err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - if count == 1 { - return nil - } - - // if not, create the empty migration table - query = `CREATE TABLE "` + p.config.MigrationsTable + `" (version bigint not null primary key, dirty boolean not null)` - if _, err := p.db.Exec(query); err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - return nil -} diff --git a/vendor/github.com/mattes/migrate/database/postgres/postgres_test.go b/vendor/github.com/mattes/migrate/database/postgres/postgres_test.go deleted file mode 100644 index 9a367a059..000000000 --- a/vendor/github.com/mattes/migrate/database/postgres/postgres_test.go +++ /dev/null @@ -1,150 +0,0 @@ -package postgres - -// error codes https://github.com/lib/pq/blob/master/error.go - -import ( - "bytes" - "database/sql" - "fmt" - "io" - "testing" - - "github.com/lib/pq" - dt "github.com/mattes/migrate/database/testing" - mt "github.com/mattes/migrate/testing" -) - -var versions = []mt.Version{ - {Image: "postgres:9.6"}, - {Image: "postgres:9.5"}, - {Image: "postgres:9.4"}, - {Image: "postgres:9.3"}, - {Image: "postgres:9.2"}, -} - -func isReady(i mt.Instance) bool { - db, err := sql.Open("postgres", fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable", i.Host(), i.Port())) - if err != nil { - return false - } - defer db.Close() - err = db.Ping() - if err == io.EOF { - return false - - } else if e, ok := err.(*pq.Error); ok { - if e.Code.Name() == "cannot_connect_now" { - return false - } - } - - return true -} - -func Test(t *testing.T) { - mt.ParallelTest(t, versions, isReady, - func(t *testing.T, i mt.Instance) { - p := &Postgres{} - addr := fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable", i.Host(), i.Port()) - d, err := p.Open(addr) - if err != nil { - t.Fatalf("%v", err) - } - dt.Test(t, d, []byte("SELECT 1")) - }) -} - -func TestMultiStatement(t *testing.T) { - mt.ParallelTest(t, versions, isReady, - func(t *testing.T, i mt.Instance) { - p := &Postgres{} - addr := fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable", i.Host(), i.Port()) - d, err := p.Open(addr) - if err != nil { - t.Fatalf("%v", err) - } - if err := d.Run(bytes.NewReader([]byte("CREATE TABLE foo (foo text); CREATE TABLE bar (bar text);"))); err != nil { - t.Fatalf("expected err to be nil, got %v", err) - } - - // make sure second table exists - var exists bool - if err := d.(*Postgres).db.QueryRow("SELECT EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = 'bar' AND table_schema = (SELECT current_schema()))").Scan(&exists); err != nil { - t.Fatal(err) - } - if !exists { - t.Fatalf("expected table bar to exist") - } - }) -} - -func TestFilterCustomQuery(t *testing.T) { - mt.ParallelTest(t, versions, isReady, - func(t *testing.T, i mt.Instance) { - p := &Postgres{} - addr := 
fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable&x-custom=foobar", i.Host(), i.Port()) - _, err := p.Open(addr) - if err != nil { - t.Fatalf("%v", err) - } - }) -} - -func TestWithSchema(t *testing.T) { - mt.ParallelTest(t, versions, isReady, - func(t *testing.T, i mt.Instance) { - p := &Postgres{} - addr := fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable", i.Host(), i.Port()) - d, err := p.Open(addr) - if err != nil { - t.Fatalf("%v", err) - } - - // create foobar schema - if err := d.Run(bytes.NewReader([]byte("CREATE SCHEMA foobar AUTHORIZATION postgres"))); err != nil { - t.Fatal(err) - } - if err := d.SetVersion(1, false); err != nil { - t.Fatal(err) - } - - // re-connect using that schema - d2, err := p.Open(fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable&search_path=foobar", i.Host(), i.Port())) - if err != nil { - t.Fatalf("%v", err) - } - - version, _, err := d2.Version() - if err != nil { - t.Fatal(err) - } - if version != -1 { - t.Fatal("expected NilVersion") - } - - // now update version and compare - if err := d2.SetVersion(2, false); err != nil { - t.Fatal(err) - } - version, _, err = d2.Version() - if err != nil { - t.Fatal(err) - } - if version != 2 { - t.Fatal("expected version 2") - } - - // meanwhile, the public schema still has the other version - version, _, err = d.Version() - if err != nil { - t.Fatal(err) - } - if version != 1 { - t.Fatal("expected version 1") - } - }) -} - -func TestWithInstance(t *testing.T) { - -} diff --git a/vendor/github.com/mattes/migrate/database/ql/README.md b/vendor/github.com/mattes/migrate/database/ql/README.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/mattes/migrate/database/ql/migration/33_create_table.down.sql b/vendor/github.com/mattes/migrate/database/ql/migration/33_create_table.down.sql deleted file mode 100644 index 72d18c554..000000000 --- a/vendor/github.com/mattes/migrate/database/ql/migration/33_create_table.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE IF EXISTS pets; \ No newline at end of file diff --git a/vendor/github.com/mattes/migrate/database/ql/migration/33_create_table.up.sql b/vendor/github.com/mattes/migrate/database/ql/migration/33_create_table.up.sql deleted file mode 100644 index 5ad3404d1..000000000 --- a/vendor/github.com/mattes/migrate/database/ql/migration/33_create_table.up.sql +++ /dev/null @@ -1,3 +0,0 @@ -CREATE TABLE pets ( - name string -); \ No newline at end of file diff --git a/vendor/github.com/mattes/migrate/database/ql/migration/44_alter_table.down.sql b/vendor/github.com/mattes/migrate/database/ql/migration/44_alter_table.down.sql deleted file mode 100644 index 72d18c554..000000000 --- a/vendor/github.com/mattes/migrate/database/ql/migration/44_alter_table.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE IF EXISTS pets; \ No newline at end of file diff --git a/vendor/github.com/mattes/migrate/database/ql/migration/44_alter_table.up.sql b/vendor/github.com/mattes/migrate/database/ql/migration/44_alter_table.up.sql deleted file mode 100644 index 3993698de..000000000 --- a/vendor/github.com/mattes/migrate/database/ql/migration/44_alter_table.up.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE pets ADD predator bool; \ No newline at end of file diff --git a/vendor/github.com/mattes/migrate/database/ql/ql.go b/vendor/github.com/mattes/migrate/database/ql/ql.go deleted file mode 100644 index 46722a9c2..000000000 --- a/vendor/github.com/mattes/migrate/database/ql/ql.go +++ /dev/null @@ -1,212 +0,0 @@ -package ql -
-import ( - "database/sql" - "fmt" - "io" - "io/ioutil" - "strings" - - nurl "net/url" - - _ "github.com/cznic/ql/driver" - "github.com/mattes/migrate" - "github.com/mattes/migrate/database" -) - -func init() { - database.Register("ql", &Ql{}) -} - -var DefaultMigrationsTable = "schema_migrations" -var ( - ErrDatabaseDirty = fmt.Errorf("database is dirty") - ErrNilConfig = fmt.Errorf("no config") - ErrNoDatabaseName = fmt.Errorf("no database name") - ErrAppendPEM = fmt.Errorf("failed to append PEM") -) - -type Config struct { - MigrationsTable string - DatabaseName string -} - -type Ql struct { - db *sql.DB - isLocked bool - - config *Config -} - -func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) { - if config == nil { - return nil, ErrNilConfig - } - - if err := instance.Ping(); err != nil { - return nil, err - } - if len(config.MigrationsTable) == 0 { - config.MigrationsTable = DefaultMigrationsTable - } - - mx := &Ql{ - db: instance, - config: config, - } - if err := mx.ensureVersionTable(); err != nil { - return nil, err - } - return mx, nil -} -func (m *Ql) ensureVersionTable() error { - tx, err := m.db.Begin() - if err != nil { - return err - } - if _, err := tx.Exec(fmt.Sprintf(` - CREATE TABLE IF NOT EXISTS %s (version uint64,dirty bool); - CREATE UNIQUE INDEX IF NOT EXISTS version_unique ON %s (version); -`, m.config.MigrationsTable, m.config.MigrationsTable)); err != nil { - if err := tx.Rollback(); err != nil { - return err - } - return err - } - if err := tx.Commit(); err != nil { - return err - } - return nil -} - -func (m *Ql) Open(url string) (database.Driver, error) { - purl, err := nurl.Parse(url) - if err != nil { - return nil, err - } - dbfile := strings.Replace(migrate.FilterCustomQuery(purl).String(), "ql://", "", 1) - db, err := sql.Open("ql", dbfile) - if err != nil { - return nil, err - } - migrationsTable := purl.Query().Get("x-migrations-table") - if len(migrationsTable) == 0 { - migrationsTable = DefaultMigrationsTable - } - mx, err := WithInstance(db, &Config{ - DatabaseName: purl.Path, - MigrationsTable: migrationsTable, - }) - if err != nil { - return nil, err - } - return mx, nil -} -func (m *Ql) Close() error { - return m.db.Close() -} -func (m *Ql) Drop() error { - query := `SELECT Name FROM __Table` - tables, err := m.db.Query(query) - if err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - defer tables.Close() - tableNames := make([]string, 0) - for tables.Next() { - var tableName string - if err := tables.Scan(&tableName); err != nil { - return err - } - if len(tableName) > 0 { - if strings.HasPrefix(tableName, "__") == false { - tableNames = append(tableNames, tableName) - } - } - } - if len(tableNames) > 0 { - for _, t := range tableNames { - query := "DROP TABLE " + t - err = m.executeQuery(query) - if err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - } - if err := m.ensureVersionTable(); err != nil { - return err - } - } - - return nil -} -func (m *Ql) Lock() error { - if m.isLocked { - return database.ErrLocked - } - m.isLocked = true - return nil -} -func (m *Ql) Unlock() error { - if !m.isLocked { - return nil - } - m.isLocked = false - return nil -} -func (m *Ql) Run(migration io.Reader) error { - migr, err := ioutil.ReadAll(migration) - if err != nil { - return err - } - query := string(migr[:]) - - return m.executeQuery(query) -} -func (m *Ql) executeQuery(query string) error { - tx, err := m.db.Begin() - if err != nil { - return &database.Error{OrigErr: err, 
Err: "transaction start failed"} - } - if _, err := tx.Exec(query); err != nil { - tx.Rollback() - return &database.Error{OrigErr: err, Query: []byte(query)} - } - if err := tx.Commit(); err != nil { - return &database.Error{OrigErr: err, Err: "transaction commit failed"} - } - return nil -} -func (m *Ql) SetVersion(version int, dirty bool) error { - tx, err := m.db.Begin() - if err != nil { - return &database.Error{OrigErr: err, Err: "transaction start failed"} - } - - query := "TRUNCATE TABLE " + m.config.MigrationsTable - if _, err := tx.Exec(query); err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - - if version >= 0 { - query := fmt.Sprintf(`INSERT INTO %s (version, dirty) VALUES (%d, %t)`, m.config.MigrationsTable, version, dirty) - if _, err := tx.Exec(query); err != nil { - tx.Rollback() - return &database.Error{OrigErr: err, Query: []byte(query)} - } - } - - if err := tx.Commit(); err != nil { - return &database.Error{OrigErr: err, Err: "transaction commit failed"} - } - - return nil -} - -func (m *Ql) Version() (version int, dirty bool, err error) { - query := "SELECT version, dirty FROM " + m.config.MigrationsTable + " LIMIT 1" - err = m.db.QueryRow(query).Scan(&version, &dirty) - if err != nil { - return database.NilVersion, false, nil - } - return version, dirty, nil -} diff --git a/vendor/github.com/mattes/migrate/database/ql/ql_test.go b/vendor/github.com/mattes/migrate/database/ql/ql_test.go deleted file mode 100644 index f04383fa2..000000000 --- a/vendor/github.com/mattes/migrate/database/ql/ql_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package ql - -import ( - "database/sql" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" - - _ "github.com/cznic/ql/driver" - "github.com/mattes/migrate" - dt "github.com/mattes/migrate/database/testing" - _ "github.com/mattes/migrate/source/file" -) - -func Test(t *testing.T) { - dir, err := ioutil.TempDir("", "ql-driver-test") - if err != nil { - return - } - defer func() { - os.RemoveAll(dir) - }() - fmt.Printf("DB path : %s\n", filepath.Join(dir, "ql.db")) - p := &Ql{} - addr := fmt.Sprintf("ql://%s", filepath.Join(dir, "ql.db")) - d, err := p.Open(addr) - if err != nil { - t.Fatalf("%v", err) - } - - db, err := sql.Open("ql", filepath.Join(dir, "ql.db")) - if err != nil { - return - } - defer func() { - if err := db.Close(); err != nil { - return - } - }() - dt.Test(t, d, []byte("CREATE TABLE t (Qty int, Name string);")) - driver, err := WithInstance(db, &Config{}) - if err != nil { - t.Fatalf("%v", err) - } - if err := d.Drop(); err != nil { - t.Fatal(err) - } - - m, err := migrate.NewWithDatabaseInstance( - "file://./migration", - "ql", driver) - if err != nil { - t.Fatalf("%v", err) - } - fmt.Println("UP") - err = m.Up() - if err != nil { - t.Fatalf("%v", err) - } -} diff --git a/vendor/github.com/mattes/migrate/database/redshift/README.md b/vendor/github.com/mattes/migrate/database/redshift/README.md deleted file mode 100644 index a03d109ae..000000000 --- a/vendor/github.com/mattes/migrate/database/redshift/README.md +++ /dev/null @@ -1,6 +0,0 @@ -Redshift -=== - -This provides a Redshift driver for migrations. It is used whenever the URL of the database starts with `redshift://`. - -Redshift is PostgreSQL compatible but has some specific features (or lack thereof) that require slightly different behavior. 
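Since the wrapper below only rewrites the URL scheme and disables locking, usage is identical to the postgres driver apart from the scheme; a minimal sketch (cluster address and credentials are placeholders):

```go
package main

import (
	"log"

	"github.com/mattes/migrate"
	_ "github.com/mattes/migrate/database/redshift" // registers the redshift:// scheme in init()
	_ "github.com/mattes/migrate/source/file"
)

func main() {
	m, err := migrate.New(
		"file:///migrations",
		"redshift://user:password@cluster.example.com:5439/dbname")
	if err != nil {
		log.Fatal(err)
	}
	// Apply one up migration; Steps(-1) would roll one back.
	if err := m.Steps(1); err != nil {
		log.Fatal(err)
	}
}
```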
diff --git a/vendor/github.com/mattes/migrate/database/redshift/redshift.go b/vendor/github.com/mattes/migrate/database/redshift/redshift.go deleted file mode 100644 index 99cdde725..000000000 --- a/vendor/github.com/mattes/migrate/database/redshift/redshift.go +++ /dev/null @@ -1,46 +0,0 @@ -package redshift - -import ( - "net/url" - - "github.com/mattes/migrate/database" - "github.com/mattes/migrate/database/postgres" -) - -// init registers the driver under the name 'redshift' -func init() { - db := new(Redshift) - db.Driver = new(postgres.Postgres) - - database.Register("redshift", db) -} - -// Redshift is a wrapper around the PostgreSQL driver which implements Redshift-specific behavior. -// -// Currently, the only different behaviour is the lack of locking in Redshift. The (Un)Lock() method(s) have been overridden from the PostgreSQL adapter to simply return nil. -type Redshift struct { - // The wrapped PostgreSQL driver. - database.Driver -} - -// Open implements the database.Driver interface by parsing the URL, switching the scheme from "redshift" to "postgres", and delegating to the underlying PostgreSQL driver. -func (driver *Redshift) Open(dsn string) (database.Driver, error) { - parsed, err := url.Parse(dsn) - if err != nil { - return nil, err - } - - parsed.Scheme = "postgres" - psql, err := driver.Driver.Open(parsed.String()) - if err != nil { - return nil, err - } - - return &Redshift{Driver: psql}, nil -} - -// Lock implements the database.Driver interface by not locking and returning nil. -func (driver *Redshift) Lock() error { return nil } - -// Unlock implements the database.Driver interface by not unlocking and returning nil. -func (driver *Redshift) Unlock() error { return nil } diff --git a/vendor/github.com/mattes/migrate/database/shell/README.md b/vendor/github.com/mattes/migrate/database/shell/README.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/mattes/migrate/database/sqlite/README.md b/vendor/github.com/mattes/migrate/database/sqlite/README.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/mattes/migrate/database/stub/stub.go b/vendor/github.com/mattes/migrate/database/stub/stub.go deleted file mode 100644 index 172bcd37b..000000000 --- a/vendor/github.com/mattes/migrate/database/stub/stub.go +++ /dev/null @@ -1,95 +0,0 @@ -package stub - -import ( - "io" - "io/ioutil" - "reflect" - - "github.com/mattes/migrate/database" -) - -func init() { - database.Register("stub", &Stub{}) -} - -type Stub struct { - Url string - Instance interface{} - CurrentVersion int - MigrationSequence []string - LastRunMigration []byte // todo: make []string - IsDirty bool - IsLocked bool - - Config *Config -} - -func (s *Stub) Open(url string) (database.Driver, error) { - return &Stub{ - Url: url, - CurrentVersion: -1, - MigrationSequence: make([]string, 0), - Config: &Config{}, - }, nil -} - -type Config struct{} - -func WithInstance(instance interface{}, config *Config) (database.Driver, error) { - return &Stub{ - Instance: instance, - CurrentVersion: -1, - MigrationSequence: make([]string, 0), - Config: config, - }, nil -} - -func (s *Stub) Close() error { - return nil -} - -func (s *Stub) Lock() error { - if s.IsLocked { - return database.ErrLocked - } - s.IsLocked = true - return nil -} - -func (s *Stub) Unlock() error { - s.IsLocked = false - return nil -} - -func (s *Stub) Run(migration io.Reader) error { - m, err := ioutil.ReadAll(migration) - if err != nil { - return err - } - s.LastRunMigration = m - 
s.MigrationSequence = append(s.MigrationSequence, string(m[:])) - return nil -} - -func (s *Stub) SetVersion(version int, state bool) error { - s.CurrentVersion = version - s.IsDirty = state - return nil -} - -func (s *Stub) Version() (version int, dirty bool, err error) { - return s.CurrentVersion, s.IsDirty, nil -} - -const DROP = "DROP" - -func (s *Stub) Drop() error { - s.CurrentVersion = -1 - s.LastRunMigration = nil - s.MigrationSequence = append(s.MigrationSequence, DROP) - return nil -} - -func (s *Stub) EqualSequence(seq []string) bool { - return reflect.DeepEqual(seq, s.MigrationSequence) -} diff --git a/vendor/github.com/mattes/migrate/database/stub/stub_test.go b/vendor/github.com/mattes/migrate/database/stub/stub_test.go deleted file mode 100644 index 3d8b8926c..000000000 --- a/vendor/github.com/mattes/migrate/database/stub/stub_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package stub - -import ( - "testing" - - dt "github.com/mattes/migrate/database/testing" -) - -func Test(t *testing.T) { - s := &Stub{} - d, err := s.Open("") - if err != nil { - t.Fatal(err) - } - dt.Test(t, d, []byte("/* foobar migration */")) -} diff --git a/vendor/github.com/mattes/migrate/database/testing/testing.go b/vendor/github.com/mattes/migrate/database/testing/testing.go deleted file mode 100644 index 4ab090d1a..000000000 --- a/vendor/github.com/mattes/migrate/database/testing/testing.go +++ /dev/null @@ -1,138 +0,0 @@ -// Package testing has the database tests. -// All database drivers must pass the Test function. -// This lives in it's own package so it stays a test dependency. -package testing - -import ( - "bytes" - "fmt" - "io" - "testing" - "time" - - "github.com/mattes/migrate/database" -) - -// Test runs tests against database implementations. -func Test(t *testing.T, d database.Driver, migration []byte) { - if migration == nil { - panic("test must provide migration reader") - } - - TestNilVersion(t, d) // test first - TestLockAndUnlock(t, d) - TestRun(t, d, bytes.NewReader(migration)) - TestDrop(t, d) - TestSetVersion(t, d) // also tests Version() -} - -func TestNilVersion(t *testing.T, d database.Driver) { - v, _, err := d.Version() - if err != nil { - t.Fatal(err) - } - if v != database.NilVersion { - t.Fatalf("Version: expected version to be NilVersion (-1), got %v", v) - } -} - -func TestLockAndUnlock(t *testing.T, d database.Driver) { - // add a timeout, in case there is a deadlock - done := make(chan bool, 1) - go func() { - timeout := time.After(15 * time.Second) - for { - select { - case <-done: - return - case <-timeout: - panic(fmt.Sprintf("Timeout after 15 seconds. Looks like a deadlock in Lock/UnLock.\n%#v", d)) - } - } - }() - defer func() { - done <- true - }() - - // run the locking test ... 
- - if err := d.Lock(); err != nil { - t.Fatal(err) - } - - // try to acquire lock again - if err := d.Lock(); err == nil { - t.Fatal("Lock: expected err not to be nil") - } - - // unlock - if err := d.Unlock(); err != nil { - t.Fatal(err) - } - - // try to lock - if err := d.Lock(); err != nil { - t.Fatal(err) - } - if err := d.Unlock(); err != nil { - t.Fatal(err) - } -} - -func TestRun(t *testing.T, d database.Driver, migration io.Reader) { - if migration == nil { - panic("migration can't be nil") - } - - if err := d.Run(migration); err != nil { - t.Fatal(err) - } -} - -func TestDrop(t *testing.T, d database.Driver) { - if err := d.Drop(); err != nil { - t.Fatal(err) - } -} - -func TestSetVersion(t *testing.T, d database.Driver) { - if err := d.SetVersion(1, true); err != nil { - t.Fatal(err) - } - - // call again - if err := d.SetVersion(1, true); err != nil { - t.Fatal(err) - } - - v, dirty, err := d.Version() - if err != nil { - t.Fatal(err) - } - if !dirty { - t.Fatal("expected dirty") - } - if v != 1 { - t.Fatal("expected version to be 1") - } - - if err := d.SetVersion(2, false); err != nil { - t.Fatal(err) - } - - // call again - if err := d.SetVersion(2, false); err != nil { - t.Fatal(err) - } - - v, dirty, err = d.Version() - if err != nil { - t.Fatal(err) - } - if dirty { - t.Fatal("expected not dirty") - } - if v != 2 { - t.Fatal("expected version to be 2") - } -} diff --git a/vendor/github.com/mattes/migrate/database/util.go b/vendor/github.com/mattes/migrate/database/util.go deleted file mode 100644 index c636a7abe..000000000 --- a/vendor/github.com/mattes/migrate/database/util.go +++ /dev/null @@ -1,15 +0,0 @@ -package database - -import ( - "fmt" - "hash/crc32" -) - -const advisoryLockIdSalt uint = 1486364155 - -// inspired by rails migrations, see https://goo.gl/8o9bCT -func GenerateAdvisoryLockId(databaseName string) (string, error) { - sum := crc32.ChecksumIEEE([]byte(databaseName)) - sum = sum * uint32(advisoryLockIdSalt) - return fmt.Sprintf("%v", sum), nil -} diff --git a/vendor/github.com/mattes/migrate/database/util_test.go b/vendor/github.com/mattes/migrate/database/util_test.go deleted file mode 100644 index 905c840b9..000000000 --- a/vendor/github.com/mattes/migrate/database/util_test.go +++ /dev/null @@ -1,12 +0,0 @@ -package database - -func TestGenerateAdvisoryLockId(t *testing.T) { - id, err := p.generateAdvisoryLockId("database_name") - if err != nil { - t.Errorf("expected err to be nil, got %v", err) - } - if len(id) == 0 { - t.Errorf("expected generated id not to be empty") - } - t.Logf("generated id: %v", id) -} diff --git a/vendor/github.com/mattes/migrate/log.go b/vendor/github.com/mattes/migrate/log.go deleted file mode 100644 index cb00b7798..000000000 --- a/vendor/github.com/mattes/migrate/log.go +++ /dev/null @@ -1,12 +0,0 @@ -package migrate - -// Logger is an interface so you can pass in your own -// logging implementation. -type Logger interface { - - // Printf is like fmt.Printf - Printf(format string, v ...interface{}) - - // Verbose should return true when verbose logging output is wanted - Verbose() bool -} diff --git a/vendor/github.com/mattes/migrate/migrate.go b/vendor/github.com/mattes/migrate/migrate.go deleted file mode 100644 index 17123eb11..000000000 --- a/vendor/github.com/mattes/migrate/migrate.go +++ /dev/null @@ -1,920 +0,0 @@ -// Package migrate reads migrations from sources and runs them against databases. -// Sources are defined by the `source.Driver` and databases by the `database.Driver` -// interface. 
The driver interfaces are kept "dump", all migration logic is kept -// in this package. -package migrate - -import ( - "fmt" - "os" - "sync" - "time" - - "github.com/mattes/migrate/database" - "github.com/mattes/migrate/source" -) - -// DefaultPrefetchMigrations sets the number of migrations to pre-read -// from the source. This is helpful if the source is remote, but has little -// effect for a local source (i.e. file system). -// Please note that this setting has a major impact on the memory usage, -// since each pre-read migration is buffered in memory. See DefaultBufferSize. -var DefaultPrefetchMigrations = uint(10) - -// DefaultLockTimeout sets the max time a database driver has to acquire a lock. -var DefaultLockTimeout = 15 * time.Second - -var ( - ErrNoChange = fmt.Errorf("no change") - ErrNilVersion = fmt.Errorf("no migration") - ErrLocked = fmt.Errorf("database locked") - ErrLockTimeout = fmt.Errorf("timeout: can't acquire database lock") -) - -// ErrShortLimit is an error returned when not enough migrations -// can be returned by a source for a given limit. -type ErrShortLimit struct { - Short uint -} - -// Error implements the error interface. -func (e ErrShortLimit) Error() string { - return fmt.Sprintf("limit %v short", e.Short) -} - -type ErrDirty struct { - Version int -} - -func (e ErrDirty) Error() string { - return fmt.Sprintf("Dirty database version %v. Fix and force version.", e.Version) -} - -type Migrate struct { - sourceName string - sourceDrv source.Driver - databaseName string - databaseDrv database.Driver - - // Log accepts a Logger interface - Log Logger - - // GracefulStop accepts `true` and will stop executing migrations - // as soon as possible at a safe break point, so that the database - // is not corrpupted. - GracefulStop chan bool - isGracefulStop bool - - isLockedMu *sync.Mutex - isLocked bool - - // PrefetchMigrations defaults to DefaultPrefetchMigrations, - // but can be set per Migrate instance. - PrefetchMigrations uint - - // LockTimeout defaults to DefaultLockTimeout, - // but can be set per Migrate instance. - LockTimeout time.Duration -} - -// New returns a new Migrate instance from a source URL and a database URL. -// The URL scheme is defined by each driver. -func New(sourceUrl, databaseUrl string) (*Migrate, error) { - m := newCommon() - - sourceName, err := schemeFromUrl(sourceUrl) - if err != nil { - return nil, err - } - m.sourceName = sourceName - - databaseName, err := schemeFromUrl(databaseUrl) - if err != nil { - return nil, err - } - m.databaseName = databaseName - - sourceDrv, err := source.Open(sourceUrl) - if err != nil { - return nil, err - } - m.sourceDrv = sourceDrv - - databaseDrv, err := database.Open(databaseUrl) - if err != nil { - return nil, err - } - m.databaseDrv = databaseDrv - - return m, nil -} - -// NewWithDatabaseInstance returns a new Migrate instance from a source URL -// and an existing database instance. The source URL scheme is defined by each driver. -// Use any string that can serve as an identifier during logging as databaseName. -// You are responsible for closing the underlying database client if necessary. 
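// A hedged sketch of the bring-your-own-connection path this constructor
// enables. It assumes the postgres driver's WithInstance helper (each driver
// documents whether it supports WithInstance); URLs and paths are placeholders.

package sketch

import (
	"database/sql"
	"log"

	"github.com/mattes/migrate"
	"github.com/mattes/migrate/database/postgres"
	_ "github.com/mattes/migrate/source/file" // registers the "file://" source
)

func migrateExistingDB(db *sql.DB) {
	// Wrap the already-open *sql.DB in a database.Driver.
	drv, err := postgres.WithInstance(db, &postgres.Config{})
	if err != nil {
		log.Fatal(err)
	}
	// "postgres" here is only a logging identifier; the driver instance is
	// passed in directly, and the caller keeps ownership of db.
	m, err := migrate.NewWithDatabaseInstance("file:///migrations", "postgres", drv)
	if err != nil {
		log.Fatal(err)
	}
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		log.Fatal(err)
	}
}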
-func NewWithDatabaseInstance(sourceUrl string, databaseName string, databaseInstance database.Driver) (*Migrate, error) { - m := newCommon() - - sourceName, err := schemeFromUrl(sourceUrl) - if err != nil { - return nil, err - } - m.sourceName = sourceName - - m.databaseName = databaseName - - sourceDrv, err := source.Open(sourceUrl) - if err != nil { - return nil, err - } - m.sourceDrv = sourceDrv - - m.databaseDrv = databaseInstance - - return m, nil -} - -// NewWithSourceInstance returns a new Migrate instance from an existing source instance -// and a database URL. The database URL scheme is defined by each driver. -// Use any string that can serve as an identifier during logging as sourceName. -// You are responsible for closing the underlying source client if necessary. -func NewWithSourceInstance(sourceName string, sourceInstance source.Driver, databaseUrl string) (*Migrate, error) { - m := newCommon() - - databaseName, err := schemeFromUrl(databaseUrl) - if err != nil { - return nil, err - } - m.databaseName = databaseName - - m.sourceName = sourceName - - databaseDrv, err := database.Open(databaseUrl) - if err != nil { - return nil, err - } - m.databaseDrv = databaseDrv - - m.sourceDrv = sourceInstance - - return m, nil -} - -// NewWithInstance returns a new Migrate instance from an existing source and -// database instance. Use any string that can serve as an identifier during logging -// as sourceName and databaseName. You are responsible for closing down -// the underlying source and database client if necessary. -func NewWithInstance(sourceName string, sourceInstance source.Driver, databaseName string, databaseInstance database.Driver) (*Migrate, error) { - m := newCommon() - - m.sourceName = sourceName - m.databaseName = databaseName - - m.sourceDrv = sourceInstance - m.databaseDrv = databaseInstance - - return m, nil -} - -func newCommon() *Migrate { - return &Migrate{ - GracefulStop: make(chan bool, 1), - PrefetchMigrations: DefaultPrefetchMigrations, - LockTimeout: DefaultLockTimeout, - isLockedMu: &sync.Mutex{}, - } -} - -// Close closes the the source and the database. -func (m *Migrate) Close() (source error, database error) { - databaseSrvClose := make(chan error) - sourceSrvClose := make(chan error) - - m.logVerbosePrintf("Closing source and database\n") - - go func() { - databaseSrvClose <- m.databaseDrv.Close() - }() - - go func() { - sourceSrvClose <- m.sourceDrv.Close() - }() - - return <-sourceSrvClose, <-databaseSrvClose -} - -// Migrate looks at the currently active migration version, -// then migrates either up or down to the specified version. -func (m *Migrate) Migrate(version uint) error { - if err := m.lock(); err != nil { - return err - } - - curVersion, dirty, err := m.databaseDrv.Version() - if err != nil { - return m.unlockErr(err) - } - - if dirty { - return m.unlockErr(ErrDirty{curVersion}) - } - - ret := make(chan interface{}, m.PrefetchMigrations) - go m.read(curVersion, int(version), ret) - - return m.unlockErr(m.runMigrations(ret)) -} - -// Steps looks at the currently active migration version. -// It will migrate up if n > 0, and down if n < 0. 
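// Steps below is the relative counterpart to the absolute Migrate above; both
// share the lock / version-check / dirty-check / stream / unlock skeleton. A
// hedged caller-side sketch (the target version 7 is a placeholder):

package sketch

import (
	"log"

	"github.com/mattes/migrate"
)

func pinThenStepBack(m *migrate.Migrate) {
	// Migrate jumps to an absolute version; the library decides whether
	// that means walking up or down from the current version.
	if err := m.Migrate(7); err != nil && err != migrate.ErrNoChange {
		log.Fatal(err)
	}
	// Steps is relative: n < 0 walks down, n > 0 walks up,
	// and n == 0 returns ErrNoChange.
	if err := m.Steps(-1); err != nil {
		log.Fatal(err)
	}
}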
-func (m *Migrate) Steps(n int) error { - if n == 0 { - return ErrNoChange - } - - if err := m.lock(); err != nil { - return err - } - - curVersion, dirty, err := m.databaseDrv.Version() - if err != nil { - return m.unlockErr(err) - } - - if dirty { - return m.unlockErr(ErrDirty{curVersion}) - } - - ret := make(chan interface{}, m.PrefetchMigrations) - - if n > 0 { - go m.readUp(curVersion, n, ret) - } else { - go m.readDown(curVersion, -n, ret) - } - - return m.unlockErr(m.runMigrations(ret)) -} - -// Up looks at the currently active migration version -// and will migrate all the way up (applying all up migrations). -func (m *Migrate) Up() error { - if err := m.lock(); err != nil { - return err - } - - curVersion, dirty, err := m.databaseDrv.Version() - if err != nil { - return m.unlockErr(err) - } - - if dirty { - return m.unlockErr(ErrDirty{curVersion}) - } - - ret := make(chan interface{}, m.PrefetchMigrations) - - go m.readUp(curVersion, -1, ret) - return m.unlockErr(m.runMigrations(ret)) -} - -// Down looks at the currently active migration version -// and will migrate all the way down (applying all down migrations). -func (m *Migrate) Down() error { - if err := m.lock(); err != nil { - return err - } - - curVersion, dirty, err := m.databaseDrv.Version() - if err != nil { - return m.unlockErr(err) - } - - if dirty { - return m.unlockErr(ErrDirty{curVersion}) - } - - ret := make(chan interface{}, m.PrefetchMigrations) - go m.readDown(curVersion, -1, ret) - return m.unlockErr(m.runMigrations(ret)) -} - -// Drop deletes everyting in the database. -func (m *Migrate) Drop() error { - if err := m.lock(); err != nil { - return err - } - if err := m.databaseDrv.Drop(); err != nil { - return m.unlockErr(err) - } - return m.unlock() -} - -// Run runs any migration provided by you against the database. -// It does not check any currently active version in database. -// Usually you don't need this function at all. Use Migrate, -// Steps, Up or Down instead. -func (m *Migrate) Run(migration ...*Migration) error { - if len(migration) == 0 { - return ErrNoChange - } - - if err := m.lock(); err != nil { - return err - } - - curVersion, dirty, err := m.databaseDrv.Version() - if err != nil { - return m.unlockErr(err) - } - - if dirty { - return m.unlockErr(ErrDirty{curVersion}) - } - - ret := make(chan interface{}, m.PrefetchMigrations) - - go func() { - defer close(ret) - for _, migr := range migration { - if m.PrefetchMigrations > 0 && migr.Body != nil { - m.logVerbosePrintf("Start buffering %v\n", migr.LogString()) - } else { - m.logVerbosePrintf("Scheduled %v\n", migr.LogString()) - } - - ret <- migr - go migr.Buffer() - } - }() - - return m.unlockErr(m.runMigrations(ret)) -} - -// Force sets a migration version. -// It does not check any currently active version in database. -// It resets the dirty state to false. -func (m *Migrate) Force(version int) error { - if version < -1 { - panic("version must be >= -1") - } - - if err := m.lock(); err != nil { - return err - } - - if err := m.databaseDrv.SetVersion(version, false); err != nil { - return m.unlockErr(err) - } - - return m.unlock() -} - -// Version returns the currently active migration version. -// If no migration has been applied, yet, it will return ErrNilVersion. 
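// Force above is the escape hatch that ErrDirty's "Fix and force version."
// message points at: it rewrites only the bookkeeping row and executes nothing
// from the source. A hedged recovery sketch; the stamped version 4 is a
// placeholder for wherever the operator left the schema after manual cleanup:

package sketch

import (
	"log"

	"github.com/mattes/migrate"
)

func recoverDirty(m *migrate.Migrate) {
	if _, dirty, err := m.Version(); err == nil && dirty {
		// Declare the schema to actually be at version 4 and clear the
		// dirty flag, making normal Up/Down/Migrate possible again.
		if err := m.Force(4); err != nil {
			log.Fatal(err)
		}
	}
}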
-func (m *Migrate) Version() (version uint, dirty bool, err error) { - v, d, err := m.databaseDrv.Version() - if err != nil { - return 0, false, err - } - - if v == database.NilVersion { - return 0, false, ErrNilVersion - } - - return suint(v), d, nil -} - -// read reads either up or down migrations from source `from` to `to`. -// Each migration is then written to the ret channel. -// If an error occurs during reading, that error is written to the ret channel, too. -// Once read is done reading it will close the ret channel. -func (m *Migrate) read(from int, to int, ret chan<- interface{}) { - defer close(ret) - - // check if from version exists - if from >= 0 { - if m.versionExists(suint(from)) != nil { - ret <- os.ErrNotExist - return - } - } - - // check if to version exists - if to >= 0 { - if m.versionExists(suint(to)) != nil { - ret <- os.ErrNotExist - return - } - } - - // no change? - if from == to { - ret <- ErrNoChange - return - } - - if from < to { - // it's going up - // apply first migration if from is nil version - if from == -1 { - firstVersion, err := m.sourceDrv.First() - if err != nil { - ret <- err - return - } - - migr, err := m.newMigration(firstVersion, int(firstVersion)) - if err != nil { - ret <- err - return - } - - ret <- migr - go migr.Buffer() - from = int(firstVersion) - } - - // run until we reach target ... - for from < to { - if m.stop() { - return - } - - next, err := m.sourceDrv.Next(suint(from)) - if err != nil { - ret <- err - return - } - - migr, err := m.newMigration(next, int(next)) - if err != nil { - ret <- err - return - } - - ret <- migr - go migr.Buffer() - from = int(next) - } - - } else { - // it's going down - // run until we reach target ... - for from > to && from >= 0 { - if m.stop() { - return - } - - prev, err := m.sourceDrv.Prev(suint(from)) - if os.IsNotExist(err) && to == -1 { - // apply nil migration - migr, err := m.newMigration(suint(from), -1) - if err != nil { - ret <- err - return - } - ret <- migr - go migr.Buffer() - return - - } else if err != nil { - ret <- err - return - } - - migr, err := m.newMigration(suint(from), int(prev)) - if err != nil { - ret <- err - return - } - - ret <- migr - go migr.Buffer() - from = int(prev) - } - } -} - -// readUp reads up migrations from `from` limitted by `limit`. -// limit can be -1, implying no limit and reading until there are no more migrations. -// Each migration is then written to the ret channel. -// If an error occurs during reading, that error is written to the ret channel, too. -// Once readUp is done reading it will close the ret channel. -func (m *Migrate) readUp(from int, limit int, ret chan<- interface{}) { - defer close(ret) - - // check if from version exists - if from >= 0 { - if m.versionExists(suint(from)) != nil { - ret <- os.ErrNotExist - return - } - } - - if limit == 0 { - ret <- ErrNoChange - return - } - - count := 0 - for count < limit || limit == -1 { - if m.stop() { - return - } - - // apply first migration if from is nil version - if from == -1 { - firstVersion, err := m.sourceDrv.First() - if err != nil { - ret <- err - return - } - - migr, err := m.newMigration(firstVersion, int(firstVersion)) - if err != nil { - ret <- err - return - } - - ret <- migr - go migr.Buffer() - from = int(firstVersion) - count++ - continue - } - - // apply next migration - next, err := m.sourceDrv.Next(suint(from)) - if os.IsNotExist(err) { - // no limit, but no migrations applied? 
- if limit == -1 && count == 0 { - ret <- ErrNoChange - return - } - - // no limit, reached end - if limit == -1 { - return - } - - // reached end, and didn't apply any migrations - if limit > 0 && count == 0 { - ret <- os.ErrNotExist - return - } - - // applied less migrations than limit? - if count < limit { - ret <- ErrShortLimit{suint(limit - count)} - return - } - } - if err != nil { - ret <- err - return - } - - migr, err := m.newMigration(next, int(next)) - if err != nil { - ret <- err - return - } - - ret <- migr - go migr.Buffer() - from = int(next) - count++ - } -} - -// readDown reads down migrations from `from` limitted by `limit`. -// limit can be -1, implying no limit and reading until there are no more migrations. -// Each migration is then written to the ret channel. -// If an error occurs during reading, that error is written to the ret channel, too. -// Once readDown is done reading it will close the ret channel. -func (m *Migrate) readDown(from int, limit int, ret chan<- interface{}) { - defer close(ret) - - // check if from version exists - if from >= 0 { - if m.versionExists(suint(from)) != nil { - ret <- os.ErrNotExist - return - } - } - - if limit == 0 { - ret <- ErrNoChange - return - } - - // no change if already at nil version - if from == -1 && limit == -1 { - ret <- ErrNoChange - return - } - - // can't go over limit if already at nil version - if from == -1 && limit > 0 { - ret <- os.ErrNotExist - return - } - - count := 0 - for count < limit || limit == -1 { - if m.stop() { - return - } - - prev, err := m.sourceDrv.Prev(suint(from)) - if os.IsNotExist(err) { - // no limit or haven't reached limit, apply "first" migration - if limit == -1 || limit-count > 0 { - firstVersion, err := m.sourceDrv.First() - if err != nil { - ret <- err - return - } - - migr, err := m.newMigration(firstVersion, -1) - if err != nil { - ret <- err - return - } - ret <- migr - go migr.Buffer() - count++ - } - - if count < limit { - ret <- ErrShortLimit{suint(limit - count)} - } - return - } - if err != nil { - ret <- err - return - } - - migr, err := m.newMigration(suint(from), int(prev)) - if err != nil { - ret <- err - return - } - - ret <- migr - go migr.Buffer() - from = int(prev) - count++ - } -} - -// runMigrations reads *Migration and error from a channel. Any other type -// sent on this channel will result in a panic. Each migration is then -// proxied to the database driver and run against the database. -// Before running a newly received migration it will check if it's supposed -// to stop execution because it might have received a stop signal on the -// GracefulStop channel. 
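// runMigrations below treats the dirty flag as a write-ahead marker: the
// target version is stored with dirty=true before a body executes and flipped
// back only on success, so a crash mid-migration leaves evidence behind. A
// hedged sketch of that protocol against the database.Driver interface
// (target and body come from the caller in this sketch):

package sketch

import (
	"io"

	"github.com/mattes/migrate/database"
)

func applyOne(drv database.Driver, target int, body io.Reader) error {
	// Mark dirty first; if Run fails or the process dies here, the stale
	// dirty=true row is exactly what ErrDirty reports on the next run.
	if err := drv.SetVersion(target, true); err != nil {
		return err
	}
	if body != nil { // a nil body is a NilMigration: version bump only
		if err := drv.Run(body); err != nil {
			return err
		}
	}
	// Only a fully applied migration clears the flag.
	return drv.SetVersion(target, false)
}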
-func (m *Migrate) runMigrations(ret <-chan interface{}) error { - for r := range ret { - - if m.stop() { - return nil - } - - switch r.(type) { - case error: - return r.(error) - - case *Migration: - migr := r.(*Migration) - - // set version with dirty state - if err := m.databaseDrv.SetVersion(migr.TargetVersion, true); err != nil { - return err - } - - if migr.Body != nil { - m.logVerbosePrintf("Read and execute %v\n", migr.LogString()) - if err := m.databaseDrv.Run(migr.BufferedBody); err != nil { - return err - } - } - - // set clean state - if err := m.databaseDrv.SetVersion(migr.TargetVersion, false); err != nil { - return err - } - - endTime := time.Now() - readTime := migr.FinishedReading.Sub(migr.StartedBuffering) - runTime := endTime.Sub(migr.FinishedReading) - - // log either verbose or normal - if m.Log != nil { - if m.Log.Verbose() { - m.logPrintf("Finished %v (read %v, ran %v)\n", migr.LogString(), readTime, runTime) - } else { - m.logPrintf("%v (%v)\n", migr.LogString(), readTime+runTime) - } - } - - default: - panic("unknown type") - } - } - return nil -} - -// versionExists checks the source if either the up or down migration for -// the specified migration version exists. -func (m *Migrate) versionExists(version uint) error { - // try up migration first - up, _, err := m.sourceDrv.ReadUp(version) - if err == nil { - defer up.Close() - } - if os.IsExist(err) { - return nil - } else if !os.IsNotExist(err) { - return err - } - - // then try down migration - down, _, err := m.sourceDrv.ReadDown(version) - if err == nil { - defer down.Close() - } - if os.IsExist(err) { - return nil - } else if !os.IsNotExist(err) { - return err - } - - return os.ErrNotExist -} - -// stop returns true if no more migrations should be run against the database -// because a stop signal was received on the GracefulStop channel. -// Calls are cheap and this function is not blocking. -func (m *Migrate) stop() bool { - if m.isGracefulStop { - return true - } - - select { - case <-m.GracefulStop: - m.isGracefulStop = true - return true - - default: - return false - } -} - -// newMigration is a helper func that returns a *Migration for the -// specified version and targetVersion. -func (m *Migrate) newMigration(version uint, targetVersion int) (*Migration, error) { - var migr *Migration - - if targetVersion >= int(version) { - r, identifier, err := m.sourceDrv.ReadUp(version) - if os.IsNotExist(err) { - // create "empty" migration - migr, err = NewMigration(nil, "", version, targetVersion) - if err != nil { - return nil, err - } - - } else if err != nil { - return nil, err - - } else { - // create migration from up source - migr, err = NewMigration(r, identifier, version, targetVersion) - if err != nil { - return nil, err - } - } - - } else { - r, identifier, err := m.sourceDrv.ReadDown(version) - if os.IsNotExist(err) { - // create "empty" migration - migr, err = NewMigration(nil, "", version, targetVersion) - if err != nil { - return nil, err - } - - } else if err != nil { - return nil, err - - } else { - // create migration from down source - migr, err = NewMigration(r, identifier, version, targetVersion) - if err != nil { - return nil, err - } - } - } - - if m.PrefetchMigrations > 0 && migr.Body != nil { - m.logVerbosePrintf("Start buffering %v\n", migr.LogString()) - } else { - m.logVerbosePrintf("Scheduled %v\n", migr.LogString()) - } - - return migr, nil -} - -// lock is a thread safe helper function to lock the database. -// It should be called as late as possible when running migrations. 
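// lock below races the driver's Lock call against a timeout by giving each
// goroutine its own send into a shared, buffered error channel. A
// self-contained sketch of that pattern (the timeout value and function
// names are placeholders, not part of this package):

package sketch

import (
	"errors"
	"time"
)

var errLockTimeout = errors.New("timeout: can't acquire database lock")

// acquire runs tryLock concurrently and returns whichever outcome arrives
// first: the lock result or the timeout. Buffering errchan (cap 2) lets the
// loser's send complete, and the buffered done channel keeps the deferred
// send from blocking.
func acquire(tryLock func() error, timeout time.Duration) error {
	errchan := make(chan error, 2)
	done := make(chan bool, 1)
	defer func() { done <- true }()

	t := time.After(timeout)
	go func() {
		select {
		case <-done:
		case <-t:
			errchan <- errLockTimeout
		}
	}()

	go func() {
		errchan <- tryLock()
	}()

	return <-errchan
}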
-func (m *Migrate) lock() error { - m.isLockedMu.Lock() - defer m.isLockedMu.Unlock() - - if m.isLocked { - return ErrLocked - } - - // create done channel, used in the timeout goroutine - done := make(chan bool, 1) - defer func() { - done <- true - }() - - // use errchan to signal error back to this context - errchan := make(chan error, 2) - - // start timeout goroutine - timeout := time.After(m.LockTimeout) - go func() { - for { - select { - case <-done: - return - case <-timeout: - errchan <- ErrLockTimeout - return - } - } - }() - - // now try to acquire the lock - go func() { - if err := m.databaseDrv.Lock(); err != nil { - errchan <- err - } else { - errchan <- nil - } - return - }() - - // wait until we either recieve ErrLockTimeout or error from Lock operation - err := <-errchan - if err == nil { - m.isLocked = true - } - return err -} - -// unlock is a thread safe helper function to unlock the database. -// It should be called as early as possible when no more migrations are -// expected to be executed. -func (m *Migrate) unlock() error { - m.isLockedMu.Lock() - defer m.isLockedMu.Unlock() - - if err := m.databaseDrv.Unlock(); err != nil { - // BUG: Can potentially create a deadlock. Add a timeout. - return err - } - - m.isLocked = false - return nil -} - -// unlockErr calls unlock and returns a combined error -// if a prevErr is not nil. -func (m *Migrate) unlockErr(prevErr error) error { - if err := m.unlock(); err != nil { - return NewMultiError(prevErr, err) - } - return prevErr -} - -// logPrintf writes to m.Log if not nil -func (m *Migrate) logPrintf(format string, v ...interface{}) { - if m.Log != nil { - m.Log.Printf(format, v...) - } -} - -// logVerbosePrintf writes to m.Log if not nil. Use for verbose logging output. -func (m *Migrate) logVerbosePrintf(format string, v ...interface{}) { - if m.Log != nil && m.Log.Verbose() { - m.Log.Printf(format, v...) 
- } -} diff --git a/vendor/github.com/mattes/migrate/migrate_test.go b/vendor/github.com/mattes/migrate/migrate_test.go deleted file mode 100644 index 0ec4bce21..000000000 --- a/vendor/github.com/mattes/migrate/migrate_test.go +++ /dev/null @@ -1,941 +0,0 @@ -package migrate - -import ( - "bytes" - "database/sql" - "io/ioutil" - "log" - "os" - "testing" - - dStub "github.com/mattes/migrate/database/stub" - "github.com/mattes/migrate/source" - sStub "github.com/mattes/migrate/source/stub" -) - -// sourceStubMigrations hold the following migrations: -// u = up migration, d = down migration, n = version -// | 1 | - | 3 | 4 | 5 | - | 7 | -// | u d | - | u | u d | d | - | u d | -var sourceStubMigrations *source.Migrations - -func init() { - sourceStubMigrations = source.NewMigrations() - sourceStubMigrations.Append(&source.Migration{Version: 1, Direction: source.Up}) - sourceStubMigrations.Append(&source.Migration{Version: 1, Direction: source.Down}) - sourceStubMigrations.Append(&source.Migration{Version: 3, Direction: source.Up}) - sourceStubMigrations.Append(&source.Migration{Version: 4, Direction: source.Up}) - sourceStubMigrations.Append(&source.Migration{Version: 4, Direction: source.Down}) - sourceStubMigrations.Append(&source.Migration{Version: 5, Direction: source.Down}) - sourceStubMigrations.Append(&source.Migration{Version: 7, Direction: source.Up}) - sourceStubMigrations.Append(&source.Migration{Version: 7, Direction: source.Down}) -} - -type DummyInstance struct{ Name string } - -func TestNew(t *testing.T) { - m, err := New("stub://", "stub://") - if err != nil { - t.Fatal(err) - } - - if m.sourceName != "stub" { - t.Errorf("expected stub, got %v", m.sourceName) - } - if m.sourceDrv == nil { - t.Error("expected sourceDrv not to be nil") - } - - if m.databaseName != "stub" { - t.Errorf("expected stub, got %v", m.databaseName) - } - if m.databaseDrv == nil { - t.Error("expected databaseDrv not to be nil") - } -} - -func ExampleNew() { - // Read migrations from /home/mattes/migrations and connect to a local postgres database. - m, err := New("file:///home/mattes/migrations", "postgres://mattes:secret@localhost:5432/database?sslmode=disable") - if err != nil { - log.Fatal(err) - } - - // Migrate all the way up ... - if err := m.Up(); err != nil { - log.Fatal(err) - } -} - -func TestNewWithDatabaseInstance(t *testing.T) { - dummyDb := &DummyInstance{"database"} - dbInst, err := dStub.WithInstance(dummyDb, &dStub.Config{}) - if err != nil { - t.Fatal(err) - } - - m, err := NewWithDatabaseInstance("stub://", "stub", dbInst) - if err != nil { - t.Fatal(err) - } - - if m.sourceName != "stub" { - t.Errorf("expected stub, got %v", m.sourceName) - } - if m.sourceDrv == nil { - t.Error("expected sourceDrv not to be nil") - } - - if m.databaseName != "stub" { - t.Errorf("expected stub, got %v", m.databaseName) - } - if m.databaseDrv == nil { - t.Error("expected databaseDrv not to be nil") - } -} - -func ExampleNewWithDatabaseInstance() { - // Create and use an existing database instance. - db, err := sql.Open("postgres", "postgres://mattes:secret@localhost:5432/database?sslmode=disable") - if err != nil { - log.Fatal(err) - } - defer db.Close() - - // Create driver instance from db. - // Check each driver if it supports the WithInstance function. - // `import "github.com/mattes/migrate/database/postgres"` - instance, err := dStub.WithInstance(db, &dStub.Config{}) - if err != nil { - log.Fatal(err) - } - - // Read migrations from /home/mattes/migrations and connect to a local postgres database. 
- m, err := NewWithDatabaseInstance("file:///home/mattes/migrations", "postgres", instance) - if err != nil { - log.Fatal(err) - } - - // Migrate all the way up ... - if err := m.Up(); err != nil { - log.Fatal(err) - } -} - -func TestNewWithSourceInstance(t *testing.T) { - dummySource := &DummyInstance{"source"} - sInst, err := sStub.WithInstance(dummySource, &sStub.Config{}) - if err != nil { - t.Fatal(err) - } - - m, err := NewWithSourceInstance("stub", sInst, "stub://") - if err != nil { - t.Fatal(err) - } - - if m.sourceName != "stub" { - t.Errorf("expected stub, got %v", m.sourceName) - } - if m.sourceDrv == nil { - t.Error("expected sourceDrv not to be nil") - } - - if m.databaseName != "stub" { - t.Errorf("expected stub, got %v", m.databaseName) - } - if m.databaseDrv == nil { - t.Error("expected databaseDrv not to be nil") - } -} - -func ExampleNewWithSourceInstance() { - di := &DummyInstance{"think any client required for a source here"} - - // Create driver instance from DummyInstance di. - // Check each driver if it support the WithInstance function. - // `import "github.com/mattes/migrate/source/stub"` - instance, err := sStub.WithInstance(di, &sStub.Config{}) - if err != nil { - log.Fatal(err) - } - - // Read migrations from Stub and connect to a local postgres database. - m, err := NewWithSourceInstance("stub", instance, "postgres://mattes:secret@localhost:5432/database?sslmode=disable") - if err != nil { - log.Fatal(err) - } - - // Migrate all the way up ... - if err := m.Up(); err != nil { - log.Fatal(err) - } -} - -func TestNewWithInstance(t *testing.T) { - dummyDb := &DummyInstance{"database"} - dbInst, err := dStub.WithInstance(dummyDb, &dStub.Config{}) - if err != nil { - t.Fatal(err) - } - - dummySource := &DummyInstance{"source"} - sInst, err := sStub.WithInstance(dummySource, &sStub.Config{}) - if err != nil { - t.Fatal(err) - } - - m, err := NewWithInstance("stub", sInst, "stub", dbInst) - if err != nil { - t.Fatal(err) - } - - if m.sourceName != "stub" { - t.Errorf("expected stub, got %v", m.sourceName) - } - if m.sourceDrv == nil { - t.Error("expected sourceDrv not to be nil") - } - - if m.databaseName != "stub" { - t.Errorf("expected stub, got %v", m.databaseName) - } - if m.databaseDrv == nil { - t.Error("expected databaseDrv not to be nil") - } -} - -func ExampleNewWithInstance() { - // See NewWithDatabaseInstance and NewWithSourceInstance for an example. 
-} - -func TestClose(t *testing.T) { - m, _ := New("stub://", "stub://") - sourceErr, databaseErr := m.Close() - if sourceErr != nil { - t.Error(sourceErr) - } - if databaseErr != nil { - t.Error(databaseErr) - } -} - -func TestMigrate(t *testing.T) { - m, _ := New("stub://", "stub://") - m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations - dbDrv := m.databaseDrv.(*dStub.Stub) - seq := newMigSeq() - - tt := []struct { - version uint - expectErr error - expectVersion uint - expectSeq migrationSequence - }{ - // migrate all the way Up in single steps - {version: 0, expectErr: os.ErrNotExist}, - {version: 1, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(1))}, - {version: 2, expectErr: os.ErrNotExist}, - {version: 3, expectErr: nil, expectVersion: 3, expectSeq: seq.add(M(3))}, - {version: 4, expectErr: nil, expectVersion: 4, expectSeq: seq.add(M(4))}, - {version: 5, expectErr: nil, expectVersion: 5, expectSeq: seq.add()}, // 5 has no up migration - {version: 6, expectErr: os.ErrNotExist}, - {version: 7, expectErr: nil, expectVersion: 7, expectSeq: seq.add(M(7))}, - {version: 8, expectErr: os.ErrNotExist}, - - // migrate all the way Down in single steps - {version: 6, expectErr: os.ErrNotExist}, - {version: 5, expectErr: nil, expectVersion: 5, expectSeq: seq.add(M(7, 5))}, - {version: 4, expectErr: nil, expectVersion: 4, expectSeq: seq.add(M(5, 4))}, - {version: 3, expectErr: nil, expectVersion: 3, expectSeq: seq.add(M(4, 3))}, - {version: 2, expectErr: os.ErrNotExist}, - {version: 1, expectErr: nil, expectVersion: 1, expectSeq: seq.add()}, // 3 has no down migration - {version: 0, expectErr: os.ErrNotExist}, - - // migrate all the way Up in one step - {version: 7, expectErr: nil, expectVersion: 7, expectSeq: seq.add(M(3), M(4), M(7))}, - - // migrate all the way Down in one step - {version: 1, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(7, 5), M(5, 4), M(4, 3), M(3, 1))}, - - // can't migrate the same version twice - {version: 1, expectErr: ErrNoChange}, - } - - for i, v := range tt { - err := m.Migrate(v.version) - if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || - (v.expectErr != os.ErrNotExist && err != v.expectErr) { - t.Errorf("expected err %v, got %v, in %v", v.expectErr, err, i) - - } else if err == nil { - version, _, err := m.Version() - if err != nil { - t.Error(err) - } - if version != v.expectVersion { - t.Errorf("expected version %v, got %v, in %v", v.expectVersion, version, i) - } - equalDbSeq(t, i, v.expectSeq, dbDrv) - } - } -} - -func TestMigrateDirty(t *testing.T) { - m, _ := New("stub://", "stub://") - dbDrv := m.databaseDrv.(*dStub.Stub) - if err := dbDrv.SetVersion(0, true); err != nil { - t.Fatal(err) - } - - err := m.Migrate(1) - if _, ok := err.(ErrDirty); !ok { - t.Fatalf("expected ErrDirty, got %v", err) - } -} - -func TestSteps(t *testing.T) { - m, _ := New("stub://", "stub://") - m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations - dbDrv := m.databaseDrv.(*dStub.Stub) - seq := newMigSeq() - - tt := []struct { - n int - expectErr error - expectVersion int - expectSeq migrationSequence - }{ - // step must be != 0 - {n: 0, expectErr: ErrNoChange}, - - // can't go Down if ErrNilVersion - {n: -1, expectErr: os.ErrNotExist}, - - // migrate all the way Up - {n: 1, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(1))}, - {n: 1, expectErr: nil, expectVersion: 3, expectSeq: seq.add(M(3))}, - {n: 1, expectErr: nil, expectVersion: 4, expectSeq: seq.add(M(4))}, - {n: 1, expectErr: nil, expectVersion: 5, expectSeq: 
seq.add()}, - {n: 1, expectErr: nil, expectVersion: 7, expectSeq: seq.add(M(7))}, - {n: 1, expectErr: os.ErrNotExist}, - - // migrate all the way Down - {n: -1, expectErr: nil, expectVersion: 5, expectSeq: seq.add(M(7, 5))}, - {n: -1, expectErr: nil, expectVersion: 4, expectSeq: seq.add(M(5, 4))}, - {n: -1, expectErr: nil, expectVersion: 3, expectSeq: seq.add(M(4, 3))}, - {n: -1, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(3, 1))}, - {n: -1, expectErr: nil, expectVersion: -1, expectSeq: seq.add(M(1, -1))}, - - // migrate Up in bigger step - {n: 4, expectErr: nil, expectVersion: 5, expectSeq: seq.add(M(1), M(3), M(4), M(5))}, - - // apply one migration, then reaches out of boundary - {n: 2, expectErr: ErrShortLimit{1}, expectVersion: 7, expectSeq: seq.add(M(7))}, - - // migrate Down in bigger step - {n: -4, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(7, 5), M(5, 4), M(4, 3), M(3, 1))}, - - // apply one migration, then reaches out of boundary - {n: -2, expectErr: ErrShortLimit{1}, expectVersion: -1, expectSeq: seq.add(M(1, -1))}, - } - - for i, v := range tt { - err := m.Steps(v.n) - if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || - (v.expectErr != os.ErrNotExist && err != v.expectErr) { - t.Errorf("expected err %v, got %v, in %v", v.expectErr, err, i) - - } else if err == nil { - version, _, err := m.Version() - if err != ErrNilVersion && err != nil { - t.Error(err) - } - if v.expectVersion == -1 && err != ErrNilVersion { - t.Errorf("expected ErrNilVersion, got %v, in %v", version, i) - - } else if v.expectVersion >= 0 && version != uint(v.expectVersion) { - t.Errorf("expected version %v, got %v, in %v", v.expectVersion, version, i) - } - equalDbSeq(t, i, v.expectSeq, dbDrv) - } - } -} - -func TestStepsDirty(t *testing.T) { - m, _ := New("stub://", "stub://") - dbDrv := m.databaseDrv.(*dStub.Stub) - if err := dbDrv.SetVersion(0, true); err != nil { - t.Fatal(err) - } - - err := m.Steps(1) - if _, ok := err.(ErrDirty); !ok { - t.Fatalf("expected ErrDirty, got %v", err) - } -} - -func TestUpAndDown(t *testing.T) { - m, _ := New("stub://", "stub://") - m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations - dbDrv := m.databaseDrv.(*dStub.Stub) - seq := newMigSeq() - - // go Up first - if err := m.Up(); err != nil { - t.Fatal(err) - } - equalDbSeq(t, 0, seq.add(M(1), M(3), M(4), M(5), M(7)), dbDrv) - - // go Down - if err := m.Down(); err != nil { - t.Fatal(err) - } - equalDbSeq(t, 1, seq.add(M(7, 5), M(5, 4), M(4, 3), M(3, 1), M(1, -1)), dbDrv) - - // go 1 Up and then all the way Up - if err := m.Steps(1); err != nil { - t.Fatal(err) - } - if err := m.Up(); err != nil { - t.Fatal(err) - } - equalDbSeq(t, 2, seq.add(M(1), M(3), M(4), M(5), M(7)), dbDrv) - - // go 1 Down and then all the way Down - if err := m.Steps(-1); err != nil { - t.Fatal(err) - } - if err := m.Down(); err != nil { - t.Fatal(err) - } - equalDbSeq(t, 0, seq.add(M(7, 5), M(5, 4), M(4, 3), M(3, 1), M(1, -1)), dbDrv) -} - -func TestUpDirty(t *testing.T) { - m, _ := New("stub://", "stub://") - dbDrv := m.databaseDrv.(*dStub.Stub) - if err := dbDrv.SetVersion(0, true); err != nil { - t.Fatal(err) - } - - err := m.Up() - if _, ok := err.(ErrDirty); !ok { - t.Fatalf("expected ErrDirty, got %v", err) - } -} - -func TestDownDirty(t *testing.T) { - m, _ := New("stub://", "stub://") - dbDrv := m.databaseDrv.(*dStub.Stub) - if err := dbDrv.SetVersion(0, true); err != nil { - t.Fatal(err) - } - - err := m.Down() - if _, ok := err.(ErrDirty); !ok { - t.Fatalf("expected ErrDirty, got %v", err) - } 
-} - -func TestDrop(t *testing.T) { - m, _ := New("stub://", "stub://") - m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations - dbDrv := m.databaseDrv.(*dStub.Stub) - - if err := m.Drop(); err != nil { - t.Fatal(err) - } - - if dbDrv.MigrationSequence[len(dbDrv.MigrationSequence)-1] != dStub.DROP { - t.Fatalf("expected database to DROP, got sequence %v", dbDrv.MigrationSequence) - } -} - -func TestVersion(t *testing.T) { - m, _ := New("stub://", "stub://") - dbDrv := m.databaseDrv.(*dStub.Stub) - - _, _, err := m.Version() - if err != ErrNilVersion { - t.Fatalf("expected ErrNilVersion, got %v", err) - } - - if err := dbDrv.Run(bytes.NewBufferString("1_up")); err != nil { - t.Fatal(err) - } - - if err := dbDrv.SetVersion(1, false); err != nil { - t.Fatal(err) - } - - v, _, err := m.Version() - if err != nil { - t.Fatal(err) - } - - if v != 1 { - t.Fatalf("expected version 1, got %v", v) - } -} - -func TestRun(t *testing.T) { - m, _ := New("stub://", "stub://") - - mx, err := NewMigration(nil, "", 1, 2) - if err != nil { - t.Fatal(err) - } - - if err := m.Run(mx); err != nil { - t.Fatal(err) - } - - v, _, err := m.Version() - if err != nil { - t.Fatal(err) - } - - if v != 2 { - t.Errorf("expected version 2, got %v", v) - } -} - -func TestRunDirty(t *testing.T) { - m, _ := New("stub://", "stub://") - dbDrv := m.databaseDrv.(*dStub.Stub) - if err := dbDrv.SetVersion(0, true); err != nil { - t.Fatal(err) - } - - migr, err := NewMigration(nil, "", 1, 2) - if err != nil { - t.Fatal(err) - } - - err = m.Run(migr) - if _, ok := err.(ErrDirty); !ok { - t.Fatalf("expected ErrDirty, got %v", err) - } -} - -func TestForce(t *testing.T) { - m, _ := New("stub://", "stub://") - m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations - - if err := m.Force(7); err != nil { - t.Fatal(err) - } - - v, dirty, err := m.Version() - if err != nil { - t.Fatal(err) - } - if dirty { - t.Errorf("expected dirty to be false") - } - if v != 7 { - t.Errorf("expected version to be 7") - } -} - -func TestForceDirty(t *testing.T) { - m, _ := New("stub://", "stub://") - dbDrv := m.databaseDrv.(*dStub.Stub) - if err := dbDrv.SetVersion(0, true); err != nil { - t.Fatal(err) - } - - if err := m.Force(1); err != nil { - t.Fatal(err) - } -} - -func TestRead(t *testing.T) { - m, _ := New("stub://", "stub://") - m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations - - tt := []struct { - from int - to int - expectErr error - expectMigrations migrationSequence - }{ - {from: -1, to: -1, expectErr: ErrNoChange}, - {from: -1, to: 0, expectErr: os.ErrNotExist}, - {from: -1, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(1))}, - {from: -1, to: 2, expectErr: os.ErrNotExist}, - {from: -1, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3))}, - {from: -1, to: 4, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3), M(4))}, - {from: -1, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3), M(4), M(5))}, - {from: -1, to: 6, expectErr: os.ErrNotExist}, - {from: -1, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3), M(4), M(5), M(7))}, - {from: -1, to: 8, expectErr: os.ErrNotExist}, - - {from: 0, to: -1, expectErr: os.ErrNotExist}, - {from: 0, to: 0, expectErr: os.ErrNotExist}, - {from: 0, to: 1, expectErr: os.ErrNotExist}, - {from: 0, to: 2, expectErr: os.ErrNotExist}, - {from: 0, to: 3, expectErr: os.ErrNotExist}, - {from: 0, to: 4, expectErr: os.ErrNotExist}, - {from: 0, to: 5, expectErr: os.ErrNotExist}, - {from: 0, to: 6, expectErr: os.ErrNotExist}, - {from: 0, to: 7, expectErr: 
os.ErrNotExist}, - {from: 0, to: 8, expectErr: os.ErrNotExist}, - - {from: 1, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(1, -1))}, - {from: 1, to: 0, expectErr: os.ErrNotExist}, - {from: 1, to: 1, expectErr: ErrNoChange}, - {from: 1, to: 2, expectErr: os.ErrNotExist}, - {from: 1, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(3))}, - {from: 1, to: 4, expectErr: nil, expectMigrations: newMigSeq(M(3), M(4))}, - {from: 1, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(3), M(4), M(5))}, - {from: 1, to: 6, expectErr: os.ErrNotExist}, - {from: 1, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(3), M(4), M(5), M(7))}, - {from: 1, to: 8, expectErr: os.ErrNotExist}, - - {from: 2, to: -1, expectErr: os.ErrNotExist}, - {from: 2, to: 0, expectErr: os.ErrNotExist}, - {from: 2, to: 1, expectErr: os.ErrNotExist}, - {from: 2, to: 2, expectErr: os.ErrNotExist}, - {from: 2, to: 3, expectErr: os.ErrNotExist}, - {from: 2, to: 4, expectErr: os.ErrNotExist}, - {from: 2, to: 5, expectErr: os.ErrNotExist}, - {from: 2, to: 6, expectErr: os.ErrNotExist}, - {from: 2, to: 7, expectErr: os.ErrNotExist}, - {from: 2, to: 8, expectErr: os.ErrNotExist}, - - {from: 3, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(3, 1), M(1, -1))}, - {from: 3, to: 0, expectErr: os.ErrNotExist}, - {from: 3, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(3, 1))}, - {from: 3, to: 2, expectErr: os.ErrNotExist}, - {from: 3, to: 3, expectErr: ErrNoChange}, - {from: 3, to: 4, expectErr: nil, expectMigrations: newMigSeq(M(4))}, - {from: 3, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(4), M(5))}, - {from: 3, to: 6, expectErr: os.ErrNotExist}, - {from: 3, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(4), M(5), M(7))}, - {from: 3, to: 8, expectErr: os.ErrNotExist}, - - {from: 4, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(4, 3), M(3, 1), M(1, -1))}, - {from: 4, to: 0, expectErr: os.ErrNotExist}, - {from: 4, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(4, 3), M(3, 1))}, - {from: 4, to: 2, expectErr: os.ErrNotExist}, - {from: 4, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(4, 3))}, - {from: 4, to: 4, expectErr: ErrNoChange}, - {from: 4, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(5))}, - {from: 4, to: 6, expectErr: os.ErrNotExist}, - {from: 4, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(5), M(7))}, - {from: 4, to: 8, expectErr: os.ErrNotExist}, - - {from: 5, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3), M(3, 1), M(1, -1))}, - {from: 5, to: 0, expectErr: os.ErrNotExist}, - {from: 5, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3), M(3, 1))}, - {from: 5, to: 2, expectErr: os.ErrNotExist}, - {from: 5, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3))}, - {from: 5, to: 4, expectErr: nil, expectMigrations: newMigSeq(M(5, 4))}, - {from: 5, to: 5, expectErr: ErrNoChange}, - {from: 5, to: 6, expectErr: os.ErrNotExist}, - {from: 5, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(7))}, - {from: 5, to: 8, expectErr: os.ErrNotExist}, - - {from: 6, to: -1, expectErr: os.ErrNotExist}, - {from: 6, to: 0, expectErr: os.ErrNotExist}, - {from: 6, to: 1, expectErr: os.ErrNotExist}, - {from: 6, to: 2, expectErr: os.ErrNotExist}, - {from: 6, to: 3, expectErr: os.ErrNotExist}, - {from: 6, to: 4, expectErr: os.ErrNotExist}, - {from: 6, to: 5, expectErr: os.ErrNotExist}, - {from: 6, to: 6, expectErr: os.ErrNotExist}, - {from: 6, to: 7, expectErr: os.ErrNotExist}, - {from: 6, to: 8, expectErr: os.ErrNotExist}, 
- - {from: 7, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4), M(4, 3), M(3, 1), M(1, -1))}, - {from: 7, to: 0, expectErr: os.ErrNotExist}, - {from: 7, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4), M(4, 3), M(3, 1))}, - {from: 7, to: 2, expectErr: os.ErrNotExist}, - {from: 7, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4), M(4, 3))}, - {from: 7, to: 4, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4))}, - {from: 7, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(7, 5))}, - {from: 7, to: 6, expectErr: os.ErrNotExist}, - {from: 7, to: 7, expectErr: ErrNoChange}, - {from: 7, to: 8, expectErr: os.ErrNotExist}, - - {from: 8, to: -1, expectErr: os.ErrNotExist}, - {from: 8, to: 0, expectErr: os.ErrNotExist}, - {from: 8, to: 1, expectErr: os.ErrNotExist}, - {from: 8, to: 2, expectErr: os.ErrNotExist}, - {from: 8, to: 3, expectErr: os.ErrNotExist}, - {from: 8, to: 4, expectErr: os.ErrNotExist}, - {from: 8, to: 5, expectErr: os.ErrNotExist}, - {from: 8, to: 6, expectErr: os.ErrNotExist}, - {from: 8, to: 7, expectErr: os.ErrNotExist}, - {from: 8, to: 8, expectErr: os.ErrNotExist}, - } - - for i, v := range tt { - ret := make(chan interface{}) - go m.read(v.from, v.to, ret) - migrations, err := migrationsFromChannel(ret) - - if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || - (v.expectErr != os.ErrNotExist && v.expectErr != err) { - t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i) - t.Logf("%v, in %v", migrations, i) - } - if len(v.expectMigrations) > 0 { - equalMigSeq(t, i, v.expectMigrations, migrations) - } - } -} - -func TestReadUp(t *testing.T) { - m, _ := New("stub://", "stub://") - m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations - - tt := []struct { - from int - limit int // -1 means no limit - expectErr error - expectMigrations migrationSequence - }{ - {from: -1, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3), M(4), M(5), M(7))}, - {from: -1, limit: 0, expectErr: ErrNoChange}, - {from: -1, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(1))}, - {from: -1, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3))}, - - {from: 0, limit: -1, expectErr: os.ErrNotExist}, - {from: 0, limit: 0, expectErr: os.ErrNotExist}, - {from: 0, limit: 1, expectErr: os.ErrNotExist}, - {from: 0, limit: 2, expectErr: os.ErrNotExist}, - - {from: 1, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(3), M(4), M(5), M(7))}, - {from: 1, limit: 0, expectErr: ErrNoChange}, - {from: 1, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(3))}, - {from: 1, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(3), M(4))}, - - {from: 2, limit: -1, expectErr: os.ErrNotExist}, - {from: 2, limit: 0, expectErr: os.ErrNotExist}, - {from: 2, limit: 1, expectErr: os.ErrNotExist}, - {from: 2, limit: 2, expectErr: os.ErrNotExist}, - - {from: 3, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(4), M(5), M(7))}, - {from: 3, limit: 0, expectErr: ErrNoChange}, - {from: 3, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(4))}, - {from: 3, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(4), M(5))}, - - {from: 4, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(5), M(7))}, - {from: 4, limit: 0, expectErr: ErrNoChange}, - {from: 4, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(5))}, - {from: 4, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(5), M(7))}, - - {from: 5, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(7))}, - 
{from: 5, limit: 0, expectErr: ErrNoChange}, - {from: 5, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(7))}, - {from: 5, limit: 2, expectErr: ErrShortLimit{1}, expectMigrations: newMigSeq(M(7))}, - - {from: 6, limit: -1, expectErr: os.ErrNotExist}, - {from: 6, limit: 0, expectErr: os.ErrNotExist}, - {from: 6, limit: 1, expectErr: os.ErrNotExist}, - {from: 6, limit: 2, expectErr: os.ErrNotExist}, - - {from: 7, limit: -1, expectErr: ErrNoChange}, - {from: 7, limit: 0, expectErr: ErrNoChange}, - {from: 7, limit: 1, expectErr: os.ErrNotExist}, - {from: 7, limit: 2, expectErr: os.ErrNotExist}, - - {from: 8, limit: -1, expectErr: os.ErrNotExist}, - {from: 8, limit: 0, expectErr: os.ErrNotExist}, - {from: 8, limit: 1, expectErr: os.ErrNotExist}, - {from: 8, limit: 2, expectErr: os.ErrNotExist}, - } - - for i, v := range tt { - ret := make(chan interface{}) - go m.readUp(v.from, v.limit, ret) - migrations, err := migrationsFromChannel(ret) - - if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || - (v.expectErr != os.ErrNotExist && v.expectErr != err) { - t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i) - t.Logf("%v, in %v", migrations, i) - } - if len(v.expectMigrations) > 0 { - equalMigSeq(t, i, v.expectMigrations, migrations) - } - } -} - -func TestReadDown(t *testing.T) { - m, _ := New("stub://", "stub://") - m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations - - tt := []struct { - from int - limit int // -1 means no limit - expectErr error - expectMigrations migrationSequence - }{ - {from: -1, limit: -1, expectErr: ErrNoChange}, - {from: -1, limit: 0, expectErr: ErrNoChange}, - {from: -1, limit: 1, expectErr: os.ErrNotExist}, - {from: -1, limit: 2, expectErr: os.ErrNotExist}, - - {from: 0, limit: -1, expectErr: os.ErrNotExist}, - {from: 0, limit: 0, expectErr: os.ErrNotExist}, - {from: 0, limit: 1, expectErr: os.ErrNotExist}, - {from: 0, limit: 2, expectErr: os.ErrNotExist}, - - {from: 1, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(1, -1))}, - {from: 1, limit: 0, expectErr: ErrNoChange}, - {from: 1, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(1, -1))}, - {from: 1, limit: 2, expectErr: ErrShortLimit{1}, expectMigrations: newMigSeq(M(1, -1))}, - - {from: 2, limit: -1, expectErr: os.ErrNotExist}, - {from: 2, limit: 0, expectErr: os.ErrNotExist}, - {from: 2, limit: 1, expectErr: os.ErrNotExist}, - {from: 2, limit: 2, expectErr: os.ErrNotExist}, - - {from: 3, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(3, 1), M(1, -1))}, - {from: 3, limit: 0, expectErr: ErrNoChange}, - {from: 3, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(3, 1))}, - {from: 3, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(3, 1), M(1, -1))}, - - {from: 4, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(4, 3), M(3, 1), M(1, -1))}, - {from: 4, limit: 0, expectErr: ErrNoChange}, - {from: 4, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(4, 3))}, - {from: 4, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(4, 3), M(3, 1))}, - - {from: 5, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3), M(3, 1), M(1, -1))}, - {from: 5, limit: 0, expectErr: ErrNoChange}, - {from: 5, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(5, 4))}, - {from: 5, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3))}, - - {from: 6, limit: -1, expectErr: os.ErrNotExist}, - {from: 6, limit: 0, expectErr: os.ErrNotExist}, - {from: 6, limit: 1, expectErr: os.ErrNotExist}, - {from: 6, limit: 2, 
expectErr: os.ErrNotExist}, - - {from: 7, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4), M(4, 3), M(3, 1), M(1, -1))}, - {from: 7, limit: 0, expectErr: ErrNoChange}, - {from: 7, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(7, 5))}, - {from: 7, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4))}, - - {from: 8, limit: -1, expectErr: os.ErrNotExist}, - {from: 8, limit: 0, expectErr: os.ErrNotExist}, - {from: 8, limit: 1, expectErr: os.ErrNotExist}, - {from: 8, limit: 2, expectErr: os.ErrNotExist}, - } - - for i, v := range tt { - ret := make(chan interface{}) - go m.readDown(v.from, v.limit, ret) - migrations, err := migrationsFromChannel(ret) - - if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || - (v.expectErr != os.ErrNotExist && v.expectErr != err) { - t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i) - t.Logf("%v, in %v", migrations, i) - } - if len(v.expectMigrations) > 0 { - equalMigSeq(t, i, v.expectMigrations, migrations) - } - } -} - -func TestLock(t *testing.T) { - m, _ := New("stub://", "stub://") - if err := m.lock(); err != nil { - t.Fatal(err) - } - - if err := m.lock(); err == nil { - t.Fatal("should be locked already") - } -} - -func migrationsFromChannel(ret chan interface{}) ([]*Migration, error) { - slice := make([]*Migration, 0) - for r := range ret { - switch r.(type) { - case error: - return slice, r.(error) - - case *Migration: - slice = append(slice, r.(*Migration)) - } - } - return slice, nil -} - -type migrationSequence []*Migration - -func newMigSeq(migr ...*Migration) migrationSequence { - return migr -} - -func (m *migrationSequence) add(migr ...*Migration) migrationSequence { - *m = append(*m, migr...) - return *m -} - -func (m *migrationSequence) bodySequence() []string { - r := make([]string, 0) - for _, v := range *m { - if v.Body != nil { - body, err := ioutil.ReadAll(v.Body) - if err != nil { - panic(err) // that should never happen - } - - // reset body reader - // TODO: is there a better/nicer way? 
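// One candidate answer, sketched: tee the body into a buffer on its first
// read, so a replayable reader exists without a second pass over the source
// (not part of this package; plain bytes/io/ioutil only):
//
//	var buf bytes.Buffer
//	consumed, _ := ioutil.ReadAll(io.TeeReader(v.Body, &buf))
//	_ = consumed                    // single pass over the source
//	v.Body = ioutil.NopCloser(&buf) // replayable copy for later readers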
- v.Body = ioutil.NopCloser(bytes.NewReader(body)) - - r = append(r, string(body[:])) - } - } - return r -} - -// M is a convenience func to create a new *Migration -func M(version uint, targetVersion ...int) *Migration { - if len(targetVersion) > 1 { - panic("only one targetVersion allowed") - } - ts := int(version) - if len(targetVersion) == 1 { - ts = targetVersion[0] - } - - m, _ := New("stub://", "stub://") - m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations - migr, err := m.newMigration(version, ts) - if err != nil { - panic(err) - } - return migr -} - -func equalMigSeq(t *testing.T, i int, expected, got migrationSequence) { - if len(expected) != len(got) { - t.Errorf("expected migrations %v, got %v, in %v", expected, got, i) - - } else { - for ii := 0; ii < len(expected); ii++ { - if expected[ii].Version != got[ii].Version { - t.Errorf("expected version %v, got %v, in %v", expected[ii].Version, got[ii].Version, i) - } - - if expected[ii].TargetVersion != got[ii].TargetVersion { - t.Errorf("expected targetVersion %v, got %v, in %v", expected[ii].TargetVersion, got[ii].TargetVersion, i) - } - } - } -} - -func equalDbSeq(t *testing.T, i int, expected migrationSequence, got *dStub.Stub) { - bs := expected.bodySequence() - if !got.EqualSequence(bs) { - t.Fatalf("\nexpected sequence %v,\ngot %v, in %v", bs, got.MigrationSequence, i) - } -} diff --git a/vendor/github.com/mattes/migrate/migration.go b/vendor/github.com/mattes/migrate/migration.go deleted file mode 100644 index 069e7f038..000000000 --- a/vendor/github.com/mattes/migrate/migration.go +++ /dev/null @@ -1,154 +0,0 @@ -package migrate - -import ( - "bufio" - "fmt" - "io" - "time" -) - -// DefaultBufferSize sets the in memory buffer size (in Bytes) for every -// pre-read migration (see DefaultPrefetchMigrations). -var DefaultBufferSize = uint(100000) - -// Migration holds information about a migration. -// It is initially created from data coming from the source and then -// used when run against the database. -type Migration struct { - // Identifier can be any string to help identifying - // the migration in the source. - Identifier string - - // Version is the version of this migration. - Version uint - - // TargetVersion is the migration version after this migration - // has been applied to the database. - // Can be -1, implying that this is a NilVersion. - TargetVersion int - - // Body holds an io.ReadCloser to the source. - Body io.ReadCloser - - // BufferedBody holds an buffered io.Reader to the underlying Body. - BufferedBody io.Reader - - // BufferSize defaults to DefaultBufferSize - BufferSize uint - - // bufferWriter holds an io.WriteCloser and pipes to BufferBody. - // It's an *Closer for flow control. - bufferWriter io.WriteCloser - - // Scheduled is the time when the migration was scheduled/ queued. - Scheduled time.Time - - // StartedBuffering is the time when buffering of the migration source started. - StartedBuffering time.Time - - // FinishedBuffering is the time when buffering of the migration source finished. - FinishedBuffering time.Time - - // FinishedReading is the time when the migration source is fully read. - FinishedReading time.Time - - // BytesRead holds the number of Bytes read from the migration source. - BytesRead int64 -} - -// NewMigration returns a new Migration and sets the body, identifier, -// version and targetVersion. Body can be nil, which turns this migration -// into a "NilMigration". If no identifier is provided, it will default to "". 
-// targetVersion can be -1, implying it is a NilVersion. -// -// What is a NilMigration? -// Usually each migration version coming from the source is expected to have -// both an Up and a Down migration. This is not a hard requirement though, so -// only the Up or the Down migration may be present. Say the user wants to -// migrate up to a version that doesn't have an actual Up migration; in that -// case we still want to apply the version, but with an empty body. We call -// that a NilMigration: a migration with an empty body. -// -// What is a NilVersion? -// NilVersion is a const(-1). When running down migrations and we are at the -// last down migration, there is no next down migration, so the targetVersion -// should be nil. Nil in this case is represented by -1 (because the type is int). -func NewMigration(body io.ReadCloser, identifier string, - version uint, targetVersion int) (*Migration, error) { - tnow := time.Now() - m := &Migration{ - Identifier: identifier, - Version: version, - TargetVersion: targetVersion, - Scheduled: tnow, - } - - if body == nil { - if len(identifier) == 0 { - m.Identifier = "" - } - - m.StartedBuffering = tnow - m.FinishedBuffering = tnow - m.FinishedReading = tnow - return m, nil - } - - br, bw := io.Pipe() - m.Body = body // want to simulate low latency? newSlowReader(body) - m.BufferSize = DefaultBufferSize - m.BufferedBody = br - m.bufferWriter = bw - return m, nil -} - -// String implements fmt.Stringer and is used in tests. -func (m *Migration) String() string { - return fmt.Sprintf("%v [%v=>%v]", m.Identifier, m.Version, m.TargetVersion) -} - -// LogString returns a string describing this migration to humans. -func (m *Migration) LogString() string { - directionStr := "u" - if m.TargetVersion < int(m.Version) { - directionStr = "d" - } - return fmt.Sprintf("%v/%v %v", m.Version, directionStr, m.Identifier) -} - -// Buffer buffers Body up to BufferSize. -// Calling this function blocks; call it in a goroutine. -func (m *Migration) Buffer() error { - if m.Body == nil { - return nil - } - - m.StartedBuffering = time.Now() - - b := bufio.NewReaderSize(m.Body, int(m.BufferSize)) - - // start reading from body, peek won't move the read pointer though - // poor man's solution? - b.Peek(int(m.BufferSize)) - - m.FinishedBuffering = time.Now() - - // write to bufferWriter, this will block until - // something starts reading from m.BufferedBody - n, err := b.WriteTo(m.bufferWriter) - if err != nil { - return err - } - - m.FinishedReading = time.Now() - m.BytesRead = n - - // close bufferWriter so readers of BufferedBody know that there is no - // more data coming - m.bufferWriter.Close() - - // it's safe to close the Body too - m.Body.Close() - - return nil -} diff --git a/vendor/github.com/mattes/migrate/migration_test.go b/vendor/github.com/mattes/migrate/migration_test.go deleted file mode 100644 index b6589f938..000000000 --- a/vendor/github.com/mattes/migrate/migration_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package migrate - -import ( - "fmt" - "io/ioutil" - "log" - "strings" -) - -func ExampleNewMigration() { - // Create a dummy migration body; this usually comes from the source. - body := ioutil.NopCloser(strings.NewReader("dummy migration that creates users table")) - - // Create a new Migration that represents version 1486686016. - // Once this migration has been applied to the database, the new - // migration version will be 1486689359.
- migr, err := NewMigration(body, "create_users_table", 1486686016, 1486689359) - if err != nil { - log.Fatal(err) - } - - fmt.Print(migr.LogString()) - // Output: - // 1486686016/u create_users_table -} - -func ExampleNewMigration_nilMigration() { - // Create a new Migration that represents a NilMigration. - // Once this migration has been applied to the database, the new - // migration version will be 1486689359. - migr, err := NewMigration(nil, "", 1486686016, 1486689359) - if err != nil { - log.Fatal(err) - } - - fmt.Print(migr.LogString()) - // Output: - // 1486686016/u -} - -func ExampleNewMigration_nilVersion() { - // Create a dummy migration body; this usually comes from the source. - body := ioutil.NopCloser(strings.NewReader("dummy migration that deletes users table")) - - // Create a new Migration that represents version 1486686016. - // This is the last available down migration, so the migration version - // will be -1, meaning NilVersion, once this migration has run. - migr, err := NewMigration(body, "drop_users_table", 1486686016, -1) - if err != nil { - log.Fatal(err) - } - - fmt.Print(migr.LogString()) - // Output: - // 1486686016/d drop_users_table -} diff --git a/vendor/github.com/mattes/migrate/source/aws-s3/README.md b/vendor/github.com/mattes/migrate/source/aws-s3/README.md deleted file mode 100644 index 3a59cfec9..000000000 --- a/vendor/github.com/mattes/migrate/source/aws-s3/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# aws-s3 - -`s3://<bucket>/<prefix>` diff --git a/vendor/github.com/mattes/migrate/source/aws-s3/s3.go b/vendor/github.com/mattes/migrate/source/aws-s3/s3.go deleted file mode 100644 index 8b581402c..000000000 --- a/vendor/github.com/mattes/migrate/source/aws-s3/s3.go +++ /dev/null @@ -1,125 +0,0 @@ -package awss3 - -import ( - "fmt" - "io" - "net/url" - "os" - "path" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/aws/aws-sdk-go/service/s3/s3iface" - "github.com/mattes/migrate/source" -) - -func init() { - source.Register("s3", &s3Driver{}) -} - -type s3Driver struct { - s3client s3iface.S3API - bucket string - prefix string - migrations *source.Migrations -} - -func (s *s3Driver) Open(folder string) (source.Driver, error) { - u, err := url.Parse(folder) - if err != nil { - return nil, err - } - sess, err := session.NewSession() - if err != nil { - return nil, err - } - driver := s3Driver{ - bucket: u.Host, - prefix: strings.Trim(u.Path, "/") + "/", - s3client: s3.New(sess), - migrations: source.NewMigrations(), - } - err = driver.loadMigrations() - if err != nil { - return nil, err - } - return &driver, nil -} - -func (s *s3Driver) loadMigrations() error { - output, err := s.s3client.ListObjects(&s3.ListObjectsInput{ - Bucket: aws.String(s.bucket), - Prefix: aws.String(s.prefix), - Delimiter: aws.String("/"), - }) - if err != nil { - return err - } - for _, object := range output.Contents { - _, fileName := path.Split(aws.StringValue(object.Key)) - m, err := source.DefaultParse(fileName) - if err != nil { - continue - } - if !s.migrations.Append(m) { - return fmt.Errorf("unable to parse file %v", aws.StringValue(object.Key)) - } - } - return nil -} - -func (s *s3Driver) Close() error { - return nil -} - -func (s *s3Driver) First() (uint, error) { - v, ok := s.migrations.First() - if !ok { - return 0, os.ErrNotExist - } - return v, nil -} - -func (s *s3Driver) Prev(version uint) (uint, error) { - v, ok := s.migrations.Prev(version) - if !ok { - return 0, os.ErrNotExist - } -
return v, nil -} - -func (s *s3Driver) Next(version uint) (uint, error) { - v, ok := s.migrations.Next(version) - if !ok { - return 0, os.ErrNotExist - } - return v, nil -} - -func (s *s3Driver) ReadUp(version uint) (io.ReadCloser, string, error) { - if m, ok := s.migrations.Up(version); ok { - return s.open(m) - } - return nil, "", os.ErrNotExist -} - -func (s *s3Driver) ReadDown(version uint) (io.ReadCloser, string, error) { - if m, ok := s.migrations.Down(version); ok { - return s.open(m) - } - return nil, "", os.ErrNotExist -} - -func (s *s3Driver) open(m *source.Migration) (io.ReadCloser, string, error) { - key := path.Join(s.prefix, m.Raw) - object, err := s.s3client.GetObject(&s3.GetObjectInput{ - Bucket: aws.String(s.bucket), - Key: aws.String(key), - }) - if err != nil { - return nil, "", err - } - return object.Body, m.Identifier, nil -} diff --git a/vendor/github.com/mattes/migrate/source/aws-s3/s3_test.go b/vendor/github.com/mattes/migrate/source/aws-s3/s3_test.go deleted file mode 100644 index f07d7ff2c..000000000 --- a/vendor/github.com/mattes/migrate/source/aws-s3/s3_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package awss3 - -import ( - "errors" - "io/ioutil" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/mattes/migrate/source" - st "github.com/mattes/migrate/source/testing" -) - -func Test(t *testing.T) { - s3Client := fakeS3{ - bucket: "some-bucket", - objects: map[string]string{ - "staging/migrations/1_foobar.up.sql": "1 up", - "staging/migrations/1_foobar.down.sql": "1 down", - "prod/migrations/1_foobar.up.sql": "1 up", - "prod/migrations/1_foobar.down.sql": "1 down", - "prod/migrations/3_foobar.up.sql": "3 up", - "prod/migrations/4_foobar.up.sql": "4 up", - "prod/migrations/4_foobar.down.sql": "4 down", - "prod/migrations/5_foobar.down.sql": "5 down", - "prod/migrations/7_foobar.up.sql": "7 up", - "prod/migrations/7_foobar.down.sql": "7 down", - "prod/migrations/not-a-migration.txt": "", - "prod/migrations/0-random-stuff/whatever.txt": "", - }, - } - driver := s3Driver{ - bucket: "some-bucket", - prefix: "prod/migrations/", - migrations: source.NewMigrations(), - s3client: &s3Client, - } - err := driver.loadMigrations() - if err != nil { - t.Fatal(err) - } - st.Test(t, &driver) -} - -type fakeS3 struct { - s3.S3 - bucket string - objects map[string]string -} - -func (s *fakeS3) ListObjects(input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) { - bucket := aws.StringValue(input.Bucket) - if bucket != s.bucket { - return nil, errors.New("bucket not found") - } - prefix := aws.StringValue(input.Prefix) - delimiter := aws.StringValue(input.Delimiter) - var output s3.ListObjectsOutput - for name := range s.objects { - if strings.HasPrefix(name, prefix) { - if delimiter == "" || !strings.Contains(strings.Replace(name, prefix, "", 1), delimiter) { - output.Contents = append(output.Contents, &s3.Object{ - Key: aws.String(name), - }) - } - } - } - return &output, nil -} - -func (s *fakeS3) GetObject(input *s3.GetObjectInput) (*s3.GetObjectOutput, error) { - bucket := aws.StringValue(input.Bucket) - if bucket != s.bucket { - return nil, errors.New("bucket not found") - } - if data, ok := s.objects[aws.StringValue(input.Key)]; ok { - body := ioutil.NopCloser(strings.NewReader(data)) - return &s3.GetObjectOutput{Body: body}, nil - } - return nil, errors.New("object not found") -} diff --git a/vendor/github.com/mattes/migrate/source/driver.go b/vendor/github.com/mattes/migrate/source/driver.go deleted file mode 
100644 index 103138df0..000000000 --- a/vendor/github.com/mattes/migrate/source/driver.go +++ /dev/null @@ -1,107 +0,0 @@ -// Package source provides the Source interface. -// All source drivers must implement this interface, register themselves, -// optionally provide a `WithInstance` function and pass the tests -// in package source/testing. -package source - -import ( - "fmt" - "io" - nurl "net/url" - "sync" -) - -var driversMu sync.RWMutex -var drivers = make(map[string]Driver) - -// Driver is the interface every source driver must implement. -// -// How to implement a source driver? -// 1. Implement this interface. -// 2. Optionally, add a function named `WithInstance`. -// This function should accept an existing source instance and a Config{} struct -// and return a driver instance. -// 3. Add a test that calls source/testing.go:Test() -// 4. Add your own tests for Open(), WithInstance() (when provided) and Close(). -// All other functions are tested by tests in source/testing. -// This saves you some time and makes sure all source drivers behave the same way. -// 5. Call Register in init(). -// -// Guidelines: -// * All configuration input must come from the URL string in func Open() -// or the Config{} struct in WithInstance. Don't os.Getenv(). -// * Drivers are supposed to be read-only. -// * Ideally don't load any contents (into memory) in Open or WithInstance. -type Driver interface { - // Open returns a new driver instance configured with parameters - // coming from the URL string. Migrate will call this function - // only once per instance. - Open(url string) (Driver, error) - - // Close closes the underlying source instance managed by the driver. - // Migrate will call this function only once per instance. - Close() error - - // First returns the very first migration version available to the driver. - // Migrate will call this function multiple times. - // If there is no version available, it must return os.ErrNotExist. - First() (version uint, err error) - - // Prev returns the previous version for a given version available to the driver. - // Migrate will call this function multiple times. - // If there is no previous version available, it must return os.ErrNotExist. - Prev(version uint) (prevVersion uint, err error) - - // Next returns the next version for a given version available to the driver. - // Migrate will call this function multiple times. - // If there is no next version available, it must return os.ErrNotExist. - Next(version uint) (nextVersion uint, err error) - - // ReadUp returns the UP migration body and an identifier that helps - // find this migration in the source for a given version. - // If there is no up migration available for this version, - // it must return os.ErrNotExist. - // Do not start reading, just return the ReadCloser! - ReadUp(version uint) (r io.ReadCloser, identifier string, err error) - - // ReadDown returns the DOWN migration body and an identifier that helps - // find this migration in the source for a given version. - // If there is no down migration available for this version, - // it must return os.ErrNotExist. - // Do not start reading, just return the ReadCloser! - ReadDown(version uint) (r io.ReadCloser, identifier string, err error) -} - -// Open returns a new driver instance.
-func Open(url string) (Driver, error) { - u, err := nurl.Parse(url) - if err != nil { - return nil, err - } - - if u.Scheme == "" { - return nil, fmt.Errorf("source driver: invalid URL scheme") - } - - driversMu.RLock() - d, ok := drivers[u.Scheme] - driversMu.RUnlock() - if !ok { - return nil, fmt.Errorf("source driver: unknown driver %v (forgotten import?)", u.Scheme) - } - - return d.Open(url) -} - -// Register globally registers a driver. -func Register(name string, driver Driver) { - driversMu.Lock() - defer driversMu.Unlock() - if driver == nil { - panic("Register driver is nil") - } - if _, dup := drivers[name]; dup { - panic("Register called twice for driver " + name) - } - drivers[name] = driver -} diff --git a/vendor/github.com/mattes/migrate/source/driver_test.go b/vendor/github.com/mattes/migrate/source/driver_test.go deleted file mode 100644 index 82284a0b9..000000000 --- a/vendor/github.com/mattes/migrate/source/driver_test.go +++ /dev/null @@ -1,8 +0,0 @@ -package source - -func ExampleDriver() { - // see source/stub for an example - - // source/stub/stub.go has the driver implementation - // source/stub/stub_test.go runs source/testing/test.go:Test -} diff --git a/vendor/github.com/mattes/migrate/source/file/README.md b/vendor/github.com/mattes/migrate/source/file/README.md deleted file mode 100644 index 7912eff66..000000000 --- a/vendor/github.com/mattes/migrate/source/file/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# file - -`file:///absolute/path` -`file://relative/path` diff --git a/vendor/github.com/mattes/migrate/source/file/file.go b/vendor/github.com/mattes/migrate/source/file/file.go deleted file mode 100644 index b97d0aa3d..000000000 --- a/vendor/github.com/mattes/migrate/source/file/file.go +++ /dev/null @@ -1,127 +0,0 @@ -package file - -import ( - "fmt" - "io" - "io/ioutil" - nurl "net/url" - "os" - "path" - "path/filepath" - - "github.com/mattes/migrate/source" -) - -func init() { - source.Register("file", &File{}) -} - -type File struct { - url string - path string - migrations *source.Migrations -} - -func (f *File) Open(url string) (source.Driver, error) { - u, err := nurl.Parse(url) - if err != nil { - return nil, err - } - - // concat host and path to restore full path - // host might be `.` - p := u.Host + u.Path - - if len(p) == 0 { - // default to current directory if no path - wd, err := os.Getwd() - if err != nil { - return nil, err - } - p = wd - - } else if p[0:1] == "."
|| p[0:1] != "/" { - // make path absolute if relative - abs, err := filepath.Abs(p) - if err != nil { - return nil, err - } - p = abs - } - - // scan directory - files, err := ioutil.ReadDir(p) - if err != nil { - return nil, err - } - - nf := &File{ - url: url, - path: p, - migrations: source.NewMigrations(), - } - - for _, fi := range files { - if !fi.IsDir() { - m, err := source.DefaultParse(fi.Name()) - if err != nil { - continue // ignore files that we can't parse - } - if !nf.migrations.Append(m) { - return nil, fmt.Errorf("unable to parse file %v", fi.Name()) - } - } - } - return nf, nil -} - -func (f *File) Close() error { - // nothing do to here - return nil -} - -func (f *File) First() (version uint, err error) { - if v, ok := f.migrations.First(); !ok { - return 0, &os.PathError{"first", f.path, os.ErrNotExist} - } else { - return v, nil - } -} - -func (f *File) Prev(version uint) (prevVersion uint, err error) { - if v, ok := f.migrations.Prev(version); !ok { - return 0, &os.PathError{fmt.Sprintf("prev for version %v", version), f.path, os.ErrNotExist} - } else { - return v, nil - } -} - -func (f *File) Next(version uint) (nextVersion uint, err error) { - if v, ok := f.migrations.Next(version); !ok { - return 0, &os.PathError{fmt.Sprintf("next for version %v", version), f.path, os.ErrNotExist} - } else { - return v, nil - } -} - -func (f *File) ReadUp(version uint) (r io.ReadCloser, identifier string, err error) { - if m, ok := f.migrations.Up(version); ok { - r, err := os.Open(path.Join(f.path, m.Raw)) - if err != nil { - return nil, "", err - } - return r, m.Identifier, nil - } - return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), f.path, os.ErrNotExist} -} - -func (f *File) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) { - if m, ok := f.migrations.Down(version); ok { - r, err := os.Open(path.Join(f.path, m.Raw)) - if err != nil { - return nil, "", err - } - return r, m.Identifier, nil - } - return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), f.path, os.ErrNotExist} -} diff --git a/vendor/github.com/mattes/migrate/source/file/file_test.go b/vendor/github.com/mattes/migrate/source/file/file_test.go deleted file mode 100644 index 310131c6f..000000000 --- a/vendor/github.com/mattes/migrate/source/file/file_test.go +++ /dev/null @@ -1,207 +0,0 @@ -package file - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "path/filepath" - "testing" - - st "github.com/mattes/migrate/source/testing" -) - -func Test(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) - - // write files that meet driver test requirements - mustWriteFile(t, tmpDir, "1_foobar.up.sql", "1 up") - mustWriteFile(t, tmpDir, "1_foobar.down.sql", "1 down") - - mustWriteFile(t, tmpDir, "3_foobar.up.sql", "3 up") - - mustWriteFile(t, tmpDir, "4_foobar.up.sql", "4 up") - mustWriteFile(t, tmpDir, "4_foobar.down.sql", "4 down") - - mustWriteFile(t, tmpDir, "5_foobar.down.sql", "5 down") - - mustWriteFile(t, tmpDir, "7_foobar.up.sql", "7 up") - mustWriteFile(t, tmpDir, "7_foobar.down.sql", "7 down") - - f := &File{} - d, err := f.Open("file://" + tmpDir) - if err != nil { - t.Fatal(err) - } - - st.Test(t, d) -} - -func TestOpen(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "TestOpen") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) - - mustWriteFile(t, tmpDir, "1_foobar.up.sql", "") - mustWriteFile(t, tmpDir, "1_foobar.down.sql", "") - - if !filepath.IsAbs(tmpDir) { - 
t.Fatal("expected tmpDir to be absolute path") - } - - f := &File{} - _, err = f.Open("file://" + tmpDir) // absolute path - if err != nil { - t.Fatal(err) - } -} - -func TestOpenWithRelativePath(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "TestOpen") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) - - wd, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - defer os.Chdir(wd) // rescue working dir after we are done - - if err := os.Chdir(tmpDir); err != nil { - t.Fatal(err) - } - - if err := os.Mkdir(filepath.Join(tmpDir, "foo"), os.ModePerm); err != nil { - t.Fatal(err) - } - - mustWriteFile(t, filepath.Join(tmpDir, "foo"), "1_foobar.up.sql", "") - - f := &File{} - - // dir: foo - d, err := f.Open("file://foo") - if err != nil { - t.Fatal(err) - } - _, err = d.First() - if err != nil { - t.Fatalf("expected first file in working dir %v for foo", tmpDir) - } - - // dir: ./foo - d, err = f.Open("file://./foo") - if err != nil { - t.Fatal(err) - } - _, err = d.First() - if err != nil { - t.Fatalf("expected first file in working dir %v for ./foo", tmpDir) - } -} - -func TestOpenDefaultsToCurrentDirectory(t *testing.T) { - wd, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - - f := &File{} - d, err := f.Open("file://") - if err != nil { - t.Fatal(err) - } - - if d.(*File).path != wd { - t.Fatal("expected driver to default to current directory") - } -} - -func TestOpenWithDuplicateVersion(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "TestOpenWithDuplicateVersion") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) - - mustWriteFile(t, tmpDir, "1_foo.up.sql", "") // 1 up - mustWriteFile(t, tmpDir, "1_bar.up.sql", "") // 1 up - - f := &File{} - _, err = f.Open("file://" + tmpDir) - if err == nil { - t.Fatal("expected err") - } -} - -func TestClose(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "TestOpen") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) - - f := &File{} - d, err := f.Open("file://" + tmpDir) - if err != nil { - t.Fatal(err) - } - - if d.Close() != nil { - t.Fatal("expected nil") - } -} - -func mustWriteFile(t testing.TB, dir, file string, body string) { - if err := ioutil.WriteFile(path.Join(dir, file), []byte(body), 06444); err != nil { - t.Fatal(err) - } -} - -func mustCreateBenchmarkDir(t *testing.B) (dir string) { - tmpDir, err := ioutil.TempDir("", "Benchmark") - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 1000; i++ { - mustWriteFile(t, tmpDir, fmt.Sprintf("%v_foobar.up.sql", i), "") - mustWriteFile(t, tmpDir, fmt.Sprintf("%v_foobar.down.sql", i), "") - } - - return tmpDir -} - -func BenchmarkOpen(b *testing.B) { - dir := mustCreateBenchmarkDir(b) - defer os.RemoveAll(dir) - b.ResetTimer() - for n := 0; n < b.N; n++ { - f := &File{} - f.Open("file://" + dir) - } - b.StopTimer() -} - -func BenchmarkNext(b *testing.B) { - dir := mustCreateBenchmarkDir(b) - defer os.RemoveAll(dir) - f := &File{} - d, _ := f.Open("file://" + dir) - b.ResetTimer() - v, err := d.First() - for n := 0; n < b.N; n++ { - for !os.IsNotExist(err) { - v, err = d.Next(v) - } - } - b.StopTimer() -} diff --git a/vendor/github.com/mattes/migrate/source/github/.gitignore b/vendor/github.com/mattes/migrate/source/github/.gitignore deleted file mode 100644 index 3006ad5eb..000000000 --- a/vendor/github.com/mattes/migrate/source/github/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.github_test_secrets diff --git a/vendor/github.com/mattes/migrate/source/github/README.md 
b/vendor/github.com/mattes/migrate/source/github/README.md deleted file mode 100644 index 257f575c4..000000000 --- a/vendor/github.com/mattes/migrate/source/github/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# github - -`github://user:personal-access-token@owner/repo/path` - -| URL Query | WithInstance Config | Description | -|------------|---------------------|-------------| -| user | | The username of the user connecting | -| personal-access-token | | An access token from Github (https://github.com/settings/tokens) | -| owner | | the repo owner | -| repo | | the name of the repository | -| path | | path in repo to migrations | diff --git a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1085649617_create_users_table.down.sql b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1085649617_create_users_table.down.sql deleted file mode 100644 index c99ddcdc8..000000000 --- a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1085649617_create_users_table.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE IF EXISTS users; diff --git a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1085649617_create_users_table.up.sql b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1085649617_create_users_table.up.sql deleted file mode 100644 index 92897dcab..000000000 --- a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1085649617_create_users_table.up.sql +++ /dev/null @@ -1,5 +0,0 @@ -CREATE TABLE users ( - user_id integer unique, - name varchar(40), - email varchar(40) -); diff --git a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1185749658_add_city_to_users.down.sql b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1185749658_add_city_to_users.down.sql deleted file mode 100644 index 940c60712..000000000 --- a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1185749658_add_city_to_users.down.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE users DROP COLUMN IF EXISTS city; diff --git a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1185749658_add_city_to_users.up.sql b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1185749658_add_city_to_users.up.sql deleted file mode 100644 index 67823edc9..000000000 --- a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1185749658_add_city_to_users.up.sql +++ /dev/null @@ -1,3 +0,0 @@ -ALTER TABLE users ADD COLUMN city varchar(100); - - diff --git a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.down.sql b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.down.sql deleted file mode 100644 index 3e87dd229..000000000 --- a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP INDEX IF EXISTS users_email_index; diff --git a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.up.sql b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.up.sql deleted file mode 100644 index fbeb4ab4e..000000000 --- a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.up.sql +++ /dev/null @@ -1,3 +0,0 @@ -CREATE UNIQUE INDEX CONCURRENTLY users_email_index ON users (email); - --- Lorem ipsum dolor sit amet, consectetur 
adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1385949617_create_books_table.down.sql b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1385949617_create_books_table.down.sql deleted file mode 100644 index 1a0b1a214..000000000 --- a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1385949617_create_books_table.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE IF EXISTS books; diff --git a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1385949617_create_books_table.up.sql b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1385949617_create_books_table.up.sql deleted file mode 100644 index f1503b518..000000000 --- a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1385949617_create_books_table.up.sql +++ /dev/null @@ -1,5 +0,0 @@ -CREATE TABLE books ( - user_id integer, - name varchar(40), - author varchar(40) -); diff --git a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1485949617_create_movies_table.down.sql b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1485949617_create_movies_table.down.sql deleted file mode 100644 index 3a5187689..000000000 --- a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1485949617_create_movies_table.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE IF EXISTS movies; diff --git a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1485949617_create_movies_table.up.sql b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1485949617_create_movies_table.up.sql deleted file mode 100644 index f0ef5943b..000000000 --- a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1485949617_create_movies_table.up.sql +++ /dev/null @@ -1,5 +0,0 @@ -CREATE TABLE movies ( - user_id integer, - name varchar(40), - director varchar(40) -); diff --git a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1585849751_just_a_comment.up.sql b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1585849751_just_a_comment.up.sql deleted file mode 100644 index 9b6b57a61..000000000 --- a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1585849751_just_a_comment.up.sql +++ /dev/null @@ -1 +0,0 @@ --- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1685849751_another_comment.up.sql b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1685849751_another_comment.up.sql deleted file mode 100644 index 9b6b57a61..000000000 --- a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1685849751_another_comment.up.sql +++ /dev/null @@ -1 +0,0 @@ --- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. 
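The github source removed above is the easiest piece to see end to end. For reference, here is a minimal sketch of how these example migrations would have been consumed through mattes/migrate before this change; the owner/repo path, token, and Postgres URL below are illustrative placeholders, not values taken from this diff:

```go
package main

import (
	"log"

	"github.com/mattes/migrate"
	_ "github.com/mattes/migrate/database/postgres" // database driver, assumed here for illustration
	_ "github.com/mattes/migrate/source/github"     // registers the github:// source scheme
)

func main() {
	// URL format per the README above: github://user:personal-access-token@owner/repo/path
	m, err := migrate.New(
		"github://user:token@owner/repo/examples/migrations", // placeholder credentials and path
		"postgres://localhost:5432/example?sslmode=disable",  // placeholder database URL
	)
	if err != nil {
		log.Fatal(err)
	}
	// Apply all pending up migrations; ErrNoChange means we were already current.
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		log.Fatal(err)
	}
}
```

Up() drives roughly the source.Driver contract documented in driver.go above: it walks versions with First/Next and streams each ReadUp body into the registered database driver.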
diff --git a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1785849751_another_comment.up.sql b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1785849751_another_comment.up.sql deleted file mode 100644 index 9b6b57a61..000000000 --- a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1785849751_another_comment.up.sql +++ /dev/null @@ -1 +0,0 @@ --- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1885849751_another_comment.up.sql b/vendor/github.com/mattes/migrate/source/github/examples/migrations/1885849751_another_comment.up.sql deleted file mode 100644 index 9b6b57a61..000000000 --- a/vendor/github.com/mattes/migrate/source/github/examples/migrations/1885849751_another_comment.up.sql +++ /dev/null @@ -1 +0,0 @@ --- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/mattes/migrate/source/github/github.go b/vendor/github.com/mattes/migrate/source/github/github.go deleted file mode 100644 index d534ed37b..000000000 --- a/vendor/github.com/mattes/migrate/source/github/github.go +++ /dev/null @@ -1,180 +0,0 @@ -package github - -import ( - "bytes" - "context" - "fmt" - "io" - "io/ioutil" - nurl "net/url" - "os" - "path" - "strings" - - "github.com/google/go-github/github" - "github.com/mattes/migrate/source" -) - -func init() { - source.Register("github", &Github{}) -} - -var ( - ErrNoUserInfo = fmt.Errorf("no username:token provided") - ErrNoAccessToken = fmt.Errorf("no access token") - ErrInvalidRepo = fmt.Errorf("invalid repo") - ErrInvalidGithubClient = fmt.Errorf("expected *github.Client") - ErrNoDir = fmt.Errorf("no directory") -) - -type Github struct { - client *github.Client - url string - - pathOwner string - pathRepo string - path string - migrations *source.Migrations -} - -type Config struct { -} - -func (g *Github) Open(url string) (source.Driver, error) { - u, err := nurl.Parse(url) - if err != nil { - return nil, err - } - - if u.User == nil { - return nil, ErrNoUserInfo - } - - password, ok := u.User.Password() - if !ok { - return nil, ErrNoUserInfo - } - - tr := &github.BasicAuthTransport{ - Username: u.User.Username(), - Password: password, - } - - gn := &Github{ - client: github.NewClient(tr.Client()), - url: url, - migrations: source.NewMigrations(), - } - - // set owner, repo and path in repo - gn.pathOwner = u.Host - pe := strings.Split(strings.Trim(u.Path, "/"), "/") - if len(pe) < 1 { - return nil, ErrInvalidRepo - } - gn.pathRepo = pe[0] - if len(pe) > 1 { - gn.path = strings.Join(pe[1:], "/") - } - - if err := gn.readDirectory(); err != nil { - return nil, err - } - - return gn, nil -} - -func WithInstance(client *github.Client, config *Config) (source.Driver, error) { - gn := &Github{ - client: client, - migrations: source.NewMigrations(), - } - if err := gn.readDirectory(); err != nil { - return nil, err - } - return gn, nil -} - -func (g *Github) readDirectory() error { - fileContent, dirContents, _, err := g.client.Repositories.GetContents(context.Background(), g.pathOwner, g.pathRepo, g.path, &github.RepositoryContentGetOptions{}) - if err != nil { - return err - } - if 
fileContent != nil { - return ErrNoDir - } - - for _, fi := range dirContents { - m, err := source.DefaultParse(*fi.Name) - if err != nil { - continue // ignore files that we can't parse - } - if !g.migrations.Append(m) { - return fmt.Errorf("unable to parse file %v", *fi.Name) - } - } - - return nil -} - -func (g *Github) Close() error { - return nil -} - -func (g *Github) First() (version uint, err error) { - if v, ok := g.migrations.First(); !ok { - return 0, &os.PathError{"first", g.path, os.ErrNotExist} - } else { - return v, nil - } -} - -func (g *Github) Prev(version uint) (prevVersion uint, err error) { - if v, ok := g.migrations.Prev(version); !ok { - return 0, &os.PathError{fmt.Sprintf("prev for version %v", version), g.path, os.ErrNotExist} - } else { - return v, nil - } -} - -func (g *Github) Next(version uint) (nextVersion uint, err error) { - if v, ok := g.migrations.Next(version); !ok { - return 0, &os.PathError{fmt.Sprintf("next for version %v", version), g.path, os.ErrNotExist} - } else { - return v, nil - } -} - -func (g *Github) ReadUp(version uint) (r io.ReadCloser, identifier string, err error) { - if m, ok := g.migrations.Up(version); ok { - file, _, _, err := g.client.Repositories.GetContents(context.Background(), g.pathOwner, g.pathRepo, path.Join(g.path, m.Raw), &github.RepositoryContentGetOptions{}) - if err != nil { - return nil, "", err - } - if file != nil { - r, err := file.GetContent() - if err != nil { - return nil, "", err - } - return ioutil.NopCloser(bytes.NewReader([]byte(r))), m.Identifier, nil - } - } - return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), g.path, os.ErrNotExist} -} - -func (g *Github) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) { - if m, ok := g.migrations.Down(version); ok { - file, _, _, err := g.client.Repositories.GetContents(context.Background(), g.pathOwner, g.pathRepo, path.Join(g.path, m.Raw), &github.RepositoryContentGetOptions{}) - if err != nil { - return nil, "", err - } - if file != nil { - r, err := file.GetContent() - if err != nil { - return nil, "", err - } - return ioutil.NopCloser(bytes.NewReader([]byte(r))), m.Identifier, nil - } - } - return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), g.path, os.ErrNotExist} -} diff --git a/vendor/github.com/mattes/migrate/source/github/github_test.go b/vendor/github.com/mattes/migrate/source/github/github_test.go deleted file mode 100644 index 83e86618e..000000000 --- a/vendor/github.com/mattes/migrate/source/github/github_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package github - -import ( - "bytes" - "io/ioutil" - "testing" - - st "github.com/mattes/migrate/source/testing" -) - -var GithubTestSecret = "" // username:token - -func init() { - secrets, err := ioutil.ReadFile(".github_test_secrets") - if err == nil { - GithubTestSecret = string(bytes.TrimSpace(secrets)[:]) - } -} - -func Test(t *testing.T) { - if len(GithubTestSecret) == 0 { - t.Skip("test requires .github_test_secrets") - } - - g := &Github{} - d, err := g.Open("github://" + GithubTestSecret + "@mattes/migrate_test_tmp/test") - if err != nil { - t.Fatal(err) - } - - st.Test(t, d) -} diff --git a/vendor/github.com/mattes/migrate/source/go-bindata/README.md b/vendor/github.com/mattes/migrate/source/go-bindata/README.md deleted file mode 100644 index e03456415..000000000 --- a/vendor/github.com/mattes/migrate/source/go-bindata/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# go-bindata - -## Usage - - - -### Read bindata with NewWithSourceInstance - -```shell
-go get -u github.com/jteeuwen/go-bindata/... -cd examples/migrations && go-bindata -pkg migrations . -``` - -```go -import ( - "github.com/mattes/migrate" - "github.com/mattes/migrate/source/go-bindata" - "github.com/mattes/migrate/source/go-bindata/examples/migrations" -) - -func main() { - // wrap assets into Resource - s := bindata.Resource(migrations.AssetNames(), - func(name string) ([]byte, error) { - return migrations.Asset(name) - }) - - m, err := migrate.NewWithSourceInstance("go-bindata", s, "database://foobar") - m.Up() // run your migrations and handle the errors above of course -} -``` - -### Read bindata with URL (todo) - -This will restore the assets in a tmp directory and then -proxy to source/file. go-bindata must be in your `$PATH`. - -``` -migrate -source go-bindata://examples/migrations/bindata.go -``` - - diff --git a/vendor/github.com/mattes/migrate/source/go-bindata/examples/migrations/bindata.go b/vendor/github.com/mattes/migrate/source/go-bindata/examples/migrations/bindata.go deleted file mode 100644 index 282d5ef54..000000000 --- a/vendor/github.com/mattes/migrate/source/go-bindata/examples/migrations/bindata.go +++ /dev/null @@ -1,304 +0,0 @@ -// Code generated by go-bindata. -// sources: -// 1085649617_create_users_table.down.sql -// 1085649617_create_users_table.up.sql -// 1185749658_add_city_to_users.down.sql -// 1185749658_add_city_to_users.up.sql -// DO NOT EDIT! - -package testdata - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" -) - -func bindataRead(data []byte, name string) ([]byte, error) { - gz, err := gzip.NewReader(bytes.NewBuffer(data)) - if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) - } - - var buf bytes.Buffer - _, err = io.Copy(&buf, gz) - clErr := gz.Close() - - if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) - } - if clErr != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -type asset struct { - bytes []byte - info os.FileInfo -} - -type bindataFileInfo struct { - name string - size int64 - mode os.FileMode - modTime time.Time -} - -func (fi bindataFileInfo) Name() string { - return fi.name -} -func (fi bindataFileInfo) Size() int64 { - return fi.size -} -func (fi bindataFileInfo) Mode() os.FileMode { - return fi.mode -} -func (fi bindataFileInfo) ModTime() time.Time { - return fi.modTime -} -func (fi bindataFileInfo) IsDir() bool { - return false -} -func (fi bindataFileInfo) Sys() interface{} { - return nil -} - -var __1085649617_create_users_tableDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x72\x09\xf2\x0f\x50\x08\x71\x74\xf2\x71\x55\xf0\x74\x53\x70\x8d\xf0\x0c\x0e\x09\x56\x28\x2d\x4e\x2d\x2a\xb6\xe6\x02\x04\x00\x00\xff\xff\x2c\x02\x3d\xa7\x1c\x00\x00\x00") - -func _1085649617_create_users_tableDownSqlBytes() ([]byte, error) { - return bindataRead( - __1085649617_create_users_tableDownSql, - "1085649617_create_users_table.down.sql", - ) -} - -func _1085649617_create_users_tableDownSql() (*asset, error) { - bytes, err := _1085649617_create_users_tableDownSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "1085649617_create_users_table.down.sql", size: 28, mode: os.FileMode(420), modTime: time.Unix(1485750305, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __1085649617_create_users_tableUpSql = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x72\x0e\x72\x75\x0c\x71\x55\x08\x71\x74\xf2\x71\x55\x28\x2d\x4e\x2d\x2a\x56\xd0\xe0\x52\x00\xb3\xe2\x33\x53\x14\x32\xf3\x4a\x52\xd3\x53\x8b\x14\x4a\xf3\x32\x0b\x4b\x53\x75\xb8\x14\x14\xf2\x12\x73\x53\x15\x14\x14\x14\xca\x12\x8b\x92\x33\x12\x8b\x34\x4c\x0c\x34\x41\xc2\xa9\xb9\x89\x99\x39\xa8\xc2\x5c\x9a\xd6\x5c\x80\x00\x00\x00\xff\xff\xa3\x57\xbc\x0b\x5f\x00\x00\x00") - -func _1085649617_create_users_tableUpSqlBytes() ([]byte, error) { - return bindataRead( - __1085649617_create_users_tableUpSql, - "1085649617_create_users_table.up.sql", - ) -} - -func _1085649617_create_users_tableUpSql() (*asset, error) { - bytes, err := _1085649617_create_users_tableUpSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "1085649617_create_users_table.up.sql", size: 95, mode: os.FileMode(420), modTime: time.Unix(1485803085, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __1185749658_add_city_to_usersDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\x2d\x4e\x2d\x2a\x56\x70\x09\xf2\x0f\x50\x70\xf6\xf7\x09\xf5\xf5\x53\xf0\x74\x53\x70\x8d\xf0\x0c\x0e\x09\x56\x48\xce\x2c\xa9\xb4\xe6\x02\x04\x00\x00\xff\xff\xb7\x52\x88\xd7\x2e\x00\x00\x00") - -func _1185749658_add_city_to_usersDownSqlBytes() ([]byte, error) { - return bindataRead( - __1185749658_add_city_to_usersDownSql, - "1185749658_add_city_to_users.down.sql", - ) -} - -func _1185749658_add_city_to_usersDownSql() (*asset, error) { - bytes, err := _1185749658_add_city_to_usersDownSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "1185749658_add_city_to_users.down.sql", size: 46, mode: os.FileMode(420), modTime: time.Unix(1485750443, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __1185749658_add_city_to_usersUpSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\x2d\x4e\x2d\x2a\x56\x70\x74\x71\x51\x70\xf6\xf7\x09\xf5\xf5\x53\x48\xce\x2c\xa9\x54\x28\x4b\x2c\x4a\xce\x48\x2c\xd2\x30\x34\x30\xd0\xb4\xe6\xe2\xe2\x02\x04\x00\x00\xff\xff\xa8\x0f\x49\xc6\x32\x00\x00\x00") - -func _1185749658_add_city_to_usersUpSqlBytes() ([]byte, error) { - return bindataRead( - __1185749658_add_city_to_usersUpSql, - "1185749658_add_city_to_users.up.sql", - ) -} - -func _1185749658_add_city_to_usersUpSql() (*asset, error) { - bytes, err := _1185749658_add_city_to_usersUpSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "1185749658_add_city_to_users.up.sql", size: 50, mode: os.FileMode(420), modTime: time.Unix(1485843733, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -// Asset loads and returns the asset for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func Asset(name string) ([]byte, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) - } - return a.bytes, nil - } - return nil, fmt.Errorf("Asset %s not found", name) -} - -// MustAsset is like Asset but panics when Asset would return an error. -// It simplifies safe initialization of global variables. 
-func MustAsset(name string) []byte { - a, err := Asset(name) - if err != nil { - panic("asset: Asset(" + name + "): " + err.Error()) - } - - return a -} - -// AssetInfo loads and returns the asset info for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func AssetInfo(name string) (os.FileInfo, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) - } - return a.info, nil - } - return nil, fmt.Errorf("AssetInfo %s not found", name) -} - -// AssetNames returns the names of the assets. -func AssetNames() []string { - names := make([]string, 0, len(_bindata)) - for name := range _bindata { - names = append(names, name) - } - return names -} - -// _bindata is a table, holding each asset generator, mapped to its name. -var _bindata = map[string]func() (*asset, error){ - "1085649617_create_users_table.down.sql": _1085649617_create_users_tableDownSql, - "1085649617_create_users_table.up.sql": _1085649617_create_users_tableUpSql, - "1185749658_add_city_to_users.down.sql": _1185749658_add_city_to_usersDownSql, - "1185749658_add_city_to_users.up.sql": _1185749658_add_city_to_usersUpSql, -} - -// AssetDir returns the file names below a certain -// directory embedded in the file by go-bindata. -// For example if you run go-bindata on data/... and data contains the -// following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png -// then AssetDir("data") would return []string{"foo.txt", "img"} -// AssetDir("data/img") would return []string{"a.png", "b.png"} -// AssetDir("foo.txt") and AssetDir("notexist") would return an error -// AssetDir("") will return []string{"data"}. 
-func AssetDir(name string) ([]string, error) { - node := _bintree - if len(name) != 0 { - cannonicalName := strings.Replace(name, "\\", "/", -1) - pathList := strings.Split(cannonicalName, "/") - for _, p := range pathList { - node = node.Children[p] - if node == nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - } - } - if node.Func != nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - rv := make([]string, 0, len(node.Children)) - for childName := range node.Children { - rv = append(rv, childName) - } - return rv, nil -} - -type bintree struct { - Func func() (*asset, error) - Children map[string]*bintree -} -var _bintree = &bintree{nil, map[string]*bintree{ - "1085649617_create_users_table.down.sql": &bintree{_1085649617_create_users_tableDownSql, map[string]*bintree{}}, - "1085649617_create_users_table.up.sql": &bintree{_1085649617_create_users_tableUpSql, map[string]*bintree{}}, - "1185749658_add_city_to_users.down.sql": &bintree{_1185749658_add_city_to_usersDownSql, map[string]*bintree{}}, - "1185749658_add_city_to_users.up.sql": &bintree{_1185749658_add_city_to_usersUpSql, map[string]*bintree{}}, -}} - -// RestoreAsset restores an asset under the given directory -func RestoreAsset(dir, name string) error { - data, err := Asset(name) - if err != nil { - return err - } - info, err := AssetInfo(name) - if err != nil { - return err - } - err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) - if err != nil { - return err - } - err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) - if err != nil { - return err - } - err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) - if err != nil { - return err - } - return nil -} - -// RestoreAssets restores an asset under the given directory recursively -func RestoreAssets(dir, name string) error { - children, err := AssetDir(name) - // File - if err != nil { - return RestoreAsset(dir, name) - } - // Dir - for _, child := range children { - err = RestoreAssets(dir, filepath.Join(name, child)) - if err != nil { - return err - } - } - return nil -} - -func _filePath(dir, name string) string { - cannonicalName := strings.Replace(name, "\\", "/", -1) - return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) 
-} - diff --git a/vendor/github.com/mattes/migrate/source/go-bindata/go-bindata.go b/vendor/github.com/mattes/migrate/source/go-bindata/go-bindata.go deleted file mode 100644 index 7426db71b..000000000 --- a/vendor/github.com/mattes/migrate/source/go-bindata/go-bindata.go +++ /dev/null @@ -1,119 +0,0 @@ -package bindata - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - - "github.com/mattes/migrate/source" -) - -type AssetFunc func(name string) ([]byte, error) - -func Resource(names []string, afn AssetFunc) *AssetSource { - return &AssetSource{ - Names: names, - AssetFunc: afn, - } -} - -type AssetSource struct { - Names []string - AssetFunc AssetFunc -} - -func init() { - source.Register("go-bindata", &Bindata{}) -} - -type Bindata struct { - path string - assetSource *AssetSource - migrations *source.Migrations -} - -func (b *Bindata) Open(url string) (source.Driver, error) { - return nil, fmt.Errorf("not yet implemented") -} - -var ( - ErrNoAssetSource = fmt.Errorf("expects *AssetSource") -) - -func WithInstance(instance interface{}) (source.Driver, error) { - if _, ok := instance.(*AssetSource); !ok { - return nil, ErrNoAssetSource - } - as := instance.(*AssetSource) - - bn := &Bindata{ - path: "", - assetSource: as, - migrations: source.NewMigrations(), - } - - for _, fi := range as.Names { - m, err := source.DefaultParse(fi) - if err != nil { - continue // ignore files that we can't parse - } - - if !bn.migrations.Append(m) { - return nil, fmt.Errorf("unable to parse file %v", fi) - } - } - - return bn, nil -} - -func (b *Bindata) Close() error { - return nil -} - -func (b *Bindata) First() (version uint, err error) { - if v, ok := b.migrations.First(); !ok { - return 0, &os.PathError{"first", b.path, os.ErrNotExist} - } else { - return v, nil - } -} - -func (b *Bindata) Prev(version uint) (prevVersion uint, err error) { - if v, ok := b.migrations.Prev(version); !ok { - return 0, &os.PathError{fmt.Sprintf("prev for version %v", version), b.path, os.ErrNotExist} - } else { - return v, nil - } -} - -func (b *Bindata) Next(version uint) (nextVersion uint, err error) { - if v, ok := b.migrations.Next(version); !ok { - return 0, &os.PathError{fmt.Sprintf("next for version %v", version), b.path, os.ErrNotExist} - } else { - return v, nil - } -} - -func (b *Bindata) ReadUp(version uint) (r io.ReadCloser, identifier string, err error) { - if m, ok := b.migrations.Up(version); ok { - body, err := b.assetSource.AssetFunc(m.Raw) - if err != nil { - return nil, "", err - } - return ioutil.NopCloser(bytes.NewReader(body)), m.Identifier, nil - } - return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), b.path, os.ErrNotExist} -} - -func (b *Bindata) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) { - if m, ok := b.migrations.Down(version); ok { - body, err := b.assetSource.AssetFunc(m.Raw) - if err != nil { - return nil, "", err - } - return ioutil.NopCloser(bytes.NewReader(body)), m.Identifier, nil - } - return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), b.path, os.ErrNotExist} -} diff --git a/vendor/github.com/mattes/migrate/source/go-bindata/go-bindata_test.go b/vendor/github.com/mattes/migrate/source/go-bindata/go-bindata_test.go deleted file mode 100644 index 746a7b91f..000000000 --- a/vendor/github.com/mattes/migrate/source/go-bindata/go-bindata_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package bindata - -import ( - "testing" - - "github.com/mattes/migrate/source/go-bindata/testdata" - st 
"github.com/mattes/migrate/source/testing" -) - -func Test(t *testing.T) { - // wrap assets into Resource first - s := Resource(testdata.AssetNames(), - func(name string) ([]byte, error) { - return testdata.Asset(name) - }) - - d, err := WithInstance(s) - if err != nil { - t.Fatal(err) - } - st.Test(t, d) -} - -func TestWithInstance(t *testing.T) { - // wrap assets into Resource - s := Resource(testdata.AssetNames(), - func(name string) ([]byte, error) { - return testdata.Asset(name) - }) - - _, err := WithInstance(s) - if err != nil { - t.Fatal(err) - } -} - -func TestOpen(t *testing.T) { - b := &Bindata{} - _, err := b.Open("") - if err == nil { - t.Fatal("expected err, because it's not implemented yet") - } -} diff --git a/vendor/github.com/mattes/migrate/source/go-bindata/testdata/bindata.go b/vendor/github.com/mattes/migrate/source/go-bindata/testdata/bindata.go deleted file mode 100644 index 304f3d87c..000000000 --- a/vendor/github.com/mattes/migrate/source/go-bindata/testdata/bindata.go +++ /dev/null @@ -1,396 +0,0 @@ -// Code generated by go-bindata. -// sources: -// 1_test.down.sql -// 1_test.up.sql -// 3_test.up.sql -// 4_test.down.sql -// 4_test.up.sql -// 5_test.down.sql -// 7_test.down.sql -// 7_test.up.sql -// DO NOT EDIT! - -package testdata - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" -) - -func bindataRead(data []byte, name string) ([]byte, error) { - gz, err := gzip.NewReader(bytes.NewBuffer(data)) - if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) - } - - var buf bytes.Buffer - _, err = io.Copy(&buf, gz) - clErr := gz.Close() - - if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) - } - if clErr != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -type asset struct { - bytes []byte - info os.FileInfo -} - -type bindataFileInfo struct { - name string - size int64 - mode os.FileMode - modTime time.Time -} - -func (fi bindataFileInfo) Name() string { - return fi.name -} -func (fi bindataFileInfo) Size() int64 { - return fi.size -} -func (fi bindataFileInfo) Mode() os.FileMode { - return fi.mode -} -func (fi bindataFileInfo) ModTime() time.Time { - return fi.modTime -} -func (fi bindataFileInfo) IsDir() bool { - return false -} -func (fi bindataFileInfo) Sys() interface{} { - return nil -} - -var __1_testDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") - -func _1_testDownSqlBytes() ([]byte, error) { - return bindataRead( - __1_testDownSql, - "1_test.down.sql", - ) -} - -func _1_testDownSql() (*asset, error) { - bytes, err := _1_testDownSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "1_test.down.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440324, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __1_testUpSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") - -func _1_testUpSqlBytes() ([]byte, error) { - return bindataRead( - __1_testUpSql, - "1_test.up.sql", - ) -} - -func _1_testUpSql() (*asset, error) { - bytes, err := _1_testUpSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "1_test.up.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440319, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __3_testUpSql = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") - -func _3_testUpSqlBytes() ([]byte, error) { - return bindataRead( - __3_testUpSql, - "3_test.up.sql", - ) -} - -func _3_testUpSql() (*asset, error) { - bytes, err := _3_testUpSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "3_test.up.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440331, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __4_testDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") - -func _4_testDownSqlBytes() ([]byte, error) { - return bindataRead( - __4_testDownSql, - "4_test.down.sql", - ) -} - -func _4_testDownSql() (*asset, error) { - bytes, err := _4_testDownSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "4_test.down.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440337, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __4_testUpSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") - -func _4_testUpSqlBytes() ([]byte, error) { - return bindataRead( - __4_testUpSql, - "4_test.up.sql", - ) -} - -func _4_testUpSql() (*asset, error) { - bytes, err := _4_testUpSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "4_test.up.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440335, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __5_testDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") - -func _5_testDownSqlBytes() ([]byte, error) { - return bindataRead( - __5_testDownSql, - "5_test.down.sql", - ) -} - -func _5_testDownSql() (*asset, error) { - bytes, err := _5_testDownSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "5_test.down.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440340, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __7_testDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") - -func _7_testDownSqlBytes() ([]byte, error) { - return bindataRead( - __7_testDownSql, - "7_test.down.sql", - ) -} - -func _7_testDownSql() (*asset, error) { - bytes, err := _7_testDownSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "7_test.down.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440343, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __7_testUpSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") - -func _7_testUpSqlBytes() ([]byte, error) { - return bindataRead( - __7_testUpSql, - "7_test.up.sql", - ) -} - -func _7_testUpSql() (*asset, error) { - bytes, err := _7_testUpSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "7_test.up.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440347, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -// Asset loads and returns the asset for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. 
-func Asset(name string) ([]byte, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) - } - return a.bytes, nil - } - return nil, fmt.Errorf("Asset %s not found", name) -} - -// MustAsset is like Asset but panics when Asset would return an error. -// It simplifies safe initialization of global variables. -func MustAsset(name string) []byte { - a, err := Asset(name) - if err != nil { - panic("asset: Asset(" + name + "): " + err.Error()) - } - - return a -} - -// AssetInfo loads and returns the asset info for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func AssetInfo(name string) (os.FileInfo, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) - } - return a.info, nil - } - return nil, fmt.Errorf("AssetInfo %s not found", name) -} - -// AssetNames returns the names of the assets. -func AssetNames() []string { - names := make([]string, 0, len(_bindata)) - for name := range _bindata { - names = append(names, name) - } - return names -} - -// _bindata is a table, holding each asset generator, mapped to its name. -var _bindata = map[string]func() (*asset, error){ - "1_test.down.sql": _1_testDownSql, - "1_test.up.sql": _1_testUpSql, - "3_test.up.sql": _3_testUpSql, - "4_test.down.sql": _4_testDownSql, - "4_test.up.sql": _4_testUpSql, - "5_test.down.sql": _5_testDownSql, - "7_test.down.sql": _7_testDownSql, - "7_test.up.sql": _7_testUpSql, -} - -// AssetDir returns the file names below a certain -// directory embedded in the file by go-bindata. -// For example if you run go-bindata on data/... and data contains the -// following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png -// then AssetDir("data") would return []string{"foo.txt", "img"} -// AssetDir("data/img") would return []string{"a.png", "b.png"} -// AssetDir("foo.txt") and AssetDir("notexist") would return an error -// AssetDir("") will return []string{"data"}. 
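The exported helpers Asset, MustAsset, AssetInfo and AssetNames are exactly what the go-bindata source driver consumes through Resource and WithInstance in the test at the top of this hunk. Before AssetDir's definition below, a minimal sketch of that wiring outside a test; the import path and alias are assumptions, the calls mirror the test:

```go
package main

import (
	"log"

	bindata "github.com/mattes/migrate/source/go-bindata"
	"github.com/mattes/migrate/source/go-bindata/testdata"
)

func main() {
	// Wrap the generated accessors into a Resource, then build a
	// source driver from it, the same pattern Test uses above.
	s := bindata.Resource(testdata.AssetNames(),
		func(name string) ([]byte, error) {
			return testdata.Asset(name)
		})

	d, err := bindata.WithInstance(s)
	if err != nil {
		log.Fatal(err)
	}

	first, err := d.First()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("first migration version: %d", first) // 1 for the fixtures above
}
```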
-func AssetDir(name string) ([]string, error) { - node := _bintree - if len(name) != 0 { - cannonicalName := strings.Replace(name, "\\", "/", -1) - pathList := strings.Split(cannonicalName, "/") - for _, p := range pathList { - node = node.Children[p] - if node == nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - } - } - if node.Func != nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - rv := make([]string, 0, len(node.Children)) - for childName := range node.Children { - rv = append(rv, childName) - } - return rv, nil -} - -type bintree struct { - Func func() (*asset, error) - Children map[string]*bintree -} -var _bintree = &bintree{nil, map[string]*bintree{ - "1_test.down.sql": &bintree{_1_testDownSql, map[string]*bintree{}}, - "1_test.up.sql": &bintree{_1_testUpSql, map[string]*bintree{}}, - "3_test.up.sql": &bintree{_3_testUpSql, map[string]*bintree{}}, - "4_test.down.sql": &bintree{_4_testDownSql, map[string]*bintree{}}, - "4_test.up.sql": &bintree{_4_testUpSql, map[string]*bintree{}}, - "5_test.down.sql": &bintree{_5_testDownSql, map[string]*bintree{}}, - "7_test.down.sql": &bintree{_7_testDownSql, map[string]*bintree{}}, - "7_test.up.sql": &bintree{_7_testUpSql, map[string]*bintree{}}, -}} - -// RestoreAsset restores an asset under the given directory -func RestoreAsset(dir, name string) error { - data, err := Asset(name) - if err != nil { - return err - } - info, err := AssetInfo(name) - if err != nil { - return err - } - err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) - if err != nil { - return err - } - err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) - if err != nil { - return err - } - err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) - if err != nil { - return err - } - return nil -} - -// RestoreAssets restores an asset under the given directory recursively -func RestoreAssets(dir, name string) error { - children, err := AssetDir(name) - // File - if err != nil { - return RestoreAsset(dir, name) - } - // Dir - for _, child := range children { - err = RestoreAssets(dir, filepath.Join(name, child)) - if err != nil { - return err - } - } - return nil -} - -func _filePath(dir, name string) string { - cannonicalName := strings.Replace(name, "\\", "/", -1) - return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) 
-} - diff --git a/vendor/github.com/mattes/migrate/source/google-cloud-storage/README.md b/vendor/github.com/mattes/migrate/source/google-cloud-storage/README.md deleted file mode 100644 index e61cb2311..000000000 --- a/vendor/github.com/mattes/migrate/source/google-cloud-storage/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# google-cloud-storage - -`gcs:///` diff --git a/vendor/github.com/mattes/migrate/source/google-cloud-storage/storage.go b/vendor/github.com/mattes/migrate/source/google-cloud-storage/storage.go deleted file mode 100644 index c1a18bc2f..000000000 --- a/vendor/github.com/mattes/migrate/source/google-cloud-storage/storage.go +++ /dev/null @@ -1,119 +0,0 @@ -package googlecloudstorage - -import ( - "fmt" - "io" - "net/url" - "os" - "path" - "strings" - - "cloud.google.com/go/storage" - "github.com/mattes/migrate/source" - "golang.org/x/net/context" - "google.golang.org/api/iterator" -) - -func init() { - source.Register("gcs", &gcs{}) -} - -type gcs struct { - bucket *storage.BucketHandle - prefix string - migrations *source.Migrations -} - -func (g *gcs) Open(folder string) (source.Driver, error) { - u, err := url.Parse(folder) - if err != nil { - return nil, err - } - client, err := storage.NewClient(context.Background()) - if err != nil { - return nil, err - } - driver := gcs{ - bucket: client.Bucket(u.Host), - prefix: strings.Trim(u.Path, "/") + "/", - migrations: source.NewMigrations(), - } - err = driver.loadMigrations() - if err != nil { - return nil, err - } - return &driver, nil -} - -func (g *gcs) loadMigrations() error { - iter := g.bucket.Objects(context.Background(), &storage.Query{ - Prefix: g.prefix, - Delimiter: "/", - }) - object, err := iter.Next() - for ; err == nil; object, err = iter.Next() { - _, fileName := path.Split(object.Name) - m, parseErr := source.DefaultParse(fileName) - if parseErr != nil { - continue - } - if !g.migrations.Append(m) { - return fmt.Errorf("unable to parse file %v", object.Name) - } - } - if err != iterator.Done { - return err - } - return nil -} - -func (g *gcs) Close() error { - return nil -} - -func (g *gcs) First() (uint, error) { - v, ok := g.migrations.First() - if !ok { - return 0, os.ErrNotExist - } - return v, nil -} - -func (g *gcs) Prev(version uint) (uint, error) { - v, ok := g.migrations.Prev(version) - if !ok { - return 0, os.ErrNotExist - } - return v, nil -} - -func (g *gcs) Next(version uint) (uint, error) { - v, ok := g.migrations.Next(version) - if !ok { - return 0, os.ErrNotExist - } - return v, nil -} - -func (g *gcs) ReadUp(version uint) (io.ReadCloser, string, error) { - if m, ok := g.migrations.Up(version); ok { - return g.open(m) - } - return nil, "", os.ErrNotExist -} - -func (g *gcs) ReadDown(version uint) (io.ReadCloser, string, error) { - if m, ok := g.migrations.Down(version); ok { - return g.open(m) - } - return nil, "", os.ErrNotExist -} - -func (g *gcs) open(m *source.Migration) (io.ReadCloser, string, error) { - objectPath := path.Join(g.prefix, m.Raw) - reader, err := g.bucket.Object(objectPath).NewReader(context.Background()) - if err != nil { - return nil, "", err - } - return reader, m.Identifier, nil -} diff --git a/vendor/github.com/mattes/migrate/source/google-cloud-storage/storage_test.go b/vendor/github.com/mattes/migrate/source/google-cloud-storage/storage_test.go deleted file mode 100644 index 2af4947cc..000000000 --- a/vendor/github.com/mattes/migrate/source/google-cloud-storage/storage_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package googlecloudstorage - -import ( - "testing" - - 
"github.com/fsouza/fake-gcs-server/fakestorage" - "github.com/mattes/migrate/source" - st "github.com/mattes/migrate/source/testing" -) - -func Test(t *testing.T) { - server := fakestorage.NewServer([]fakestorage.Object{ - {BucketName: "some-bucket", Name: "staging/migrations/1_foobar.up.sql", Content: []byte("1 up")}, - {BucketName: "some-bucket", Name: "staging/migrations/1_foobar.down.sql", Content: []byte("1 down")}, - {BucketName: "some-bucket", Name: "prod/migrations/1_foobar.up.sql", Content: []byte("1 up")}, - {BucketName: "some-bucket", Name: "prod/migrations/1_foobar.down.sql", Content: []byte("1 down")}, - {BucketName: "some-bucket", Name: "prod/migrations/3_foobar.up.sql", Content: []byte("3 up")}, - {BucketName: "some-bucket", Name: "prod/migrations/4_foobar.up.sql", Content: []byte("4 up")}, - {BucketName: "some-bucket", Name: "prod/migrations/4_foobar.down.sql", Content: []byte("4 down")}, - {BucketName: "some-bucket", Name: "prod/migrations/5_foobar.down.sql", Content: []byte("5 down")}, - {BucketName: "some-bucket", Name: "prod/migrations/7_foobar.up.sql", Content: []byte("7 up")}, - {BucketName: "some-bucket", Name: "prod/migrations/7_foobar.down.sql", Content: []byte("7 down")}, - {BucketName: "some-bucket", Name: "prod/migrations/not-a-migration.txt"}, - {BucketName: "some-bucket", Name: "prod/migrations/0-random-stuff/whatever.txt"}, - }) - defer server.Stop() - driver := gcs{ - bucket: server.Client().Bucket("some-bucket"), - prefix: "prod/migrations/", - migrations: source.NewMigrations(), - } - err := driver.loadMigrations() - if err != nil { - t.Fatal(err) - } - st.Test(t, &driver) -} diff --git a/vendor/github.com/mattes/migrate/source/migration.go b/vendor/github.com/mattes/migrate/source/migration.go deleted file mode 100644 index 97a4ee226..000000000 --- a/vendor/github.com/mattes/migrate/source/migration.go +++ /dev/null @@ -1,143 +0,0 @@ -package source - -import ( - "sort" -) - -// Direction is either up or down. -type Direction string - -const ( - Down Direction = "down" - Up = "up" -) - -// Migration is a helper struct for source drivers that need to -// build the full directory tree in memory. -// Migration is fully independent from migrate.Migration. -type Migration struct { - // Version is the version of this migration. - Version uint - - // Identifier can be any string that helps identifying - // this migration in the source. - Identifier string - - // Direction is either Up or Down. - Direction Direction - - // Raw holds the raw location path to this migration in source. - // ReadUp and ReadDown will use this. - Raw string -} - -// Migrations wraps Migration and has an internal index -// to keep track of Migration order. 
-type Migrations struct { - index uintSlice - migrations map[uint]map[Direction]*Migration -} - -func NewMigrations() *Migrations { - return &Migrations{ - index: make(uintSlice, 0), - migrations: make(map[uint]map[Direction]*Migration), - } -} - -func (i *Migrations) Append(m *Migration) (ok bool) { - if m == nil { - return false - } - - if i.migrations[m.Version] == nil { - i.migrations[m.Version] = make(map[Direction]*Migration) - } - - // reject duplicate versions - if _, dup := i.migrations[m.Version][m.Direction]; dup { - return false - } - - i.migrations[m.Version][m.Direction] = m - i.buildIndex() - - return true -} - -func (i *Migrations) buildIndex() { - i.index = make(uintSlice, 0) - for version, _ := range i.migrations { - i.index = append(i.index, version) - } - sort.Sort(i.index) -} - -func (i *Migrations) First() (version uint, ok bool) { - if len(i.index) == 0 { - return 0, false - } - return i.index[0], true -} - -func (i *Migrations) Prev(version uint) (prevVersion uint, ok bool) { - pos := i.findPos(version) - if pos >= 1 && len(i.index) > pos-1 { - return i.index[pos-1], true - } - return 0, false -} - -func (i *Migrations) Next(version uint) (nextVersion uint, ok bool) { - pos := i.findPos(version) - if pos >= 0 && len(i.index) > pos+1 { - return i.index[pos+1], true - } - return 0, false -} - -func (i *Migrations) Up(version uint) (m *Migration, ok bool) { - if _, ok := i.migrations[version]; ok { - if mx, ok := i.migrations[version][Up]; ok { - return mx, true - } - } - return nil, false -} - -func (i *Migrations) Down(version uint) (m *Migration, ok bool) { - if _, ok := i.migrations[version]; ok { - if mx, ok := i.migrations[version][Down]; ok { - return mx, true - } - } - return nil, false -} - -func (i *Migrations) findPos(version uint) int { - if len(i.index) > 0 { - ix := i.index.Search(version) - if ix < len(i.index) && i.index[ix] == version { - return ix - } - } - return -1 -} - -type uintSlice []uint - -func (s uintSlice) Len() int { - return len(s) -} - -func (s uintSlice) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s uintSlice) Less(i, j int) bool { - return s[i] < s[j] -} - -func (s uintSlice) Search(x uint) int { - return sort.Search(len(s), func(i int) bool { return s[i] >= x }) -} diff --git a/vendor/github.com/mattes/migrate/source/migration_test.go b/vendor/github.com/mattes/migrate/source/migration_test.go deleted file mode 100644 index 857cd26af..000000000 --- a/vendor/github.com/mattes/migrate/source/migration_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package source - -import ( - "testing" -) - -func TestNewMigrations(t *testing.T) { - // TODO -} - -func TestAppend(t *testing.T) { - // TODO -} - -func TestBuildIndex(t *testing.T) { - // TODO -} - -func TestFirst(t *testing.T) { - // TODO -} - -func TestPrev(t *testing.T) { - // TODO -} - -func TestUp(t *testing.T) { - // TODO -} - -func TestDown(t *testing.T) { - // TODO -} - -func TestFindPos(t *testing.T) { - m := Migrations{index: uintSlice{1, 2, 3}} - if p := m.findPos(0); p != -1 { - t.Errorf("expected -1, got %v", p) - } - if p := m.findPos(1); p != 0 { - t.Errorf("expected 0, got %v", p) - } - if p := m.findPos(3); p != 2 { - t.Errorf("expected 2, got %v", p) - } -} diff --git a/vendor/github.com/mattes/migrate/source/parse.go b/vendor/github.com/mattes/migrate/source/parse.go deleted file mode 100644 index 2f888fe75..000000000 --- a/vendor/github.com/mattes/migrate/source/parse.go +++ /dev/null @@ -1,39 +0,0 @@ -package source - -import ( - "fmt" - "regexp" - "strconv" -) - -var 
( - ErrParse = fmt.Errorf("no match") -) - -var ( - DefaultParse = Parse - DefaultRegex = Regex -) - -// Regex matches the following pattern: -// 123_name.up.ext -// 123_name.down.ext -var Regex = regexp.MustCompile(`^([0-9]+)_(.*)\.(` + string(Down) + `|` + string(Up) + `)\.(.*)$`) - -// Parse returns Migration for matching Regex pattern. -func Parse(raw string) (*Migration, error) { - m := Regex.FindStringSubmatch(raw) - if len(m) == 5 { - versionUint64, err := strconv.ParseUint(m[1], 10, 64) - if err != nil { - return nil, err - } - return &Migration{ - Version: uint(versionUint64), - Identifier: m[2], - Direction: Direction(m[3]), - Raw: raw, - }, nil - } - return nil, ErrParse -} diff --git a/vendor/github.com/mattes/migrate/source/parse_test.go b/vendor/github.com/mattes/migrate/source/parse_test.go deleted file mode 100644 index d06356cc8..000000000 --- a/vendor/github.com/mattes/migrate/source/parse_test.go +++ /dev/null @@ -1,106 +0,0 @@ -package source - -import ( - "testing" -) - -func TestParse(t *testing.T) { - tt := []struct { - name string - expectErr error - expectMigration *Migration - }{ - { - name: "1_foobar.up.sql", - expectErr: nil, - expectMigration: &Migration{ - Version: 1, - Identifier: "foobar", - Direction: Up, - Raw: "1_foobar.up.sql", - }, - }, - { - name: "1_foobar.down.sql", - expectErr: nil, - expectMigration: &Migration{ - Version: 1, - Identifier: "foobar", - Direction: Down, - Raw: "1_foobar.down.sql", - }, - }, - { - name: "1_f-o_ob+ar.up.sql", - expectErr: nil, - expectMigration: &Migration{ - Version: 1, - Identifier: "f-o_ob+ar", - Direction: Up, - Raw: "1_f-o_ob+ar.up.sql", - }, - }, - { - name: "1485385885_foobar.up.sql", - expectErr: nil, - expectMigration: &Migration{ - Version: 1485385885, - Identifier: "foobar", - Direction: Up, - Raw: "1485385885_foobar.up.sql", - }, - }, - { - name: "20170412214116_date_foobar.up.sql", - expectErr: nil, - expectMigration: &Migration{ - Version: 20170412214116, - Identifier: "date_foobar", - Direction: Up, - Raw: "20170412214116_date_foobar.up.sql", - }, - }, - { - name: "-1_foobar.up.sql", - expectErr: ErrParse, - expectMigration: nil, - }, - { - name: "foobar.up.sql", - expectErr: ErrParse, - expectMigration: nil, - }, - { - name: "1.up.sql", - expectErr: ErrParse, - expectMigration: nil, - }, - { - name: "1_foobar.sql", - expectErr: ErrParse, - expectMigration: nil, - }, - { - name: "1_foobar.up", - expectErr: ErrParse, - expectMigration: nil, - }, - { - name: "1_foobar.down", - expectErr: ErrParse, - expectMigration: nil, - }, - } - - for i, v := range tt { - f, err := Parse(v.name) - - if err != v.expectErr { - t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i) - } - - if v.expectMigration != nil && *f != *v.expectMigration { - t.Errorf("expected %+v, got %+v, in %v", *v.expectMigration, *f, i) - } - } -} diff --git a/vendor/github.com/mattes/migrate/source/stub/stub.go b/vendor/github.com/mattes/migrate/source/stub/stub.go deleted file mode 100644 index 0f4153c54..000000000 --- a/vendor/github.com/mattes/migrate/source/stub/stub.go +++ /dev/null @@ -1,85 +0,0 @@ -package stub - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - - "github.com/mattes/migrate/source" -) - -func init() { - source.Register("stub", &Stub{}) -} - -type Config struct{} - -// d, _ := source.Open("stub://") -// d.(*stub.Stub).Migrations = - -type Stub struct { - Url string - Instance interface{} - Migrations *source.Migrations - Config *Config -} - -func (s *Stub) Open(url string) (source.Driver, error) { - return 
&Stub{ - Url: url, - Migrations: source.NewMigrations(), - Config: &Config{}, - }, nil -} - -func WithInstance(instance interface{}, config *Config) (source.Driver, error) { - return &Stub{ - Instance: instance, - Migrations: source.NewMigrations(), - Config: config, - }, nil -} - -func (s *Stub) Close() error { - return nil -} - -func (s *Stub) First() (version uint, err error) { - if v, ok := s.Migrations.First(); !ok { - return 0, &os.PathError{"first", s.Url, os.ErrNotExist} // TODO: s.Url can be empty when called with WithInstance - } else { - return v, nil - } -} - -func (s *Stub) Prev(version uint) (prevVersion uint, err error) { - if v, ok := s.Migrations.Prev(version); !ok { - return 0, &os.PathError{fmt.Sprintf("prev for version %v", version), s.Url, os.ErrNotExist} - } else { - return v, nil - } -} - -func (s *Stub) Next(version uint) (nextVersion uint, err error) { - if v, ok := s.Migrations.Next(version); !ok { - return 0, &os.PathError{fmt.Sprintf("next for version %v", version), s.Url, os.ErrNotExist} - } else { - return v, nil - } -} - -func (s *Stub) ReadUp(version uint) (r io.ReadCloser, identifier string, err error) { - if m, ok := s.Migrations.Up(version); ok { - return ioutil.NopCloser(bytes.NewBufferString(m.Identifier)), fmt.Sprintf("%v.up.stub", version), nil - } - return nil, "", &os.PathError{fmt.Sprintf("read up version %v", version), s.Url, os.ErrNotExist} -} - -func (s *Stub) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) { - if m, ok := s.Migrations.Down(version); ok { - return ioutil.NopCloser(bytes.NewBufferString(m.Identifier)), fmt.Sprintf("%v.down.stub", version), nil - } - return nil, "", &os.PathError{fmt.Sprintf("read down version %v", version), s.Url, os.ErrNotExist} -} diff --git a/vendor/github.com/mattes/migrate/source/stub/stub_test.go b/vendor/github.com/mattes/migrate/source/stub/stub_test.go deleted file mode 100644 index 05ce819d7..000000000 --- a/vendor/github.com/mattes/migrate/source/stub/stub_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package stub - -import ( - "testing" - - "github.com/mattes/migrate/source" - st "github.com/mattes/migrate/source/testing" -) - -func Test(t *testing.T) { - s := &Stub{} - d, err := s.Open("") - if err != nil { - t.Fatal(err) - } - - m := source.NewMigrations() - m.Append(&source.Migration{Version: 1, Direction: source.Up}) - m.Append(&source.Migration{Version: 1, Direction: source.Down}) - m.Append(&source.Migration{Version: 3, Direction: source.Up}) - m.Append(&source.Migration{Version: 4, Direction: source.Up}) - m.Append(&source.Migration{Version: 4, Direction: source.Down}) - m.Append(&source.Migration{Version: 5, Direction: source.Down}) - m.Append(&source.Migration{Version: 7, Direction: source.Up}) - m.Append(&source.Migration{Version: 7, Direction: source.Down}) - - d.(*Stub).Migrations = m - - st.Test(t, d) -} diff --git a/vendor/github.com/mattes/migrate/source/testing/testing.go b/vendor/github.com/mattes/migrate/source/testing/testing.go deleted file mode 100644 index 3cc003c59..000000000 --- a/vendor/github.com/mattes/migrate/source/testing/testing.go +++ /dev/null @@ -1,169 +0,0 @@ -// Package testing has the source tests. -// All source drivers must pass the Test function. -// This lives in it's own package so it stays a test dependency. -package testing - -import ( - "os" - "testing" - - "github.com/mattes/migrate/source" -) - -// Test runs tests against source implementations. 
-// It assumes that the driver tests have access to the following migrations: -// -// u = up migration, d = down migration, n = version -// | 1 | - | 3 | 4 | 5 | - | 7 | -// | u d | - | u | u d | d | - | u d | -// -// See source/stub/stub_test.go or source/file/file_test.go for an example. -func Test(t *testing.T, d source.Driver) { - TestFirst(t, d) - TestPrev(t, d) - TestNext(t, d) - TestReadUp(t, d) - TestReadDown(t, d) -} - -func TestFirst(t *testing.T, d source.Driver) { - version, err := d.First() - if err != nil { - t.Fatalf("First: expected err to be nil, got %v", err) - } - if version != 1 { - t.Errorf("First: expected 1, got %v", version) - } -} - -func TestPrev(t *testing.T, d source.Driver) { - tt := []struct { - version uint - expectErr error - expectPrevVersion uint - }{ - {version: 0, expectErr: os.ErrNotExist}, - {version: 1, expectErr: os.ErrNotExist}, - {version: 2, expectErr: os.ErrNotExist}, - {version: 3, expectErr: nil, expectPrevVersion: 1}, - {version: 4, expectErr: nil, expectPrevVersion: 3}, - {version: 5, expectErr: nil, expectPrevVersion: 4}, - {version: 6, expectErr: os.ErrNotExist}, - {version: 7, expectErr: nil, expectPrevVersion: 5}, - {version: 8, expectErr: os.ErrNotExist}, - {version: 9, expectErr: os.ErrNotExist}, - } - - for i, v := range tt { - pv, err := d.Prev(v.version) - if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || (v.expectErr != os.ErrNotExist && err != v.expectErr) { - t.Errorf("Prev: expected %v, got %v, in %v", v.expectErr, err, i) - } - if err == nil && v.expectPrevVersion != pv { - t.Errorf("Prev: expected %v, got %v, in %v", v.expectPrevVersion, pv, i) - } - } -} - -func TestNext(t *testing.T, d source.Driver) { - tt := []struct { - version uint - expectErr error - expectNextVersion uint - }{ - {version: 0, expectErr: os.ErrNotExist}, - {version: 1, expectErr: nil, expectNextVersion: 3}, - {version: 2, expectErr: os.ErrNotExist}, - {version: 3, expectErr: nil, expectNextVersion: 4}, - {version: 4, expectErr: nil, expectNextVersion: 5}, - {version: 5, expectErr: nil, expectNextVersion: 7}, - {version: 6, expectErr: os.ErrNotExist}, - {version: 7, expectErr: os.ErrNotExist}, - {version: 8, expectErr: os.ErrNotExist}, - {version: 9, expectErr: os.ErrNotExist}, - } - - for i, v := range tt { - nv, err := d.Next(v.version) - if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || (v.expectErr != os.ErrNotExist && err != v.expectErr) { - t.Errorf("Next: expected %v, got %v, in %v", v.expectErr, err, i) - } - if err == nil && v.expectNextVersion != nv { - t.Errorf("Next: expected %v, got %v, in %v", v.expectNextVersion, nv, i) - } - } -} - -func TestReadUp(t *testing.T, d source.Driver) { - tt := []struct { - version uint - expectErr error - expectUp bool - }{ - {version: 0, expectErr: os.ErrNotExist}, - {version: 1, expectErr: nil, expectUp: true}, - {version: 2, expectErr: os.ErrNotExist}, - {version: 3, expectErr: nil, expectUp: true}, - {version: 4, expectErr: nil, expectUp: true}, - {version: 5, expectErr: os.ErrNotExist}, - {version: 6, expectErr: os.ErrNotExist}, - {version: 7, expectErr: nil, expectUp: true}, - {version: 8, expectErr: os.ErrNotExist}, - } - - for i, v := range tt { - up, identifier, err := d.ReadUp(v.version) - if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || - (v.expectErr != os.ErrNotExist && err != v.expectErr) { - t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i) - - } else if err == nil { - if len(identifier) == 0 { - t.Errorf("expected identifier not to be empty, in %v", i) - } - - if v.expectUp == true && up == nil { - t.Errorf("expected up 
not to be nil, in %v", i) - } else if v.expectUp == false && up != nil { - t.Errorf("expected up to be nil, got %v, in %v", up, i) - } - } - } -} - -func TestReadDown(t *testing.T, d source.Driver) { - tt := []struct { - version uint - expectErr error - expectDown bool - }{ - {version: 0, expectErr: os.ErrNotExist}, - {version: 1, expectErr: nil, expectDown: true}, - {version: 2, expectErr: os.ErrNotExist}, - {version: 3, expectErr: os.ErrNotExist}, - {version: 4, expectErr: nil, expectDown: true}, - {version: 5, expectErr: nil, expectDown: true}, - {version: 6, expectErr: os.ErrNotExist}, - {version: 7, expectErr: nil, expectDown: true}, - {version: 8, expectErr: os.ErrNotExist}, - } - - for i, v := range tt { - down, identifier, err := d.ReadDown(v.version) - if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || - (v.expectErr != os.ErrNotExist && err != v.expectErr) { - t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i) - - } else if err == nil { - if len(identifier) == 0 { - t.Errorf("expected identifier not to be empty, in %v", i) - } - - if v.expectDown == true && down == nil { - t.Errorf("expected down not to be nil, in %v", i) - } else if v.expectDown == false && down != nil { - t.Errorf("expected down to be nil, got %v, in %v", down, i) - } - } - } -} diff --git a/vendor/github.com/mattes/migrate/testing/docker.go b/vendor/github.com/mattes/migrate/testing/docker.go deleted file mode 100644 index 9017e97f8..000000000 --- a/vendor/github.com/mattes/migrate/testing/docker.go +++ /dev/null @@ -1,226 +0,0 @@ -// Package testing is used in driver tests. -package testing - -import ( - "bufio" - "context" // TODO: is issue with go < 1.7? - "encoding/json" - "fmt" - "io" - "math/rand" - "strconv" - "strings" - "testing" - "time" - - dockertypes "github.com/docker/docker/api/types" - dockercontainer "github.com/docker/docker/api/types/container" - dockernetwork "github.com/docker/docker/api/types/network" - dockerclient "github.com/docker/docker/client" -) - -func NewDockerContainer(t testing.TB, image string, env []string) (*DockerContainer, error) { - c, err := dockerclient.NewEnvClient() - if err != nil { - return nil, err - } - - contr := &DockerContainer{ - t: t, - client: c, - ImageName: image, - ENV: env, - } - - if err := contr.PullImage(); err != nil { - return nil, err - } - - if err := contr.Start(); err != nil { - return nil, err - } - - return contr, nil -} - -// DockerContainer implements Instance interface -type DockerContainer struct { - t testing.TB - client *dockerclient.Client - ImageName string - ENV []string - ContainerId string - ContainerName string - ContainerJSON dockertypes.ContainerJSON - containerInspected bool - keepForDebugging bool -} - -func (d *DockerContainer) PullImage() error { - d.t.Logf("Docker: Pull image %v", d.ImageName) - r, err := d.client.ImagePull(context.Background(), d.ImageName, dockertypes.ImagePullOptions{}) - if err != nil { - return err - } - defer r.Close() - - // read output and log relevant lines - bf := bufio.NewScanner(r) - for bf.Scan() { - var resp dockerImagePullOutput - if err := json.Unmarshal(bf.Bytes(), &resp); err != nil { - return err - } - if strings.HasPrefix(resp.Status, "Status: ") { - d.t.Logf("Docker: %v", resp.Status) - } - } - return bf.Err() -} - -func (d *DockerContainer) Start() error { - containerName := fmt.Sprintf("migrate_test_%v", pseudoRandStr(10)) - - // create container first - resp, err := d.client.ContainerCreate(context.Background(), - &dockercontainer.Config{ - Image: d.ImageName, - 
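// (aside) The "migrate_test" label presumably exists so stray test containers are easy to find and remove by hand; PublishAllPorts in the HostConfig below asks Docker to bind every exposed port to a random free host port, which firstPortMapping() later reads back via ContainerInspect.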
Labels: map[string]string{"migrate_test": "true"}, - Env: d.ENV, - }, - &dockercontainer.HostConfig{ - PublishAllPorts: true, - }, - &dockernetwork.NetworkingConfig{}, - containerName) - if err != nil { - return err - } - - d.ContainerId = resp.ID - d.ContainerName = containerName - - // then start it - if err := d.client.ContainerStart(context.Background(), resp.ID, dockertypes.ContainerStartOptions{}); err != nil { - return err - } - - d.t.Logf("Docker: Started container %v (%v) for image %v listening at %v:%v", resp.ID[0:12], containerName, d.ImageName, d.Host(), d.Port()) - for _, v := range resp.Warnings { - d.t.Logf("Docker: Warning: %v", v) - } - return nil -} - -func (d *DockerContainer) KeepForDebugging() { - d.keepForDebugging = true -} - -func (d *DockerContainer) Remove() error { - if d.keepForDebugging { - return nil - } - - if len(d.ContainerId) == 0 { - return fmt.Errorf("missing containerId") - } - if err := d.client.ContainerRemove(context.Background(), d.ContainerId, - dockertypes.ContainerRemoveOptions{ - Force: true, - }); err != nil { - d.t.Log(err) - return err - } - d.t.Logf("Docker: Removed %v", d.ContainerName) - return nil -} - -func (d *DockerContainer) Inspect() error { - if len(d.ContainerId) == 0 { - return fmt.Errorf("missing containerId") - } - resp, err := d.client.ContainerInspect(context.Background(), d.ContainerId) - if err != nil { - return err - } - - d.ContainerJSON = resp - d.containerInspected = true - return nil -} - -func (d *DockerContainer) Logs() (io.ReadCloser, error) { - if len(d.ContainerId) == 0 { - return nil, fmt.Errorf("missing containerId") - } - - return d.client.ContainerLogs(context.Background(), d.ContainerId, dockertypes.ContainerLogsOptions{ - ShowStdout: true, - ShowStderr: true, - }) -} - -func (d *DockerContainer) firstPortMapping() (containerPort uint, hostIP string, hostPort uint, err error) { - if !d.containerInspected { - if err := d.Inspect(); err != nil { - d.t.Fatal(err) - } - } - - for port, bindings := range d.ContainerJSON.NetworkSettings.Ports { - for _, binding := range bindings { - - hostPortUint, err := strconv.ParseUint(binding.HostPort, 10, 64) - if err != nil { - return 0, "", 0, err - } - - return uint(port.Int()), binding.HostIP, uint(hostPortUint), nil - } - } - return 0, "", 0, fmt.Errorf("no port binding") -} - -func (d *DockerContainer) Host() string { - _, hostIP, _, err := d.firstPortMapping() - if err != nil { - d.t.Fatal(err) - } - - if hostIP == "0.0.0.0" { - return "127.0.0.1" - } else { - return hostIP - } -} - -func (d *DockerContainer) Port() uint { - _, _, port, err := d.firstPortMapping() - if err != nil { - d.t.Fatal(err) - } - return port -} - -type dockerImagePullOutput struct { - Status string `json:"status"` - ProgressDetails struct { - Current int `json:"current"` - Total int `json:"total"` - } `json:"progressDetail"` - Id string `json:"id"` - Progress string `json:"progress"` -} - -func init() { - rand.Seed(time.Now().UnixNano()) -} - -func pseudoRandStr(n int) string { - var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz0123456789") - b := make([]rune, n) - for i := range b { - b[i] = letterRunes[rand.Intn(len(letterRunes))] - } - return string(b) -} diff --git a/vendor/github.com/mattes/migrate/testing/testing.go b/vendor/github.com/mattes/migrate/testing/testing.go deleted file mode 100644 index fb04ad74b..000000000 --- a/vendor/github.com/mattes/migrate/testing/testing.go +++ /dev/null @@ -1,91 +0,0 @@ -package testing - -import ( - "io/ioutil" - "os" - "strconv" - "testing" - 
"time" -) - -type IsReadyFunc func(Instance) bool - -type TestFunc func(*testing.T, Instance) - -type Version struct { - Image string - ENV []string -} - -func ParallelTest(t *testing.T, versions []Version, readyFn IsReadyFunc, testFn TestFunc) { - delay, err := strconv.Atoi(os.Getenv("MIGRATE_TEST_CONTAINER_BOOT_DELAY")) - if err != nil { - delay = 0 - } - - for i, version := range versions { - version := version // capture range variable, see https://goo.gl/60w3p2 - - // Only test against one version in short mode - // TODO: order is random, maybe always pick first version instead? - if i > 0 && testing.Short() { - t.Logf("Skipping %v in short mode", version) - - } else { - t.Run(version.Image, func(t *testing.T) { - t.Parallel() - - // create new container - container, err := NewDockerContainer(t, version.Image, version.ENV) - if err != nil { - t.Fatalf("%v\n%s", err, containerLogs(t, container)) - } - - // make sure to remove container once done - defer container.Remove() - - // wait until database is ready - tick := time.Tick(1000 * time.Millisecond) - timeout := time.After(time.Duration(delay+60) * time.Second) - outer: - for { - select { - case <-tick: - if readyFn(container) { - break outer - } - - case <-timeout: - t.Fatalf("Docker: Container not ready, timeout for %v.\n%s", version, containerLogs(t, container)) - } - } - - time.Sleep(time.Duration(int64(delay)) * time.Second) - - // we can now run the tests - testFn(t, container) - }) - } - } -} - -func containerLogs(t *testing.T, c *DockerContainer) []byte { - r, err := c.Logs() - if err != nil { - t.Error("%v", err) - return nil - } - defer r.Close() - b, err := ioutil.ReadAll(r) - if err != nil { - t.Error("%v", err) - return nil - } - return b -} - -type Instance interface { - Host() string - Port() uint - KeepForDebugging() -} diff --git a/vendor/github.com/mattes/migrate/testing/testing_test.go b/vendor/github.com/mattes/migrate/testing/testing_test.go deleted file mode 100644 index 8217decfa..000000000 --- a/vendor/github.com/mattes/migrate/testing/testing_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package testing - -import ( - "testing" -) - -func ExampleParallelTest(t *testing.T) { - var isReady = func(i Instance) bool { - // Return true if Instance is ready to run tests. - // Don't block here though. - return true - } - - // t is *testing.T coming from parent Test(t *testing.T) - ParallelTest(t, []Version{{Image: "docker_image:9.6"}}, isReady, - func(t *testing.T, i Instance) { - // Run your test/s ... - t.Fatal("...") - }) -} diff --git a/vendor/github.com/mattes/migrate/util.go b/vendor/github.com/mattes/migrate/util.go deleted file mode 100644 index 67048ea5c..000000000 --- a/vendor/github.com/mattes/migrate/util.go +++ /dev/null @@ -1,105 +0,0 @@ -package migrate - -import ( - "bufio" - "fmt" - "io" - nurl "net/url" - "strings" - "time" -) - -// MultiError holds multiple errors. -type MultiError struct { - Errs []error -} - -// NewMultiError returns an error type holding multiple errors. -func NewMultiError(errs ...error) MultiError { - compactErrs := make([]error, 0) - for _, e := range errs { - if e != nil { - compactErrs = append(compactErrs, e) - } - } - return MultiError{compactErrs} -} - -// Error implements error. Mulitple errors are concatenated with 'and's. 
-func (m MultiError) Error() string { - var strs = make([]string, 0) - for _, e := range m.Errs { - if len(e.Error()) > 0 { - strs = append(strs, e.Error()) - } - } - return strings.Join(strs, " and ") -} - -// suint safely converts int to uint -// see https://goo.gl/wEcqof -// see https://goo.gl/pai7Dr -func suint(n int) uint { - if n < 0 { - panic(fmt.Sprintf("suint(%v) expects input >= 0", n)) - } - return uint(n) -} - -// newSlowReader turns an io.ReadCloser into a slow io.ReadCloser. -// Use this to simulate a slow internet connection. -func newSlowReader(r io.ReadCloser) io.ReadCloser { - return &slowReader{ - rx: r, - reader: bufio.NewReader(r), - } -} - -type slowReader struct { - rx io.ReadCloser - reader *bufio.Reader -} - -func (b *slowReader) Read(p []byte) (n int, err error) { - time.Sleep(10 * time.Millisecond) - c, err := b.reader.ReadByte() - if err != nil { - return 0, err - } else { - copy(p, []byte{c}) - return 1, nil - } -} - -func (b *slowReader) Close() error { - return b.rx.Close() -} - -var errNoScheme = fmt.Errorf("no scheme") - -// schemeFromUrl returns the scheme from a URL string -func schemeFromUrl(url string) (string, error) { - u, err := nurl.Parse(url) - if err != nil { - return "", err - } - - if len(u.Scheme) == 0 { - return "", errNoScheme - } - - return u.Scheme, nil -} - -// FilterCustomQuery filters all query values starting with `x-` -func FilterCustomQuery(u *nurl.URL) *nurl.URL { - ux := *u - vx := make(nurl.Values) - for k, v := range ux.Query() { - if len(k) <= 1 || (len(k) > 1 && k[0:2] != "x-") { - vx[k] = v - } - } - ux.RawQuery = vx.Encode() - return &ux -} diff --git a/vendor/github.com/mattes/migrate/util_test.go b/vendor/github.com/mattes/migrate/util_test.go deleted file mode 100644 index 1ad234473..000000000 --- a/vendor/github.com/mattes/migrate/util_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package migrate - -import ( - nurl "net/url" - "testing" -) - -func TestSuintPanicsWithNegativeInput(t *testing.T) { - defer func() { - if r := recover(); r == nil { - t.Fatal("expected suint to panic for -1") - } - }() - suint(-1) -} - -func TestSuint(t *testing.T) { - if u := suint(0); u != 0 { - t.Fatalf("expected 0, got %v", u) - } -} - -func TestFilterCustomQuery(t *testing.T) { - n, err := nurl.Parse("foo://host?a=b&x-custom=foo&c=d") - if err != nil { - t.Fatal(err) - } - nx := FilterCustomQuery(n).Query() - if nx.Get("x-custom") != "" { - t.Fatalf("didn't expect x-custom") - } -} diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index 371ac7503..c71bcb981 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -26,7 +26,7 @@ const ( // The Content-Type values for the different wire protocols. 
FmtUnknown Format = `` - FmtText Format = `text/plain; version=` + TextVersion + FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` FmtProtoText Format = ProtoFmt + ` encoding=text` FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` diff --git a/vendor/github.com/prometheus/common/route/route.go b/vendor/github.com/prometheus/common/route/route.go index bb4688173..dbec638e5 100644 --- a/vendor/github.com/prometheus/common/route/route.go +++ b/vendor/github.com/prometheus/common/route/route.go @@ -19,11 +19,12 @@ func WithParam(ctx context.Context, p, v string) context.Context { return context.WithValue(ctx, param(p), v) } -// Router wraps httprouter.Router and adds support for prefixed sub-routers -// and per-request context injections. +// Router wraps httprouter.Router and adds support for prefixed sub-routers, +// per-request context injections and instrumentation. type Router struct { rtr *httprouter.Router prefix string + instrh func(handlerName string, handler http.HandlerFunc) http.HandlerFunc } // New returns a new Router. @@ -33,13 +34,18 @@ func New() *Router { } } +// WithInstrumentation returns a router with instrumentation support. +func (r *Router) WithInstrumentation(instrh func(handlerName string, handler http.HandlerFunc) http.HandlerFunc) *Router { + return &Router{rtr: r.rtr, prefix: r.prefix, instrh: instrh} +} + // WithPrefix returns a router that prefixes all registered routes with prefix. func (r *Router) WithPrefix(prefix string) *Router { - return &Router{rtr: r.rtr, prefix: r.prefix + prefix} + return &Router{rtr: r.rtr, prefix: r.prefix + prefix, instrh: r.instrh} } // handle turns a HandlerFunc into an httprouter.Handle. -func (r *Router) handle(h http.HandlerFunc) httprouter.Handle { +func (r *Router) handle(handlerName string, h http.HandlerFunc) httprouter.Handle { return func(w http.ResponseWriter, req *http.Request, params httprouter.Params) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -47,33 +53,36 @@ func (r *Router) handle(h http.HandlerFunc) httprouter.Handle { for _, p := range params { ctx = context.WithValue(ctx, param(p.Key), p.Value) } + if r.instrh != nil { + h = r.instrh(handlerName, h) + } h(w, req.WithContext(ctx)) } } // Get registers a new GET route. func (r *Router) Get(path string, h http.HandlerFunc) { - r.rtr.GET(r.prefix+path, r.handle(h)) + r.rtr.GET(r.prefix+path, r.handle(path, h)) } // Options registers a new OPTIONS route. func (r *Router) Options(path string, h http.HandlerFunc) { - r.rtr.OPTIONS(r.prefix+path, r.handle(h)) + r.rtr.OPTIONS(r.prefix+path, r.handle(path, h)) } // Del registers a new DELETE route. func (r *Router) Del(path string, h http.HandlerFunc) { - r.rtr.DELETE(r.prefix+path, r.handle(h)) + r.rtr.DELETE(r.prefix+path, r.handle(path, h)) } // Put registers a new PUT route. func (r *Router) Put(path string, h http.HandlerFunc) { - r.rtr.PUT(r.prefix+path, r.handle(h)) + r.rtr.PUT(r.prefix+path, r.handle(path, h)) } // Post registers a new POST route. 
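The WithInstrumentation hook added in this hunk threads a per-handler wrapper through handle, with handlerName set to the path the handler was registered under. A usage sketch; the latency logger is a stand-in (in real use this would wrap handlers in metrics collection), while route.New, WithInstrumentation and Get are the API shown above:

```go
package main

import (
	"log"
	"net/http"
	"time"

	"github.com/prometheus/common/route"
)

func main() {
	// Every handler registered on the instrumented router is wrapped;
	// handlerName is the path passed to Get/Post/etc.
	r := route.New().WithInstrumentation(
		func(handlerName string, h http.HandlerFunc) http.HandlerFunc {
			return func(w http.ResponseWriter, req *http.Request) {
				start := time.Now()
				h(w, req)
				log.Printf("%s served in %s", handlerName, time.Since(start))
			}
		})

	r.Get("/foo", func(w http.ResponseWriter, req *http.Request) {
		w.Write([]byte("ok"))
	})

	log.Fatal(http.ListenAndServe(":8080", r))
}
```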
func (r *Router) Post(path string, h http.HandlerFunc) { - r.rtr.POST(r.prefix+path, r.handle(h)) + r.rtr.POST(r.prefix+path, r.handle(path, h)) } // Redirect takes an absolute path and sends an internal HTTP redirect for it, diff --git a/vendor/github.com/prometheus/common/route/route_test.go b/vendor/github.com/prometheus/common/route/route_test.go index a9bb20996..d491cad66 100644 --- a/vendor/github.com/prometheus/common/route/route_test.go +++ b/vendor/github.com/prometheus/common/route/route_test.go @@ -42,3 +42,35 @@ func TestContext(t *testing.T) { } router.ServeHTTP(nil, r) } + +func TestInstrumentation(t *testing.T) { + var got string + cases := []struct { + router *Router + want string + }{ + { + router: New(), + want: "", + }, { + router: New().WithInstrumentation(func(handlerName string, handler http.HandlerFunc) http.HandlerFunc { + got = handlerName + return handler + }), + want: "/foo", + }, + } + + for _, c := range cases { + c.router.Get("/foo", func(w http.ResponseWriter, r *http.Request) {}) + + r, err := http.NewRequest("GET", "http://localhost:9090/foo", nil) + if err != nil { + t.Fatalf("Error building test request: %s", err) + } + c.router.ServeHTTP(nil, r) + if c.want != got { + t.Fatalf("Unexpected value: want %q, got %q", c.want, got) + } + } +} diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore new file mode 100644 index 000000000..25e3659ab --- /dev/null +++ b/vendor/github.com/prometheus/procfs/.gitignore @@ -0,0 +1 @@ +/fixtures/ diff --git a/vendor/github.com/prometheus/procfs/.travis.yml b/vendor/github.com/prometheus/procfs/.travis.yml index a43cf42c9..5416cf8a2 100644 --- a/vendor/github.com/prometheus/procfs/.travis.yml +++ b/vendor/github.com/prometheus/procfs/.travis.yml @@ -3,6 +3,9 @@ sudo: false language: go go: +- 1.7.x +- 1.8.x +- 1.9.x - 1.10.x - 1.x diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile index d88725f52..5c8f72625 100644 --- a/vendor/github.com/prometheus/procfs/Makefile +++ b/vendor/github.com/prometheus/procfs/Makefile @@ -39,7 +39,7 @@ check_license: @echo ">> checking license header" @./scripts/check_license.sh -test: sysfs/fixtures/.unpacked +test: fixtures/.unpacked sysfs/fixtures/.unpacked @echo ">> running all tests" @$(GO) test -race $(shell $(GO) list ./... 
| grep -v /vendor/ | grep -v examples) @@ -55,8 +55,8 @@ staticcheck: $(STATICCHECK) @echo ">> running staticcheck" @$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs) -sysfs/fixtures/.unpacked: sysfs/fixtures.ttar - ./ttar -C sysfs -x -f sysfs/fixtures.ttar +%/.unpacked: %.ttar + ./ttar -C $(dir $*) -x -f $*.ttar touch $@ $(FIRST_GOPATH)/bin/staticcheck: diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar new file mode 100644 index 000000000..3ee8291e8 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fixtures.ttar @@ -0,0 +1,446 @@ +# Archive created by ttar -c -f fixtures.ttar fixtures/ +Directory: fixtures +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26231 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/cmdline +Lines: 1 +vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/comm +Lines: 1 +vim +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/exe +SymlinkTo: /usr/bin/vim +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26231/fd +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/fd/0 +SymlinkTo: ../../symlinktargets/abc +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/fd/1 +SymlinkTo: ../../symlinktargets/def +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/fd/10 +SymlinkTo: ../../symlinktargets/xyz +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/fd/2 +SymlinkTo: ../../symlinktargets/ghi +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/fd/3 +SymlinkTo: ../../symlinktargets/uvw +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/io +Lines: 7 +rchar: 750339 +wchar: 818609 +syscr: 7405 +syscw: 5245 +read_bytes: 1024 +write_bytes: 2048 +cancelled_write_bytes: -1024 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/limits +Lines: 17 +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size 0 unlimited bytes +Max resident set unlimited unlimited bytes +Max processes 62898 62898 processes +Max open files 2048 4096 files +Max locked memory 65536 65536 bytes +Max address space 8589934592 unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 62898 62898 signals +Max msgqueue size 819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/mountstats +Lines: 19 +device rootfs mounted on / with fstype rootfs +device sysfs mounted on /sys with fstype sysfs +device proc mounted on /proc with fstype proc +device /dev/sda1 mounted on / with fstype ext4 +device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1 + opts: 
rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,local_lock=none + age: 13968 + caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 + nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured + sec: flavor=1,pseudoflavor=1 + events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0 + bytes: 1207640230 0 0 0 1210214218 0 295483 0 + RPC iostats version: 1.0 p/v: 100003/4 (nfs) + xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726 + per-op statistics + NULL: 0 0 0 0 0 0 0 0 + READ: 1298 1298 0 207680 1210292152 6 79386 79407 + WRITE: 0 0 0 0 0 0 0 0 + +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26231/net +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/net/dev +Lines: 4 +Inter-| Receive | Transmit + face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed + lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26231/ns +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/ns/mnt +SymlinkTo: mnt:[4026531840] +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/ns/net +SymlinkTo: net:[4026531993] +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/stat +Lines: 1 +26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26232 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/cmdline +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/comm +Lines: 1 +ata_sff +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26232/fd +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/fd/0 +SymlinkTo: ../../symlinktargets/abc +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/fd/1 +SymlinkTo: ../../symlinktargets/def +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/fd/2 +SymlinkTo: ../../symlinktargets/ghi +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/fd/3 +SymlinkTo: ../../symlinktargets/uvw +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/fd/4 +SymlinkTo: ../../symlinktargets/xyz +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/limits +Lines: 17 +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited 
unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size 0 unlimited bytes +Max resident set unlimited unlimited bytes +Max processes 29436 29436 processes +Max open files 1024 4096 files +Max locked memory 65536 65536 bytes +Max address space unlimited unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 29436 29436 signals +Max msgqueue size 819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/stat +Lines: 1 +33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26233 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26233/cmdline +Lines: 1 +com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/584 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/584/stat +Lines: 2 +1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0 +#!/bin/cat /proc/self/stat +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/buddyinfo +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/buddyinfo/short +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/buddyinfo/short/buddyinfo +Lines: 3 +Node 0, zone +Node 0, zone +Node 0, zone +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/buddyinfo/sizemismatch +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/buddyinfo/sizemismatch/buddyinfo +Lines: 3 +Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 +Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 0 +Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/buddyinfo/valid +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/buddyinfo/valid/buddyinfo +Lines: 3 +Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 +Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 +Node 0, zone Normal 4381 1093 185 
1530 567 102 4 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/fs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/fs/xfs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/fs/xfs/stat +Lines: 23 +extent_alloc 92447 97589 92448 93751 +abt 0 0 0 0 +blk_map 1767055 188820 184891 92447 92448 2140766 0 +bmbt 0 0 0 0 +dir 185039 92447 92444 136422 +trans 706 944304 0 +ig 185045 58807 0 126238 0 33637 22 +log 2883 113448 9 17360 739 +push_ail 945014 0 134260 15483 0 3940 464 159985 0 40 +xstrat 92447 0 +rw 107739 94045 +attr 4 0 0 0 +icluster 8677 7849 135802 +vnodes 92601 0 0 0 92444 92444 92444 0 +buf 2666287 7122 2659202 3599 2 7085 0 10297 7085 +abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147 +abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023 +bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0 +fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +qm 0 0 0 0 0 0 0 0 +xpc 399724544 92823103 86219234 +debug 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/mdstat +Lines: 26 +Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] +md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] + 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU] + +md127 : active raid1 sdi2[0] sdj2[1] + 312319552 blocks [2/2] [UU] + +md0 : active raid1 sdk[2](S) sdi1[0] sdj1[1] + 248896 blocks [2/2] [UU] + +md4 : inactive raid1 sda3[0] sdb3[1] + 4883648 blocks [2/2] [UU] + +md6 : active raid1 sdb2[2] sda2[0] + 195310144 blocks [2/1] [U_] + [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec + +md8 : active raid1 sdb1[1] sda1[0] + 195310144 blocks [2/2] [UU] + [=>...................] 
resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec + +md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1] + 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] + bitmap: 0/30 pages [0KB], 65536KB chunk + +unused devices: +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/net +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/dev +Lines: 6 +Inter-| Receive | Transmit + face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed +vethf345468: 648 8 0 0 0 0 0 0 438 5 0 0 0 0 0 0 + lo: 1664039048 1566805 0 0 0 0 0 0 1664039048 1566805 0 0 0 0 0 0 +docker0: 2568 38 0 0 0 0 0 0 438 5 0 0 0 0 0 0 + eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/ip_vs +Lines: 21 +IP Virtual Server version 1.2.1 (size=4096) +Prot LocalAddress:Port Scheduler Flags + -> RemoteAddress:Port Forward Weight ActiveConn InActConn +TCP C0A80016:0CEA wlc + -> C0A85216:0CEA Tunnel 100 248 2 + -> C0A85318:0CEA Tunnel 100 248 2 + -> C0A85315:0CEA Tunnel 100 248 1 +TCP C0A80039:0CEA wlc + -> C0A85416:0CEA Tunnel 0 0 0 + -> C0A85215:0CEA Tunnel 100 1499 0 + -> C0A83215:0CEA Tunnel 100 1498 0 +TCP C0A80037:0CEA wlc + -> C0A8321A:0CEA Tunnel 0 0 0 + -> C0A83120:0CEA Tunnel 100 0 0 +TCP [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh + -> [2620:0000:0000:0000:0000:0000:0000:0002]:0050 Route 1 0 0 + -> [2620:0000:0000:0000:0000:0000:0000:0003]:0050 Route 1 0 0 + -> [2620:0000:0000:0000:0000:0000:0000:0004]:0050 Route 1 1 1 +FWM 10001000 wlc + -> C0A8321A:0CEA Route 0 0 1 + -> C0A83215:0CEA Route 0 0 2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/ip_vs_stats +Lines: 6 + Total Incoming Outgoing Incoming Outgoing + Conns Packets Packets Bytes Bytes + 16AA370 E33656E5 0 51D8C8883AB3 0 + + Conns/s Pkts/s Pkts/s Bytes/s Bytes/s + 4 1FB3C 0 1282A8F 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/net/rpc +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/rpc/nfs +Lines: 5 +net 18628 0 18628 6 +rpc 4329785 0 4338291 +proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 +proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39 +proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/rpc/nfsd +Lines: 11 +rc 0 6 18622 +fh 0 0 0 0 0 +io 157286400 0 +th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 +ra 32 0 0 0 0 0 0 0 0 0 0 0 +net 18628 0 18628 6 +rpc 18628 0 0 0 0 +proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 +proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0 +proc4 2 2 10853 +proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/xfrm_stat +Lines: 28 +XfrmInError 1 +XfrmInBufferError 2 +XfrmInHdrError 4 +XfrmInNoStates 3 
+XfrmInStateProtoError 40 +XfrmInStateModeError 100 +XfrmInStateSeqError 6000 +XfrmInStateExpired 4 +XfrmInStateMismatch 23451 +XfrmInStateInvalid 55555 +XfrmInTmplMismatch 51 +XfrmInNoPols 65432 +XfrmInPolBlock 100 +XfrmInPolError 10000 +XfrmOutError 1000000 +XfrmOutBundleGenError 43321 +XfrmOutBundleCheckError 555 +XfrmOutNoStates 869 +XfrmOutStateProtoError 4542 +XfrmOutStateModeError 4 +XfrmOutStateSeqError 543 +XfrmOutStateExpired 565 +XfrmOutPolBlock 43456 +XfrmOutPolDead 7656 +XfrmOutPolError 1454 +XfrmFwdHdrError 6654 +XfrmOutStateInvalid 28765 +XfrmAcquireError 24532 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/self +SymlinkTo: 26231 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/stat +Lines: 16 +cpu 301854 612 111922 8979004 3552 2 3944 0 0 0 +cpu0 44490 19 21045 1087069 220 1 3410 0 0 0 +cpu1 47869 23 16474 1110787 591 0 46 0 0 0 +cpu2 46504 36 15916 1112321 441 0 326 0 0 0 +cpu3 47054 102 15683 1113230 533 0 60 0 0 0 +cpu4 28413 25 10776 1140321 217 0 8 0 0 0 +cpu5 29271 101 11586 1136270 672 0 30 0 0 0 +cpu6 29152 36 10276 1139721 319 0 29 0 0 0 +cpu7 29098 268 10164 1139282 555 0 31 0 0 0 +intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +ctxt 38014093 +btime 1418183276 +processes 26442 +procs_running 2 +procs_blocked 1 +softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/symlinktargets +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/README +Lines: 2 +This directory contains some empty files that are the symlinks the files in the "fd" directory point to. 
+They are otherwise ignored by the tests +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/abc +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/def +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/ghi +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/uvw +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/xyz +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/cmdline b/vendor/github.com/prometheus/procfs/fixtures/26231/cmdline deleted file mode 100644 index d2d8ef887..000000000 Binary files a/vendor/github.com/prometheus/procfs/fixtures/26231/cmdline and /dev/null differ diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/comm b/vendor/github.com/prometheus/procfs/fixtures/26231/comm deleted file mode 100644 index f027e0d4b..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/26231/comm +++ /dev/null @@ -1 +0,0 @@ -vim diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/exe b/vendor/github.com/prometheus/procfs/fixtures/26231/exe deleted file mode 120000 index a91bec4da..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/26231/exe +++ /dev/null @@ -1 +0,0 @@ -/usr/bin/vim \ No newline at end of file diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/fd/0 b/vendor/github.com/prometheus/procfs/fixtures/26231/fd/0 deleted file mode 120000 index da9c5dff3..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/26231/fd/0 +++ /dev/null @@ -1 +0,0 @@ -../../symlinktargets/abc \ No newline at end of file diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/fd/1 b/vendor/github.com/prometheus/procfs/fixtures/26231/fd/1 deleted file mode 120000 index ca47b50ca..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/26231/fd/1 +++ /dev/null @@ -1 +0,0 @@ -../../symlinktargets/def \ No newline at end of file diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/fd/10 b/vendor/github.com/prometheus/procfs/fixtures/26231/fd/10 deleted file mode 120000 index c08683168..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/26231/fd/10 +++ /dev/null @@ -1 +0,0 @@ -../../symlinktargets/xyz \ No newline at end of file diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/fd/2 b/vendor/github.com/prometheus/procfs/fixtures/26231/fd/2 deleted file mode 120000 index 66731c068..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/26231/fd/2 +++ /dev/null @@ -1 +0,0 @@ -../../symlinktargets/ghi \ No newline at end of file diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/fd/3 b/vendor/github.com/prometheus/procfs/fixtures/26231/fd/3 deleted file mode 120000 index 0135dce35..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/26231/fd/3 +++ /dev/null @@ -1 +0,0 @@ -../../symlinktargets/uvw \ No newline at end of file diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/io b/vendor/github.com/prometheus/procfs/fixtures/26231/io deleted file mode 100644 index b6210a7a7..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/26231/io +++ /dev/null @@ -1,7 +0,0 
@@ -rchar: 750339 -wchar: 818609 -syscr: 7405 -syscw: 5245 -read_bytes: 1024 -write_bytes: 2048 -cancelled_write_bytes: -1024 diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/limits b/vendor/github.com/prometheus/procfs/fixtures/26231/limits deleted file mode 100644 index 8b40108aa..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/26231/limits +++ /dev/null @@ -1,17 +0,0 @@ -Limit Soft Limit Hard Limit Units -Max cpu time unlimited unlimited seconds -Max file size unlimited unlimited bytes -Max data size unlimited unlimited bytes -Max stack size 8388608 unlimited bytes -Max core file size 0 unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 62898 62898 processes -Max open files 2048 4096 files -Max locked memory 65536 65536 bytes -Max address space 8589934592 unlimited bytes -Max file locks unlimited unlimited locks -Max pending signals 62898 62898 signals -Max msgqueue size 819200 819200 bytes -Max nice priority 0 0 -Max realtime priority 0 0 -Max realtime timeout unlimited unlimited us diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/mountstats b/vendor/github.com/prometheus/procfs/fixtures/26231/mountstats deleted file mode 100644 index a665c33da..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/26231/mountstats +++ /dev/null @@ -1,19 +0,0 @@ -device rootfs mounted on / with fstype rootfs -device sysfs mounted on /sys with fstype sysfs -device proc mounted on /proc with fstype proc -device /dev/sda1 mounted on / with fstype ext4 -device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1 - opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,local_lock=none - age: 13968 - caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 - nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured - sec: flavor=1,pseudoflavor=1 - events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0 - bytes: 1207640230 0 0 0 1210214218 0 295483 0 - RPC iostats version: 1.0 p/v: 100003/4 (nfs) - xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726 - per-op statistics - NULL: 0 0 0 0 0 0 0 0 - READ: 1298 1298 0 207680 1210292152 6 79386 79407 - WRITE: 0 0 0 0 0 0 0 0 - diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/net/dev b/vendor/github.com/prometheus/procfs/fixtures/26231/net/dev deleted file mode 100644 index f10895560..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/26231/net/dev +++ /dev/null @@ -1,4 +0,0 @@ -Inter-| Receive | Transmit - face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed - lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 - eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0 diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/ns/mnt b/vendor/github.com/prometheus/procfs/fixtures/26231/ns/mnt deleted file mode 120000 index 9c52ca211..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/26231/ns/mnt +++ /dev/null @@ -1 +0,0 @@ -mnt:[4026531840] \ No newline at end of file diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/ns/net b/vendor/github.com/prometheus/procfs/fixtures/26231/ns/net deleted file mode 120000 index 1f0f79594..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/26231/ns/net +++ /dev/null @@ -1 +0,0 @@ -net:[4026531993] \ No newline at end of file diff --git 
a/vendor/github.com/prometheus/procfs/fixtures/26231/stat b/vendor/github.com/prometheus/procfs/fixtures/26231/stat deleted file mode 100644 index 438aaa9dc..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/26231/stat +++ /dev/null @@ -1 +0,0 @@ -26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 diff --git a/vendor/github.com/prometheus/procfs/fixtures/26232/cmdline b/vendor/github.com/prometheus/procfs/fixtures/26232/cmdline deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/prometheus/procfs/fixtures/26232/comm b/vendor/github.com/prometheus/procfs/fixtures/26232/comm deleted file mode 100644 index 62361ca78..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/26232/comm +++ /dev/null @@ -1 +0,0 @@ -ata_sff diff --git a/vendor/github.com/prometheus/procfs/fixtures/26232/fd/0 b/vendor/github.com/prometheus/procfs/fixtures/26232/fd/0 deleted file mode 120000 index da9c5dff3..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/26232/fd/0 +++ /dev/null @@ -1 +0,0 @@ -../../symlinktargets/abc \ No newline at end of file diff --git a/vendor/github.com/prometheus/procfs/fixtures/26232/fd/1 b/vendor/github.com/prometheus/procfs/fixtures/26232/fd/1 deleted file mode 120000 index ca47b50ca..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/26232/fd/1 +++ /dev/null @@ -1 +0,0 @@ -../../symlinktargets/def \ No newline at end of file diff --git a/vendor/github.com/prometheus/procfs/fixtures/26232/fd/2 b/vendor/github.com/prometheus/procfs/fixtures/26232/fd/2 deleted file mode 120000 index 66731c068..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/26232/fd/2 +++ /dev/null @@ -1 +0,0 @@ -../../symlinktargets/ghi \ No newline at end of file diff --git a/vendor/github.com/prometheus/procfs/fixtures/26232/fd/3 b/vendor/github.com/prometheus/procfs/fixtures/26232/fd/3 deleted file mode 120000 index 0135dce35..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/26232/fd/3 +++ /dev/null @@ -1 +0,0 @@ -../../symlinktargets/uvw \ No newline at end of file diff --git a/vendor/github.com/prometheus/procfs/fixtures/26232/fd/4 b/vendor/github.com/prometheus/procfs/fixtures/26232/fd/4 deleted file mode 120000 index c08683168..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/26232/fd/4 +++ /dev/null @@ -1 +0,0 @@ -../../symlinktargets/xyz \ No newline at end of file diff --git a/vendor/github.com/prometheus/procfs/fixtures/26232/limits b/vendor/github.com/prometheus/procfs/fixtures/26232/limits deleted file mode 100644 index 3f9bf16a9..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/26232/limits +++ /dev/null @@ -1,17 +0,0 @@ -Limit Soft Limit Hard Limit Units -Max cpu time unlimited unlimited seconds -Max file size unlimited unlimited bytes -Max data size unlimited unlimited bytes -Max stack size 8388608 unlimited bytes -Max core file size 0 unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 29436 29436 processes -Max open files 1024 4096 files -Max locked memory 65536 65536 bytes -Max address space unlimited unlimited bytes -Max file locks unlimited unlimited locks -Max pending signals 29436 29436 signals -Max msgqueue size 819200 819200 bytes -Max nice priority 0 0 -Max realtime priority 0 0 -Max 
realtime timeout unlimited unlimited us diff --git a/vendor/github.com/prometheus/procfs/fixtures/26232/stat b/vendor/github.com/prometheus/procfs/fixtures/26232/stat deleted file mode 100644 index 321b16073..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/26232/stat +++ /dev/null @@ -1 +0,0 @@ -33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 diff --git a/vendor/github.com/prometheus/procfs/fixtures/26233/cmdline b/vendor/github.com/prometheus/procfs/fixtures/26233/cmdline deleted file mode 100644 index 95ec7d69e..000000000 Binary files a/vendor/github.com/prometheus/procfs/fixtures/26233/cmdline and /dev/null differ diff --git a/vendor/github.com/prometheus/procfs/fixtures/584/stat b/vendor/github.com/prometheus/procfs/fixtures/584/stat deleted file mode 100644 index 65b9369d1..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/584/stat +++ /dev/null @@ -1,2 +0,0 @@ -1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0 -#!/bin/cat /proc/self/stat diff --git a/vendor/github.com/prometheus/procfs/fixtures/buddyinfo/short/buddyinfo b/vendor/github.com/prometheus/procfs/fixtures/buddyinfo/short/buddyinfo deleted file mode 100644 index 40e71ca35..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/buddyinfo/short/buddyinfo +++ /dev/null @@ -1,3 +0,0 @@ -Node 0, zone -Node 0, zone -Node 0, zone diff --git a/vendor/github.com/prometheus/procfs/fixtures/buddyinfo/sizemismatch/buddyinfo b/vendor/github.com/prometheus/procfs/fixtures/buddyinfo/sizemismatch/buddyinfo deleted file mode 100644 index 945636182..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/buddyinfo/sizemismatch/buddyinfo +++ /dev/null @@ -1,3 +0,0 @@ -Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 -Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 0 -Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 diff --git a/vendor/github.com/prometheus/procfs/fixtures/buddyinfo/valid/buddyinfo b/vendor/github.com/prometheus/procfs/fixtures/buddyinfo/valid/buddyinfo deleted file mode 100644 index f90594a81..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/buddyinfo/valid/buddyinfo +++ /dev/null @@ -1,3 +0,0 @@ -Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 -Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 -Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0 diff --git a/vendor/github.com/prometheus/procfs/fixtures/fs/xfs/stat b/vendor/github.com/prometheus/procfs/fixtures/fs/xfs/stat deleted file mode 100644 index f7ca7f940..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/fs/xfs/stat +++ /dev/null @@ -1,23 +0,0 @@ -extent_alloc 92447 97589 92448 93751 -abt 0 0 0 0 -blk_map 1767055 188820 184891 92447 92448 2140766 0 -bmbt 0 0 0 0 -dir 185039 92447 92444 136422 -trans 706 944304 0 -ig 185045 58807 0 126238 0 33637 22 -log 2883 113448 9 17360 739 -push_ail 945014 0 134260 15483 0 3940 464 159985 0 40 -xstrat 92447 0 -rw 107739 94045 -attr 4 0 0 0 -icluster 8677 7849 135802 -vnodes 92601 0 0 0 92444 92444 92444 0 -buf 2666287 7122 2659202 3599 2 7085 0 10297 7085 -abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147 -abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023 -bmbt2 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 -ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0 -fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -qm 0 0 0 0 0 0 0 0 -xpc 399724544 92823103 86219234 -debug 0 diff --git a/vendor/github.com/prometheus/procfs/fixtures/mdstat b/vendor/github.com/prometheus/procfs/fixtures/mdstat deleted file mode 100644 index 4430bdee2..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/mdstat +++ /dev/null @@ -1,26 +0,0 @@ -Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] -md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] - 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU] - -md127 : active raid1 sdi2[0] sdj2[1] - 312319552 blocks [2/2] [UU] - -md0 : active raid1 sdk[2](S) sdi1[0] sdj1[1] - 248896 blocks [2/2] [UU] - -md4 : inactive raid1 sda3[0] sdb3[1] - 4883648 blocks [2/2] [UU] - -md6 : active raid1 sdb2[2] sda2[0] - 195310144 blocks [2/1] [U_] - [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec - -md8 : active raid1 sdb1[1] sda1[0] - 195310144 blocks [2/2] [UU] - [=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec - -md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1] - 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] - bitmap: 0/30 pages [0KB], 65536KB chunk - -unused devices: diff --git a/vendor/github.com/prometheus/procfs/fixtures/net/dev b/vendor/github.com/prometheus/procfs/fixtures/net/dev deleted file mode 100644 index 2df786fa5..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/net/dev +++ /dev/null @@ -1,6 +0,0 @@ -Inter-| Receive | Transmit - face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed -vethf345468: 648 8 0 0 0 0 0 0 438 5 0 0 0 0 0 0 - lo: 1664039048 1566805 0 0 0 0 0 0 1664039048 1566805 0 0 0 0 0 0 -docker0: 2568 38 0 0 0 0 0 0 438 5 0 0 0 0 0 0 - eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0 diff --git a/vendor/github.com/prometheus/procfs/fixtures/net/ip_vs b/vendor/github.com/prometheus/procfs/fixtures/net/ip_vs deleted file mode 100644 index 5ee4bd2be..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/net/ip_vs +++ /dev/null @@ -1,21 +0,0 @@ -IP Virtual Server version 1.2.1 (size=4096) -Prot LocalAddress:Port Scheduler Flags - -> RemoteAddress:Port Forward Weight ActiveConn InActConn -TCP C0A80016:0CEA wlc - -> C0A85216:0CEA Tunnel 100 248 2 - -> C0A85318:0CEA Tunnel 100 248 2 - -> C0A85315:0CEA Tunnel 100 248 1 -TCP C0A80039:0CEA wlc - -> C0A85416:0CEA Tunnel 0 0 0 - -> C0A85215:0CEA Tunnel 100 1499 0 - -> C0A83215:0CEA Tunnel 100 1498 0 -TCP C0A80037:0CEA wlc - -> C0A8321A:0CEA Tunnel 0 0 0 - -> C0A83120:0CEA Tunnel 100 0 0 -TCP [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh - -> [2620:0000:0000:0000:0000:0000:0000:0002]:0050 Route 1 0 0 - -> [2620:0000:0000:0000:0000:0000:0000:0003]:0050 Route 1 0 0 - -> [2620:0000:0000:0000:0000:0000:0000:0004]:0050 Route 1 1 1 -FWM 10001000 wlc - -> C0A8321A:0CEA Route 0 0 1 - -> C0A83215:0CEA Route 0 0 2 diff --git a/vendor/github.com/prometheus/procfs/fixtures/net/ip_vs_stats b/vendor/github.com/prometheus/procfs/fixtures/net/ip_vs_stats deleted file mode 100644 index c00724e0f..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/net/ip_vs_stats +++ /dev/null @@ -1,6 +0,0 @@ - Total Incoming Outgoing Incoming Outgoing - Conns Packets Packets Bytes Bytes - 16AA370 E33656E5 0 51D8C8883AB3 0 - - 
Conns/s Pkts/s Pkts/s Bytes/s Bytes/s - 4 1FB3C 0 1282A8F 0 diff --git a/vendor/github.com/prometheus/procfs/fixtures/net/rpc/nfs b/vendor/github.com/prometheus/procfs/fixtures/net/rpc/nfs deleted file mode 100644 index 2e58e0544..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/net/rpc/nfs +++ /dev/null @@ -1,5 +0,0 @@ -net 18628 0 18628 6 -rpc 4329785 0 4338291 -proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 -proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39 -proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 diff --git a/vendor/github.com/prometheus/procfs/fixtures/net/rpc/nfsd b/vendor/github.com/prometheus/procfs/fixtures/net/rpc/nfsd deleted file mode 100644 index 4e8565f41..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/net/rpc/nfsd +++ /dev/null @@ -1,11 +0,0 @@ -rc 0 6 18622 -fh 0 0 0 0 0 -io 157286400 0 -th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 -ra 32 0 0 0 0 0 0 0 0 0 0 0 -net 18628 0 18628 6 -rpc 18628 0 0 0 0 -proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 -proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0 -proc4 2 2 10853 -proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 diff --git a/vendor/github.com/prometheus/procfs/fixtures/net/xfrm_stat b/vendor/github.com/prometheus/procfs/fixtures/net/xfrm_stat deleted file mode 100644 index d278ace95..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/net/xfrm_stat +++ /dev/null @@ -1,28 +0,0 @@ -XfrmInError 1 -XfrmInBufferError 2 -XfrmInHdrError 4 -XfrmInNoStates 3 -XfrmInStateProtoError 40 -XfrmInStateModeError 100 -XfrmInStateSeqError 6000 -XfrmInStateExpired 4 -XfrmInStateMismatch 23451 -XfrmInStateInvalid 55555 -XfrmInTmplMismatch 51 -XfrmInNoPols 65432 -XfrmInPolBlock 100 -XfrmInPolError 10000 -XfrmOutError 1000000 -XfrmOutBundleGenError 43321 -XfrmOutBundleCheckError 555 -XfrmOutNoStates 869 -XfrmOutStateProtoError 4542 -XfrmOutStateModeError 4 -XfrmOutStateSeqError 543 -XfrmOutStateExpired 565 -XfrmOutPolBlock 43456 -XfrmOutPolDead 7656 -XfrmOutPolError 1454 -XfrmFwdHdrError 6654 -XfrmOutStateInvalid 28765 -XfrmAcquireError 24532 diff --git a/vendor/github.com/prometheus/procfs/fixtures/self b/vendor/github.com/prometheus/procfs/fixtures/self deleted file mode 120000 index 1eeedea3d..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/self +++ /dev/null @@ -1 +0,0 @@ -26231 \ No newline at end of file diff --git a/vendor/github.com/prometheus/procfs/fixtures/stat b/vendor/github.com/prometheus/procfs/fixtures/stat deleted file mode 100644 index 1582a8cb5..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/stat +++ /dev/null @@ -1,16 +0,0 @@ -cpu 301854 612 111922 8979004 3552 2 3944 0 0 0 -cpu0 44490 19 21045 1087069 220 1 3410 0 0 0 -cpu1 47869 23 16474 1110787 591 0 46 0 0 0 -cpu2 46504 36 15916 1112321 441 0 326 0 0 0 -cpu3 47054 102 15683 1113230 533 0 60 0 0 0 -cpu4 28413 25 10776 1140321 217 0 8 0 0 0 -cpu5 29271 101 11586 1136270 672 0 30 0 0 0 -cpu6 29152 36 10276 1139721 319 0 29 0 0 0 -cpu7 29098 268 10164 1139282 555 0 31 0 0 0 -intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -ctxt 38014093 -btime 1418183276 -processes 26442 -procs_running 2 -procs_blocked 1 -softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444 diff --git a/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/README b/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/README deleted file mode 100644 index 5cf184ea0..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/README +++ /dev/null @@ -1,2 +0,0 @@ -This directory contains some empty files that are the symlinks the files in the "fd" directory point to. 
-They are otherwise ignored by the tests diff --git a/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/abc b/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/abc deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/def b/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/def deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/ghi b/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/ghi deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/uvw b/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/uvw deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/xyz b/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/xyz deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/prometheus/procfs/sysfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/sysfs/fixtures.ttar index 0045e7d56..8e665ce5a 100644 --- a/vendor/github.com/prometheus/procfs/sysfs/fixtures.ttar +++ b/vendor/github.com/prometheus/procfs/sysfs/fixtures.ttar @@ -1,3 +1,4 @@ +# Archive created by ttar -c -f fixtures.ttar fixtures/ Directory: fixtures Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -719,3 +720,132 @@ Lines: 1 extent_alloc 2 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/class/net/eth0/ +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/class/net/eth0/addr_assign_type +Lines: 1 +3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/class/net/eth0/addr_len +Lines: 1 +6 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/class/net/eth0/address +Lines: 1 +01:01:01:01:01:01 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/class/net/eth0/broadcast +Lines: 1 +ff:ff:ff:ff:ff:ff +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/class/net/eth0/carrier +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/class/net/eth0/carrier_changes +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/class/net/eth0/carrier_down_count +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/class/net/eth0/carrier_up_count +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/class/net/eth0/dev_id +Lines: 1 +0x20 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/class/net/eth0/dormant +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/class/net/eth0/duplex +Lines: 1 +full +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/class/net/eth0/flags +Lines: 1 +0x1303 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/class/net/eth0/ifalias +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/class/net/eth0/ifindex +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/class/net/eth0/iflink +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/class/net/eth0/link_mode +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/class/net/eth0/mtu +Lines: 1 +1500 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/class/net/eth0/name_assign_type +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/class/net/eth0/netdev_group +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/class/net/eth0/operstate +Lines: 1 +up +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/class/net/eth0/phys_port_id +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/class/net/eth0/phys_port_name +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/class/net/eth0/phys_switch_id +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/class/net/eth0/speed +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/class/net/eth0/tx_queue_len +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/class/net/eth0/type +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/vendor/github.com/prometheus/procfs/sysfs/net_class.go b/vendor/github.com/prometheus/procfs/sysfs/net_class.go new file mode 100644 index 000000000..46affd7f7 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/sysfs/net_class.go @@ -0,0 +1,165 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sysfs + +import ( + "fmt" + "io/ioutil" + "os" + "reflect" + "strconv" + "strings" + "syscall" +) + +// NetClassIface contains info from files in /sys/class/net/ +// for single interface (iface). 
+type NetClassIface struct { + Name string // Interface name + AddrAssignType int64 `fileName:"addr_assign_type"` // /sys/class/net//addr_assign_type + AddrLen int64 `fileName:"addr_len"` // /sys/class/net//addr_len + Address string `fileName:"address"` // /sys/class/net//address + Broadcast string `fileName:"broadcast"` // /sys/class/net//broadcast + Carrier int64 `fileName:"carrier"` // /sys/class/net//carrier + CarrierChanges int64 `fileName:"carrier_changes"` // /sys/class/net//carrier_changes + CarrierUpCount int64 `fileName:"carrier_up_count"` // /sys/class/net//carrier_up_count + CarrierDownCount int64 `fileName:"carrier_down_count"` // /sys/class/net//carrier_down_count + DevID int64 `fileName:"dev_id"` // /sys/class/net//dev_id + Dormant int64 `fileName:"dormant"` // /sys/class/net//dormant + Duplex string `fileName:"duplex"` // /sys/class/net//duplex + Flags int64 `fileName:"flags"` // /sys/class/net//flags + IfAlias string `fileName:"ifalias"` // /sys/class/net//ifalias + IfIndex int64 `fileName:"ifindex"` // /sys/class/net//ifindex + IfLink int64 `fileName:"iflink"` // /sys/class/net//iflink + LinkMode int64 `fileName:"link_mode"` // /sys/class/net//link_mode + MTU int64 `fileName:"mtu"` // /sys/class/net//mtu + NameAssignType int64 `fileName:"name_assign_type"` // /sys/class/net//name_assign_type + NetDevGroup int64 `fileName:"netdev_group"` // /sys/class/net//netdev_group + OperState string `fileName:"operstate"` // /sys/class/net//operstate + PhysPortID string `fileName:"phys_port_id"` // /sys/class/net//phys_port_id + PhysPortName string `fileName:"phys_port_name"` // /sys/class/net//phys_port_name + PhysSwitchID string `fileName:"phys_switch_id"` // /sys/class/net//phys_switch_id + Speed int64 `fileName:"speed"` // /sys/class/net//speed + TxQueueLen int64 `fileName:"tx_queue_len"` // /sys/class/net//tx_queue_len + Type int64 `fileName:"type"` // /sys/class/net//type +} + +// NetClass is collection of info for every interface (iface) in /sys/class/net. The map keys +// are interface (iface) names. +type NetClass map[string]NetClassIface + +// NewNetClass returns info for all net interfaces (iface) read from /sys/class/net/. +func NewNetClass() (NetClass, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return nil, err + } + + return fs.NewNetClass() +} + +// NewNetClass returns info for all net interfaces (iface) read from /sys/class/net/. +func (fs FS) NewNetClass() (NetClass, error) { + path := fs.Path("class/net") + + devices, err := ioutil.ReadDir(path) + if err != nil { + return NetClass{}, fmt.Errorf("cannot access %s dir %s", path, err) + } + + netClass := NetClass{} + for _, deviceDir := range devices { + interfaceClass, err := netClass.parseNetClassIface(path + "/" + deviceDir.Name()) + if err != nil { + return nil, err + } + interfaceClass.Name = deviceDir.Name() + netClass[deviceDir.Name()] = *interfaceClass + } + return netClass, nil +} + +// parseNetClassIface scans predefined files in /sys/class/net/ +// directory and gets their contents. 
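For reference, a minimal, self-contained sketch of the struct-tag reflection technique that parseNetClassIface (next in this file) relies on: each field's `fileName` tag names the sysfs file backing it. The `iface` type here is a hypothetical stand-in for NetClassIface, not part of the patch:

```go
package main

import (
	"fmt"
	"reflect"
)

// iface is a hypothetical stand-in: the fileName tag names the
// sysfs file that backs each field.
type iface struct {
	Name  string // untagged: skipped by the loop, filled in by the caller
	MTU   int64  `fileName:"mtu"`
	Speed int64  `fileName:"speed"`
}

func main() {
	t := reflect.TypeOf(iface{})
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		// Only tagged fields map to a file under /sys/class/net/<iface>/.
		if tag := f.Tag.Get("fileName"); tag != "" {
			fmt.Printf("%s <- /sys/class/net/eth0/%s\n", f.Name, tag)
		}
	}
}
```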
+func (nc NetClass) parseNetClassIface(devicePath string) (*NetClassIface, error) { + interfaceClass := NetClassIface{} + interfaceElem := reflect.ValueOf(&interfaceClass).Elem() + interfaceType := reflect.TypeOf(interfaceClass) + + //start from 1 - skip the Name field + for i := 1; i < interfaceElem.NumField(); i++ { + fieldType := interfaceType.Field(i) + fieldValue := interfaceElem.Field(i) + + if fieldType.Tag.Get("fileName") == "" { + panic(fmt.Errorf("field %s does not have a filename tag", fieldType.Name)) + } + + fileContents, err := sysReadFile(devicePath + "/" + fieldType.Tag.Get("fileName")) + + if err != nil { + if os.IsNotExist(err) || err.Error() == "operation not supported" || err.Error() == "invalid argument" { + continue + } + return nil, fmt.Errorf("could not access file %s: %s", fieldType.Tag.Get("fileName"), err) + } + value := strings.TrimSpace(string(fileContents)) + + switch fieldValue.Kind() { + case reflect.Int64: + if strings.HasPrefix(value, "0x") { + intValue, err := strconv.ParseInt(value[2:], 16, 64) + if err != nil { + return nil, fmt.Errorf("expected hex value for %s, got: %s", fieldType.Name, value) + } + fieldValue.SetInt(intValue) + } else { + intValue, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return nil, fmt.Errorf("expected Uint64 value for %s, got: %s", fieldType.Name, value) + } + fieldValue.SetInt(intValue) + } + case reflect.String: + fieldValue.SetString(value) + default: + return nil, fmt.Errorf("unhandled type %q", fieldValue.Kind()) + } + } + + return &interfaceClass, nil +} + +// sysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly. +// https://github.com/prometheus/node_exporter/pull/728/files +func sysReadFile(file string) ([]byte, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + // On some machines, hwmon drivers are broken and return EAGAIN. This causes + // Go's ioutil.ReadFile implementation to poll forever. + // + // Since we either want to read data or bail immediately, do the simplest + // possible read using syscall directly. + b := make([]byte, 128) + n, err := syscall.Read(int(f.Fd()), b) + if err != nil { + return nil, err + } + + return b[:n], nil +} diff --git a/vendor/github.com/prometheus/procfs/sysfs/net_class_test.go b/vendor/github.com/prometheus/procfs/sysfs/net_class_test.go new file mode 100644 index 000000000..5c356dc74 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/sysfs/net_class_test.go @@ -0,0 +1,67 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
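For reference, a minimal usage sketch of the NewNetClass API added above, assuming the upstream import path github.com/prometheus/procfs/sysfs (the package vendored here); the fields shown (OperState, MTU, Speed) are taken from the NetClassIface struct:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs/sysfs"
)

func main() {
	// NewNetClass reads /sys/class/net and returns a map keyed by
	// interface name (see the NetClass type above).
	nc, err := sysfs.NewNetClass()
	if err != nil {
		log.Fatal(err)
	}
	for name, iface := range nc {
		fmt.Printf("%s: operstate=%s mtu=%d speed=%d\n",
			name, iface.OperState, iface.MTU, iface.Speed)
	}
}
```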
+ +package sysfs + +import ( + "reflect" + "testing" +) + +func TestNewNetClass(t *testing.T) { + fs, err := NewFS("fixtures") + if err != nil { + t.Fatal(err) + } + + nc, err := fs.NewNetClass() + if err != nil { + t.Fatal(err) + } + + netClass := NetClass{ + "eth0": { + Address: "01:01:01:01:01:01", + AddrAssignType: 3, + AddrLen: 6, + Broadcast: "ff:ff:ff:ff:ff:ff", + Carrier: 1, + CarrierChanges: 2, + CarrierDownCount: 1, + CarrierUpCount: 1, + DevID: 32, + Dormant: 1, + Duplex: "full", + Flags: 4867, + IfAlias: "", + IfIndex: 2, + IfLink: 2, + LinkMode: 1, + MTU: 1500, + Name: "eth0", + NameAssignType: 2, + NetDevGroup: 0, + OperState: "up", + PhysPortID: "", + PhysPortName: "", + PhysSwitchID: "", + Speed: 1000, + TxQueueLen: 1000, + Type: 1, + }, + } + + if !reflect.DeepEqual(netClass, nc) { + t.Errorf("Result not correct: want %v, have %v", netClass, nc) + } +} diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar index 8227a4a37..b0171a12b 100755 --- a/vendor/github.com/prometheus/procfs/ttar +++ b/vendor/github.com/prometheus/procfs/ttar @@ -1,11 +1,26 @@ #!/usr/bin/env bash + # Purpose: plain text tar format # Limitations: - only suitable for text files, directories, and symlinks # - stores only filename, content, and mode # - not designed for untrusted input - +# # Note: must work with bash version 3.2 (macOS) +# Copyright 2017 Roger Luethi +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + set -o errexit -o nounset # Sanitize environment (for instance, standard sorting of glob matches) @@ -13,6 +28,55 @@ export LC_ALL=C path="" CMD="" +ARG_STRING="$*" + +#------------------------------------------------------------------------------ +# Not all sed implementations can work on null bytes. In order to make ttar +# work out of the box on macOS, use Python as a stream editor. + +USE_PYTHON=0 + +PYTHON_CREATE_FILTER=$(cat << 'PCF' +#!/usr/bin/env python + +import re +import sys + +for line in sys.stdin: + line = re.sub(r'EOF', r'\EOF', line) + line = re.sub(r'NULLBYTE', r'\NULLBYTE', line) + line = re.sub('\x00', r'NULLBYTE', line) + sys.stdout.write(line) +PCF +) + +PYTHON_EXTRACT_FILTER=$(cat << 'PEF' +#!/usr/bin/env python + +import re +import sys + +for line in sys.stdin: + line = re.sub(r'(?/dev/null; then + echo "ERROR Python not found. Aborting." 
+ exit 2 + fi + USE_PYTHON=1 + fi +} + +#------------------------------------------------------------------------------ function usage { bname=$(basename "$0") @@ -23,6 +87,7 @@ Usage: $bname [-C ] -c -f (create archive) Options: -C (change directory) + -v (verbose) Example: Change to sysfs directory, create ttar file from fixtures directory $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/ @@ -45,6 +110,8 @@ function set_cmd { CMD=$1 } +unset VERBOSE + while getopts :cf:htxvC: opt; do case $opt in c) @@ -142,8 +209,37 @@ function extract { fi while IFS= read -r line; do line_no=$(( line_no + 1 )) + local eof_without_newline if [ "$size" -gt 0 ]; then - echo "$line" >> "$path" + if [[ "$line" =~ [^\\]EOF ]]; then + # An EOF not preceded by a backslash indicates that the line + # does not end with a newline + eof_without_newline=1 + else + eof_without_newline=0 + fi + # Replace NULLBYTE with null byte if at beginning of line + # Replace NULLBYTE with null byte unless preceded by backslash + # Remove one backslash in front of NULLBYTE (if any) + # Remove EOF unless preceded by backslash + # Remove one backslash in front of EOF + if [ $USE_PYTHON -eq 1 ]; then + echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path" + else + # The repeated pattern makes up for sed's lack of negative + # lookbehind assertions (for consecutive null bytes). + echo -n "$line" | \ + sed -e 's/^NULLBYTE/\x0/g; + s/\([^\\]\)NULLBYTE/\1\x0/g; + s/\([^\\]\)NULLBYTE/\1\x0/g; + s/\\NULLBYTE/NULLBYTE/g; + s/\([^\\]\)EOF/\1/g; + s/\\EOF/EOF/g; + ' >> "$path" + fi + if [[ "$eof_without_newline" -eq 0 ]]; then + echo >> "$path" + fi size=$(( size - 1 )) continue fi @@ -187,11 +283,14 @@ function get_mode { local mfile=$1 if [ -z "${STAT_OPTION:-}" ]; then if stat -c '%a' "$mfile" >/dev/null 2>&1; then + # GNU stat STAT_OPTION='-c' STAT_FORMAT='%a' else + # BSD stat STAT_OPTION='-f' - STAT_FORMAT='%A' + # Octal output, user/group/other (omit file type, sticky bit) + STAT_FORMAT='%OLp' fi fi stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile" @@ -200,6 +299,7 @@ function get_mode { function _create { shopt -s nullglob local mode + local eof_without_newline while (( "$#" )); do file=$1 if [ -L "$file" ]; then @@ -223,8 +323,30 @@ function _create { elif [ -f "$file" ]; then echo "Path: $file" lines=$(wc -l "$file"|awk '{print $1}') + eof_without_newline=0 + if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \ + [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then + eof_without_newline=1 + lines=$((lines+1)) + fi echo "Lines: $lines" - cat "$file" + # Add backslash in front of EOF + # Add backslash in front of NULLBYTE + # Replace null byte with NULLBYTE + if [ $USE_PYTHON -eq 1 ]; then + < "$file" python -c "$PYTHON_CREATE_FILTER" + else + < "$file" \ + sed 's/EOF/\\EOF/g; + s/NULLBYTE/\\NULLBYTE/g; + s/\x0/NULLBYTE/g; + ' + fi + if [[ "$eof_without_newline" -eq 1 ]]; then + # Finish line with EOF to indicate that the original line did + # not end with a linefeed + echo "EOF" + fi mode=$(get_mode "$file") echo "Mode: $mode" vecho "$mode $file" @@ -249,9 +371,12 @@ function create { rm "$ttar_file" fi exec > "$ttar_file" + echo "# Archive created by ttar $ARG_STRING" _create "$@" } +test_environment + if [ -n "${CDIR:-}" ]; then if [[ "$ARCHIVE" != /* ]]; then # Relative path: preserve the archive's location before changing diff --git a/vendor/github.com/rdallman/migrate/.gitignore b/vendor/github.com/rdallman/migrate/.gitignore deleted file mode 100644 index 938901207..000000000 ---
a/vendor/github.com/rdallman/migrate/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -.DS_Store -cli/build -cli/cli -cli/migrate -.coverage -.godoc.pid diff --git a/vendor/github.com/rdallman/migrate/.travis.yml b/vendor/github.com/rdallman/migrate/.travis.yml deleted file mode 100644 index c06a4041d..000000000 --- a/vendor/github.com/rdallman/migrate/.travis.yml +++ /dev/null @@ -1,60 +0,0 @@ -language: go -sudo: required - -go: - - 1.9.1 - -env: - - MIGRATE_TEST_CONTAINER_BOOT_DELAY=10 - -# TODO: https://docs.docker.com/engine/installation/linux/ubuntu/ -# pre-provision with travis docker setup and pin down docker version in install step -services: - - docker - -install: - - make deps - - (cd $GOPATH/src/github.com/docker/docker && git fetch --all --tags --prune && git checkout v17.05.0-ce) - - sudo apt-get update && sudo apt-get install docker-ce=17.05.0* - - go get github.com/mattn/goveralls - -script: - - make test - -after_success: - - goveralls -service=travis-ci -coverprofile .coverage/combined.txt - - make list-external-deps > dependency_tree.txt && cat dependency_tree.txt - -before_deploy: - - make build-cli - - gem install --no-ri --no-rdoc fpm - - fpm -s dir -t deb -n migrate -v "$(git describe --tags 2>/dev/null | cut -c 2-)" --license MIT -m matthias.kadenbach@gmail.com --url https://github.com/mattes/migrate --description='Database migrations' -a amd64 -p migrate.$(git describe --tags 2>/dev/null | cut -c 2-).deb --deb-no-default-config-files -f -C cli/build migrate.linux-amd64=/usr/bin/migrate - -deploy: - - provider: releases - api_key: - secure: EFow50BI448HVb/uQ1Kk2Kq0xzmwIYq3V67YyymXIuqSCodvXEsMiBPUoLrxEknpPEIc67LEQTNdfHBgvyHk6oRINWAfie+7pr5tKrpOTF9ghyxoN1PlO8WKQCqwCvGMBCnc5ur5rvzp0bqfpV2rs5q9/nngy3kBuEvs12V7iho= - skip_cleanup: true - on: - go: 1.9 - repo: mattes/migrate - tags: true - file: - - cli/build/migrate.linux-amd64.tar.gz - - cli/build/migrate.darwin-amd64.tar.gz - - cli/build/migrate.windows-amd64.exe.tar.gz - - cli/build/sha256sum.txt - - dependency_tree.txt - - provider: packagecloud - repository: migrate - username: mattes - token: - secure: RiHJ/+J9DvXUah/APYdWySWZ5uOOISYJ0wS7xddc7/BNStRVjzFzvJ9zmb67RkyZZrvGuVjPiL4T8mtDyCJCj47RmU/56wPdEHbar/FjsiUCgwvR19RlulkgbV4okBCePbwzMw6HNHRp14TzfQCPtnN4kef0lOI4gZJkImN7rtQ= - dist: ubuntu/xenial - package_glob: '*.deb' - skip_cleanup: true - on: - go: 1.9 - repo: mattes/migrate - tags: true - diff --git a/vendor/github.com/rdallman/migrate/CONTRIBUTING.md b/vendor/github.com/rdallman/migrate/CONTRIBUTING.md deleted file mode 100644 index fcf82a42e..000000000 --- a/vendor/github.com/rdallman/migrate/CONTRIBUTING.md +++ /dev/null @@ -1,22 +0,0 @@ -# Development, Testing and Contributing - - 1. Make sure you have a running Docker daemon - (Install for [MacOS](https://docs.docker.com/docker-for-mac/)) - 2. Fork this repo and `git clone` somewhere to `$GOPATH/src/github.com/%you%/migrate` - 3. `make rewrite-import-paths` to update imports to your local fork - 4. Confirm tests are working: `make test-short` - 5. Write awesome code ... - 6. `make test` to run all tests against all database versions - 7. `make restore-import-paths` to restore import paths - 8. Push code and open Pull Request - -Some more helpful commands: - - * You can specify which database/ source tests to run: - `make test-short SOURCE='file go-bindata' DATABASE='postgres cassandra'` - * After `make test`, run `make html-coverage` which opens a shiny test coverage overview. - * Missing imports? 
`make deps` - * `make build-cli` builds the CLI in directory `cli/build/`. - * `make list-external-deps` lists all external dependencies for each package - * `make docs && make open-docs` opens godoc in your browser, `make kill-docs` kills the godoc server. - Repeatedly call `make docs` to refresh the server. diff --git a/vendor/github.com/rdallman/migrate/FAQ.md b/vendor/github.com/rdallman/migrate/FAQ.md deleted file mode 100644 index f8bb9a85b..000000000 --- a/vendor/github.com/rdallman/migrate/FAQ.md +++ /dev/null @@ -1,67 +0,0 @@ -# FAQ - -#### How is the code base structured? - ``` - / package migrate (the heart of everything) - /cli the CLI wrapper - /database database driver and sub directories have the actual driver implementations - /source source driver and sub directories have the actual driver implementations - ``` - -#### Why is there no `source/driver.go:Last()`? - It's not needed. And unless the source has a "native" way to read a directory in reversed order, - it might be expensive to do a full directory scan in order to get the last element. - -#### What is a NilMigration? NilVersion? - NilMigration defines a migration without a body. NilVersion is defined as const -1. - -#### What is the difference between uint(version) and int(targetVersion)? - version refers to an existing migration version coming from a source and therefor can never be negative. - targetVersion can either be a version OR represent a NilVersion, which equals -1. - -#### What's the difference between Next/Previous and Up/Down? - ``` - 1_first_migration.up.extension next -> 2_second_migration.up.extension ... - 1_first_migration.down.extension <- previous 2_second_migration.down.extension ... - ``` - -#### Why two separate files (up and down) for a migration? - It makes all of our lives easier. No new markup/syntax to learn for users - and existing database utility tools continue to work as expected. - -#### How many migrations can migrate handle? - Whatever the maximum positive signed integer value is for your platform. - For 32bit it would be 2,147,483,647 migrations. Migrate only keeps references to - the currently run and pre-fetched migrations in memory. Please note that some - source drivers need to do build a full "directory" tree first, which puts some - heat on the memory consumption. - -#### Are the table tests in migrate_test.go bloated? - Yes and no. There are duplicate test cases for sure but they don't hurt here. In fact - the tests are very visual now and might help new users understand expected behaviors quickly. - Migrate from version x to y and y is the last migration? Just check out the test for - that particular case and know what's going on instantly. - -#### What is Docker being used for? - Only for testing. See [testing/docker.go](testing/docker.go) - -#### Why not just use docker-compose? - It doesn't give us enough runtime control for testing. We want to be able to bring up containers fast - and whenever we want, not just once at the beginning of all tests. - -#### Can I maintain my driver in my own repository? - Yes, technically thats possible. We want to encourage you to contribute your driver to this respository though. - The driver's functionality is dictated by migrate's interfaces. That means there should really - just be one driver for a database/ source. We want to prevent a future where several drivers doing the exact same thing, - just implemented a bit differently, co-exist somewhere on Github. 
If users have to do research first to find the - "best" available driver for a database in order to get started, we would have failed as an open source community. - -#### Can I mix multiple sources during a batch of migrations? - No. - -#### What does "dirty" database mean? - Before a migration runs, each database sets a dirty flag. Execution stops if a migration fails and the dirty state persists, - which prevents attempts to run more migrations on top of a failed migration. You need to manually fix the error - and then "force" the expected version. - - diff --git a/vendor/github.com/rdallman/migrate/LICENSE b/vendor/github.com/rdallman/migrate/LICENSE deleted file mode 100644 index 62efa3670..000000000 --- a/vendor/github.com/rdallman/migrate/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Matthias Kadenbach - -https://github.com/mattes/migrate - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/rdallman/migrate/MIGRATIONS.md b/vendor/github.com/rdallman/migrate/MIGRATIONS.md deleted file mode 100644 index fbefb9278..000000000 --- a/vendor/github.com/rdallman/migrate/MIGRATIONS.md +++ /dev/null @@ -1,81 +0,0 @@ -# Migrations - -## Migration Filename Format - -A single logical migration is represented as two separate migration files, one -to migrate "up" to the specified version from the previous version, and a second -to migrate back "down" to the previous version. These migrations can be provided -by any one of the supported [migration sources](./README.md#migration-sources). - -The ordering and direction of the migration files is determined by the filenames -used for them. `migrate` expects the filenames of migrations to have the format: - - {version}_{title}.up.{extension} - {version}_{title}.down.{extension} - -The `title` of each migration is unused, and is only for readability. Similarly, -the `extension` of the migration files is not checked by the library, and should -be an appropriate format for the database in use (`.sql` for SQL variants, for -instance). - -Versions of migrations may be represented as any 64 bit unsigned integer. -All migrations are applied upward in order of increasing version number, and -downward by decreasing version number. - -Common versioning schemes include incrementing integers: - - 1_initialize_schema.down.sql - 1_initialize_schema.up.sql - 2_add_table.down.sql - 2_add_table.up.sql - ... 
- -Or timestamps at an appropriate resolution: - - 1500360784_initialize_schema.down.sql - 1500360784_initialize_schema.up.sql - 1500445949_add_table.down.sql - 1500445949_add_table.up.sql - ... - -But any scheme resulting in distinct, incrementing integers as versions is valid. - -It is suggested that the version number of corresponding `up` and `down` migration -files be equivalent for clarity, but they are allowed to differ so long as the -relative ordering of the migrations is preserved. - -The migration files are permitted to be empty, so in the event that a migration -is a no-op or is irreversible, it is recommended to still include both migration -files, and either leaving them empty or adding a comment as appropriate. - -## Migration Content Format - -The format of the migration files themselves varies between database systems. -Different databases have different semantics around schema changes and when and -how they are allowed to occur (for instance, if schema changes can occur within -a transaction). - -As such, the `migrate` library has little to no checking around the format of -migration sources. The migration files are generally processed directly by the -drivers as raw operations. - -## Reversibility of Migrations - -Best practice for writing schema migration is that all migrations should be -reversible. It should in theory be possible for run migrations down and back up -through any and all versions with the state being fully cleaned and recreated -by doing so. - -By adhering to this recommended practice, development and deployment of new code -is cleaner and easier (cleaning database state for a new feature should be as -easy as migrating down to a prior version, and back up to the latest). - -As opposed to some other migration libraries, `migrate` represents up and down -migrations as separate files. This prevents any non-standard file syntax from -being introduced which may result in unintended behavior or errors, depending -on what database is processing the file. - -While it is technically possible for an up or down migration to exist on its own -without an equivalently versioned counterpart, it is strongly recommended to -always include a down migration which cleans up the state of the corresponding -up migration. diff --git a/vendor/github.com/rdallman/migrate/Makefile b/vendor/github.com/rdallman/migrate/Makefile deleted file mode 100644 index e36394bed..000000000 --- a/vendor/github.com/rdallman/migrate/Makefile +++ /dev/null @@ -1,123 +0,0 @@ -SOURCE ?= file go-bindata github aws-s3 google-cloud-storage -DATABASE ?= postgres mysql redshift cassandra sqlite3 spanner cockroachdb clickhouse -VERSION ?= $(shell git describe --tags 2>/dev/null | cut -c 2-) -TEST_FLAGS ?= -REPO_OWNER ?= $(shell cd .. && basename "$$(pwd)") - - -build-cli: clean - -mkdir ./cli/build - cd ./cli && CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -a -o build/migrate.linux-amd64 -ldflags='-X main.Version=$(VERSION)' -tags '$(DATABASE) $(SOURCE)' . - cd ./cli && CGO_ENABLED=1 GOOS=darwin GOARCH=amd64 go build -a -o build/migrate.darwin-amd64 -ldflags='-X main.Version=$(VERSION)' -tags '$(DATABASE) $(SOURCE)' . - cd ./cli && CGO_ENABLED=1 GOOS=windows GOARCH=amd64 go build -a -o build/migrate.windows-amd64.exe -ldflags='-X main.Version=$(VERSION)' -tags '$(DATABASE) $(SOURCE)' . - cd ./cli/build && find . 
-name 'migrate*' | xargs -I{} tar czf {}.tar.gz {} - cd ./cli/build && shasum -a 256 * > sha256sum.txt - cat ./cli/build/sha256sum.txt - - -clean: - -rm -r ./cli/build - - -test-short: - make test-with-flags --ignore-errors TEST_FLAGS='-short' - - -test: - @-rm -r .coverage - @mkdir .coverage - make test-with-flags TEST_FLAGS='-v -race -covermode atomic -coverprofile .coverage/_$$(RAND).txt -bench=. -benchmem' - @echo 'mode: atomic' > .coverage/combined.txt - @cat .coverage/*.txt | grep -v 'mode: atomic' >> .coverage/combined.txt - - -test-with-flags: - @echo SOURCE: $(SOURCE) - @echo DATABASE: $(DATABASE) - - @go test $(TEST_FLAGS) . - @go test $(TEST_FLAGS) ./cli/... - @go test $(TEST_FLAGS) ./testing/... - - @echo -n '$(SOURCE)' | tr -s ' ' '\n' | xargs -I{} go test $(TEST_FLAGS) ./source/{} - @go test $(TEST_FLAGS) ./source/testing/... - @go test $(TEST_FLAGS) ./source/stub/... - - @echo -n '$(DATABASE)' | tr -s ' ' '\n' | xargs -I{} go test $(TEST_FLAGS) ./database/{} - @go test $(TEST_FLAGS) ./database/testing/... - @go test $(TEST_FLAGS) ./database/stub/... - - -kill-orphaned-docker-containers: - docker rm -f $(shell docker ps -aq --filter label=migrate_test) - - -html-coverage: - go tool cover -html=.coverage/combined.txt - - -deps: - -go get -v -u ./... - -go test -v -i ./... - # TODO: why is this not being fetched with the command above? - -go get -u github.com/fsouza/fake-gcs-server/fakestorage - - -list-external-deps: - $(call external_deps,'.') - $(call external_deps,'./cli/...') - $(call external_deps,'./testing/...') - - $(foreach v, $(SOURCE), $(call external_deps,'./source/$(v)/...')) - $(call external_deps,'./source/testing/...') - $(call external_deps,'./source/stub/...') - - $(foreach v, $(DATABASE), $(call external_deps,'./database/$(v)/...')) - $(call external_deps,'./database/testing/...') - $(call external_deps,'./database/stub/...') - - -restore-import-paths: - find . -name '*.go' -type f -execdir sed -i '' s%\"github.com/$(REPO_OWNER)/migrate%\"github.com/mattes/migrate%g '{}' \; - - -rewrite-import-paths: - find . -name '*.go' -type f -execdir sed -i '' s%\"github.com/mattes/migrate%\"github.com/$(REPO_OWNER)/migrate%g '{}' \; - - -# example: fswatch -0 --exclude .godoc.pid --event Updated . | xargs -0 -n1 -I{} make docs -docs: - -make kill-docs - nohup godoc -play -http=127.0.0.1:6064 /dev/null 2>&1 & echo $$! > .godoc.pid - cat .godoc.pid - - -kill-docs: - @cat .godoc.pid - kill -9 $$(cat .godoc.pid) - rm .godoc.pid - - -open-docs: - open http://localhost:6064/pkg/github.com/$(REPO_OWNER)/migrate - - -# example: make release V=0.0.0 -release: - git tag v$(V) - @read -p "Press enter to confirm and push to origin ..." 
&& git push origin v$(V) - - -define external_deps - @echo '-- $(1)'; go list -f '{{join .Deps "\n"}}' $(1) | grep -v github.com/$(REPO_OWNER)/migrate | xargs go list -f '{{if not .Standard}}{{.ImportPath}}{{end}}' - -endef - - -.PHONY: build-cli clean test-short test test-with-flags deps html-coverage \ - restore-import-paths rewrite-import-paths list-external-deps release \ - docs kill-docs open-docs kill-orphaned-docker-containers - -SHELL = /bin/bash -RAND = $(shell echo $$RANDOM) - diff --git a/vendor/github.com/rdallman/migrate/README.md b/vendor/github.com/rdallman/migrate/README.md deleted file mode 100644 index cbc19d88c..000000000 --- a/vendor/github.com/rdallman/migrate/README.md +++ /dev/null @@ -1,140 +0,0 @@ -[![Build Status](https://travis-ci.org/mattes/migrate.svg?branch=master)](https://travis-ci.org/mattes/migrate) -[![GoDoc](https://godoc.org/github.com/mattes/migrate?status.svg)](https://godoc.org/github.com/mattes/migrate) -[![Coverage Status](https://coveralls.io/repos/github/mattes/migrate/badge.svg?branch=v3.0-prev)](https://coveralls.io/github/mattes/migrate?branch=v3.0-prev) -[![packagecloud.io](https://img.shields.io/badge/deb-packagecloud.io-844fec.svg)](https://packagecloud.io/mattes/migrate?filter=debs) - -# migrate - -__Database migrations written in Go. Use as [CLI](#cli-usage) or import as [library](#use-in-your-go-project).__ - - * Migrate reads migrations from [sources](#migration-sources) - and applies them in the correct order to a [database](#databases). - * Drivers are "dumb", migrate glues everything together and makes sure the logic is bulletproof. - (Keeps the drivers lightweight, too.) - * Database drivers don't assume things or try to correct user input. When in doubt, fail. - - -Looking for [v1](https://github.com/mattes/migrate/tree/v1)? - - -## Databases - -Database drivers run migrations. [Add a new database?](database/driver.go) - - * [PostgreSQL](database/postgres) - * [Redshift](database/redshift) - * [Ql](database/ql) - * [Cassandra](database/cassandra) - * [SQLite](database/sqlite3) - * [MySQL/ MariaDB](database/mysql) - * [Neo4j](database/neo4j) ([todo #167](https://github.com/mattes/migrate/issues/167)) - * [MongoDB](database/mongodb) ([todo #169](https://github.com/mattes/migrate/issues/169)) - * [CrateDB](database/crate) ([todo #170](https://github.com/mattes/migrate/issues/170)) - * [Shell](database/shell) ([todo #171](https://github.com/mattes/migrate/issues/171)) - * [Google Cloud Spanner](database/spanner) - * [CockroachDB](database/cockroachdb) - * [ClickHouse](database/clickhouse) - - -## Migration Sources - -Source drivers read migrations from local or remote sources. [Add a new source?](source/driver.go) - - * [Filesystem](source/file) - read from filesystem (always included) - * [Go-Bindata](source/go-bindata) - read from embedded binary data ([jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata)) - * [Github](source/github) - read from remote Github repositories - * [AWS S3](source/aws-s3) - read from Amazon Web Services S3 - * [Google Cloud Storage](source/google-cloud-storage) - read from Google Cloud Platform Storage - - - -## CLI usage - - * Simple wrapper around this library. - * Handles ctrl+c (SIGINT) gracefully. - * No config search paths, no config files, no magic ENV var injections.
- -__[CLI Documentation](cli)__ - -([brew todo #156](https://github.com/mattes/migrate/issues/156)) - -``` -$ brew install migrate --with-postgres -$ migrate -database postgres://localhost:5432/database up 2 -``` - - -## Use in your Go project - - * API is stable and frozen for this release (v3.x). - * Package migrate has no external dependencies. - * Only import the drivers you need. - (check [dependency_tree.txt](https://github.com/mattes/migrate/releases) for each driver) - * To help prevent database corruption, it supports graceful stops via `GracefulStop chan bool`. - * Bring your own logger. - * Uses `io.Reader` streams internally for low memory overhead. - * Thread-safe and no goroutine leaks. - -__[Go Documentation](https://godoc.org/github.com/mattes/migrate)__ - -```go -import ( - "log" - - "github.com/mattes/migrate" - _ "github.com/mattes/migrate/database/postgres" - _ "github.com/mattes/migrate/source/github" -) - -func main() { - m, err := migrate.New( - "github://mattes:personal-access-token@mattes/migrate_test", - "postgres://localhost:5432/database?sslmode=disable") - if err != nil { - log.Fatal(err) - } - m.Steps(2) -} -``` - -Want to use an existing database client? - -```go -import ( - "database/sql" - "log" - - _ "github.com/lib/pq" - "github.com/mattes/migrate" - "github.com/mattes/migrate/database/postgres" - _ "github.com/mattes/migrate/source/file" -) - -func main() { - db, err := sql.Open("postgres", "postgres://localhost:5432/database?sslmode=disable") - if err != nil { - log.Fatal(err) - } - driver, err := postgres.WithInstance(db, &postgres.Config{}) - if err != nil { - log.Fatal(err) - } - m, err := migrate.NewWithDatabaseInstance( - "file:///migrations", - "postgres", driver) - if err != nil { - log.Fatal(err) - } - m.Steps(2) -} -``` - -## Migration files - -Each migration has an up and down migration. [Why?](FAQ.md#why-two-separate-files-up-and-down-for-a-migration) - -``` -1481574547_create_users_table.up.sql -1481574547_create_users_table.down.sql -``` - -[Best practices: How to write migrations.](MIGRATIONS.md) - - - -## Development and Contributing - -Yes, please! [`Makefile`](Makefile) is your friend, -read the [development guide](CONTRIBUTING.md). - -Also have a look at the [FAQ](FAQ.md). - - - ---- - -Looking for alternatives? [https://awesome-go.com/#database](https://awesome-go.com/#database). diff --git a/vendor/github.com/rdallman/migrate/cli/README.md b/vendor/github.com/rdallman/migrate/cli/README.md deleted file mode 100644 index c0886d5a7..000000000 --- a/vendor/github.com/rdallman/migrate/cli/README.md +++ /dev/null @@ -1,113 +0,0 @@ -# migrate CLI - -## Installation - -#### With Go toolchain - -``` -$ go get -u -d github.com/mattes/migrate/cli github.com/lib/pq -$ go build -tags 'postgres' -o /usr/local/bin/migrate github.com/mattes/migrate/cli -``` - -Note: This example builds the CLI, which will only work with Postgres. In order -to build the CLI for use with other databases, replace the `postgres` build tag -with the appropriate database tag(s) for the databases desired. The tags -correspond to the names of the sub-packages underneath the -[`database`](../database) package.
- -#### MacOS - -([todo #156](https://github.com/mattes/migrate/issues/156)) - -``` -$ brew install migrate --with-postgres -``` - -#### Linux (*.deb package) - -``` -$ curl -L https://packagecloud.io/mattes/migrate/gpgkey | apt-key add - -$ echo "deb https://packagecloud.io/mattes/migrate/ubuntu/ xenial main" > /etc/apt/sources.list.d/migrate.list -$ apt-get update -$ apt-get install -y migrate -``` - -#### Download pre-built binary (Windows, MacOS, or Linux) - -[Release Downloads](https://github.com/mattes/migrate/releases) - -``` -$ curl -L https://github.com/mattes/migrate/releases/download/$version/migrate.$platform-amd64.tar.gz | tar xvz -``` - - - -## Usage - -``` -$ migrate -help -Usage: migrate OPTIONS COMMAND [arg...] - migrate [ -version | -help ] - -Options: - -source Location of the migrations (driver://url) - -path Shorthand for -source=file://path - -database Run migrations against this database (driver://url) - -prefetch N Number of migrations to load in advance before executing (default 10) - -lock-timeout N Allow N seconds to acquire database lock (default 15) - -verbose Print verbose logging - -version Print version - -help Print usage - -Commands: - create [-ext E] [-dir D] NAME - Create a set of timestamped up/down migrations titled NAME, in directory D with extension E - goto V Migrate to version V - up [N] Apply all or N up migrations - down [N] Apply all or N down migrations - drop Drop everything inside database - force V Set version V but don't run migration (ignores dirty state) - version Print current migration version -``` - - -So let's say you want to run the first two migrations - -``` -$ migrate -database postgres://localhost:5432/database up 2 -``` - -If your migrations are hosted on GitHub - -``` -$ migrate -source github://mattes:personal-access-token@mattes/migrate_test \ - -database postgres://localhost:5432/database down 2 -``` - -The CLI will gracefully stop at a safe point when SIGINT (ctrl+c) is received. -Send SIGKILL for immediate halt.
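
The same safe-point behavior is available when using migrate as a library: as the vendored `cli/main.go` later in this diff shows, the CLI simply forwards SIGINT to the migrater's `GracefulStop` channel. A minimal library-side sketch of that hook (hypothetical `file://migrations` source and database DSN; the only API assumed beyond this diff's contents is the `GracefulStop chan bool` field the README above documents):

```go
package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"

	"github.com/mattes/migrate"
	_ "github.com/mattes/migrate/database/postgres"
	_ "github.com/mattes/migrate/source/file"
)

func main() {
	// Hypothetical source and database URLs, for illustration only.
	m, err := migrate.New("file://migrations",
		"postgres://localhost:5432/database?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer m.Close()

	// Forward SIGINT to the migrater so it stops at a safe point
	// after the currently running migration, mirroring the CLI.
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT)
	go func() {
		<-sigs
		log.Println("Stopping after this running migration ...")
		m.GracefulStop <- true
	}()

	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		log.Fatal(err)
	}
}
```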
- - - -## Reading CLI arguments from somewhere else - -##### ENV variables - -``` -$ migrate -database "$MY_MIGRATE_DATABASE" -``` - -##### JSON files - -Check out https://stedolan.github.io/jq/ - -``` -$ migrate -database "$(cat config.json | jq '.database')" -``` - -##### YAML files - -``` -$ migrate -database "$(cat config/database.yml | ruby -ryaml -e "print YAML.load(STDIN.read)['database']")" -$ migrate -database "$(cat config/database.yml | python -c 'import yaml,sys;print yaml.safe_load(sys.stdin)["database"]')" -``` diff --git a/vendor/github.com/rdallman/migrate/cli/build_aws-s3.go b/vendor/github.com/rdallman/migrate/cli/build_aws-s3.go deleted file mode 100644 index 766fd5663..000000000 --- a/vendor/github.com/rdallman/migrate/cli/build_aws-s3.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build aws-s3 - -package main - -import ( - _ "github.com/mattes/migrate/source/aws-s3" -) diff --git a/vendor/github.com/rdallman/migrate/cli/build_cassandra.go b/vendor/github.com/rdallman/migrate/cli/build_cassandra.go deleted file mode 100644 index 319b52d2a..000000000 --- a/vendor/github.com/rdallman/migrate/cli/build_cassandra.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build cassandra - -package main - -import ( - _ "github.com/mattes/migrate/database/cassandra" -) diff --git a/vendor/github.com/rdallman/migrate/cli/build_clickhouse.go b/vendor/github.com/rdallman/migrate/cli/build_clickhouse.go deleted file mode 100644 index c9175e280..000000000 --- a/vendor/github.com/rdallman/migrate/cli/build_clickhouse.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build clickhouse - -package main - -import ( - _ "github.com/kshvakov/clickhouse" - _ "github.com/mattes/migrate/database/clickhouse" -) diff --git a/vendor/github.com/rdallman/migrate/cli/build_cockroachdb.go b/vendor/github.com/rdallman/migrate/cli/build_cockroachdb.go deleted file mode 100644 index e5fdf073e..000000000 --- a/vendor/github.com/rdallman/migrate/cli/build_cockroachdb.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build cockroachdb - -package main - -import ( - _ "github.com/mattes/migrate/database/cockroachdb" -) diff --git a/vendor/github.com/rdallman/migrate/cli/build_github.go b/vendor/github.com/rdallman/migrate/cli/build_github.go deleted file mode 100644 index 9c813b46c..000000000 --- a/vendor/github.com/rdallman/migrate/cli/build_github.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build github - -package main - -import ( - _ "github.com/mattes/migrate/source/github" -) diff --git a/vendor/github.com/rdallman/migrate/cli/build_go-bindata.go b/vendor/github.com/rdallman/migrate/cli/build_go-bindata.go deleted file mode 100644 index 8a6a89349..000000000 --- a/vendor/github.com/rdallman/migrate/cli/build_go-bindata.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build go-bindata - -package main - -import ( - _ "github.com/mattes/migrate/source/go-bindata" -) diff --git a/vendor/github.com/rdallman/migrate/cli/build_google-cloud-storage.go b/vendor/github.com/rdallman/migrate/cli/build_google-cloud-storage.go deleted file mode 100644 index 04f314338..000000000 --- a/vendor/github.com/rdallman/migrate/cli/build_google-cloud-storage.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build google-cloud-storage - -package main - -import ( - _ "github.com/mattes/migrate/source/google-cloud-storage" -) diff --git a/vendor/github.com/rdallman/migrate/cli/build_mysql.go b/vendor/github.com/rdallman/migrate/cli/build_mysql.go deleted file mode 100644 index 177766f5e..000000000 --- a/vendor/github.com/rdallman/migrate/cli/build_mysql.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build mysql -
-package main - -import ( - _ "github.com/mattes/migrate/database/mysql" -) diff --git a/vendor/github.com/rdallman/migrate/cli/build_postgres.go b/vendor/github.com/rdallman/migrate/cli/build_postgres.go deleted file mode 100644 index 87f6be757..000000000 --- a/vendor/github.com/rdallman/migrate/cli/build_postgres.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build postgres - -package main - -import ( - _ "github.com/mattes/migrate/database/postgres" -) diff --git a/vendor/github.com/rdallman/migrate/cli/build_ql.go b/vendor/github.com/rdallman/migrate/cli/build_ql.go deleted file mode 100644 index cd56ef958..000000000 --- a/vendor/github.com/rdallman/migrate/cli/build_ql.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build ql - -package main - -import ( - _ "github.com/mattes/migrate/database/ql" -) diff --git a/vendor/github.com/rdallman/migrate/cli/build_redshift.go b/vendor/github.com/rdallman/migrate/cli/build_redshift.go deleted file mode 100644 index 8153d0aa3..000000000 --- a/vendor/github.com/rdallman/migrate/cli/build_redshift.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build redshift - -package main - -import ( - _ "github.com/mattes/migrate/database/redshift" -) diff --git a/vendor/github.com/rdallman/migrate/cli/build_spanner.go b/vendor/github.com/rdallman/migrate/cli/build_spanner.go deleted file mode 100644 index 7223d820b..000000000 --- a/vendor/github.com/rdallman/migrate/cli/build_spanner.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build spanner - -package main - -import ( - _ "github.com/mattes/migrate/database/spanner" -) diff --git a/vendor/github.com/rdallman/migrate/cli/build_sqlite3.go b/vendor/github.com/rdallman/migrate/cli/build_sqlite3.go deleted file mode 100644 index 48ae8ebc2..000000000 --- a/vendor/github.com/rdallman/migrate/cli/build_sqlite3.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build sqlite3 - -package main - -import ( - _ "github.com/mattes/migrate/database/sqlite3" -) diff --git a/vendor/github.com/rdallman/migrate/cli/commands.go b/vendor/github.com/rdallman/migrate/cli/commands.go deleted file mode 100644 index 703896dc1..000000000 --- a/vendor/github.com/rdallman/migrate/cli/commands.go +++ /dev/null @@ -1,96 +0,0 @@ -package main - -import ( - "github.com/mattes/migrate" - _ "github.com/mattes/migrate/database/stub" // TODO remove again - _ "github.com/mattes/migrate/source/file" - "os" - "fmt" -) - -func createCmd(dir string, timestamp int64, name string, ext string) { - base := fmt.Sprintf("%v%v_%v.", dir, timestamp, name) - os.MkdirAll(dir, os.ModePerm) - createFile(base + "up" + ext) - createFile(base + "down" + ext) -} - -func createFile(fname string) { - if _, err := os.Create(fname); err != nil { - log.fatalErr(err) - } -} - -func gotoCmd(m *migrate.Migrate, v uint) { - if err := m.Migrate(v); err != nil { - if err != migrate.ErrNoChange { - log.fatalErr(err) - } else { - log.Println(err) - } - } -} - -func upCmd(m *migrate.Migrate, limit int) { - if limit >= 0 { - if err := m.Steps(limit); err != nil { - if err != migrate.ErrNoChange { - log.fatalErr(err) - } else { - log.Println(err) - } - } - } else { - if err := m.Up(); err != nil { - if err != migrate.ErrNoChange { - log.fatalErr(err) - } else { - log.Println(err) - } - } - } -} - -func downCmd(m *migrate.Migrate, limit int) { - if limit >= 0 { - if err := m.Steps(-limit); err != nil { - if err != migrate.ErrNoChange { - log.fatalErr(err) - } else { - log.Println(err) - } - } - } else { - if err := m.Down(); err != nil { - if err != migrate.ErrNoChange { - log.fatalErr(err) - } else { - log.Println(err) - } - } - 
} -} - -func dropCmd(m *migrate.Migrate) { - if err := m.Drop(); err != nil { - log.fatalErr(err) - } -} - -func forceCmd(m *migrate.Migrate, v int) { - if err := m.Force(v); err != nil { - log.fatalErr(err) - } -} - -func versionCmd(m *migrate.Migrate) { - v, dirty, err := m.Version() - if err != nil { - log.fatalErr(err) - } - if dirty { - log.Printf("%v (dirty)\n", v) - } else { - log.Println(v) - } -} diff --git a/vendor/github.com/rdallman/migrate/cli/examples/Dockerfile b/vendor/github.com/rdallman/migrate/cli/examples/Dockerfile deleted file mode 100644 index 740f951f8..000000000 --- a/vendor/github.com/rdallman/migrate/cli/examples/Dockerfile +++ /dev/null @@ -1,12 +0,0 @@ -FROM ubuntu:xenial - -RUN apt-get update && \ - apt-get install -y curl apt-transport-https - -RUN curl -L https://packagecloud.io/mattes/migrate/gpgkey | apt-key add - && \ - echo "deb https://packagecloud.io/mattes/migrate/ubuntu/ xenial main" > /etc/apt/sources.list.d/migrate.list && \ - apt-get update && \ - apt-get install -y migrate - -RUN migrate -version - diff --git a/vendor/github.com/rdallman/migrate/cli/log.go b/vendor/github.com/rdallman/migrate/cli/log.go deleted file mode 100644 index a119d3481..000000000 --- a/vendor/github.com/rdallman/migrate/cli/log.go +++ /dev/null @@ -1,45 +0,0 @@ -package main - -import ( - "fmt" - logpkg "log" - "os" -) - -type Log struct { - verbose bool -} - -func (l *Log) Printf(format string, v ...interface{}) { - if l.verbose { - logpkg.Printf(format, v...) - } else { - fmt.Fprintf(os.Stderr, format, v...) - } -} - -func (l *Log) Println(args ...interface{}) { - if l.verbose { - logpkg.Println(args...) - } else { - fmt.Fprintln(os.Stderr, args...) - } -} - -func (l *Log) Verbose() bool { - return l.verbose -} - -func (l *Log) fatalf(format string, v ...interface{}) { - l.Printf(format, v...) - os.Exit(1) -} - -func (l *Log) fatal(args ...interface{}) { - l.Println(args...) - os.Exit(1) -} - -func (l *Log) fatalErr(err error) { - l.fatal("error:", err) -} diff --git a/vendor/github.com/rdallman/migrate/cli/main.go b/vendor/github.com/rdallman/migrate/cli/main.go deleted file mode 100644 index 4c727a972..000000000 --- a/vendor/github.com/rdallman/migrate/cli/main.go +++ /dev/null @@ -1,237 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "os" - "os/signal" - "strconv" - "strings" - "syscall" - "time" - - "github.com/mattes/migrate" -) - -// set main log -var log = &Log{} - -func main() { - helpPtr := flag.Bool("help", false, "") - versionPtr := flag.Bool("version", false, "") - verbosePtr := flag.Bool("verbose", false, "") - prefetchPtr := flag.Uint("prefetch", 10, "") - lockTimeoutPtr := flag.Uint("lock-timeout", 15, "") - pathPtr := flag.String("path", "", "") - databasePtr := flag.String("database", "", "") - sourcePtr := flag.String("source", "", "") - - flag.Usage = func() { - fmt.Fprint(os.Stderr, - `Usage: migrate OPTIONS COMMAND [arg...] 
- migrate [ -version | -help ] - -Options: - -source Location of the migrations (driver://url) - -path Shorthand for -source=file://path - -database Run migrations against this database (driver://url) - -prefetch N Number of migrations to load in advance before executing (default 10) - -lock-timeout N Allow N seconds to acquire database lock (default 15) - -verbose Print verbose logging - -version Print version - -help Print usage - -Commands: - create [-ext E] [-dir D] NAME - Create a set of timestamped up/down migrations titled NAME, in directory D with extension E - goto V Migrate to version V - up [N] Apply all or N up migrations - down [N] Apply all or N down migrations - drop Drop everything inside database - force V Set version V but don't run migration (ignores dirty state) - version Print current migration version -`) - } - - flag.Parse() - - // initialize logger - log.verbose = *verbosePtr - - // show cli version - if *versionPtr { - fmt.Fprintln(os.Stderr, Version) - os.Exit(0) - } - - // show help - if *helpPtr { - flag.Usage() - os.Exit(0) - } - - // translate -path into -source if given - if *sourcePtr == "" && *pathPtr != "" { - *sourcePtr = fmt.Sprintf("file://%v", *pathPtr) - } - - // initialize migrate - // don't catch migraterErr here and let each command decide - // how it wants to handle the error - migrater, migraterErr := migrate.New(*sourcePtr, *databasePtr) - defer func() { - if migraterErr == nil { - migrater.Close() - } - }() - if migraterErr == nil { - migrater.Log = log - migrater.PrefetchMigrations = *prefetchPtr - migrater.LockTimeout = time.Duration(int64(*lockTimeoutPtr)) * time.Second - - // handle Ctrl+c - signals := make(chan os.Signal, 1) - signal.Notify(signals, syscall.SIGINT) - go func() { - for range signals { - log.Println("Stopping after this running migration ...") - migrater.GracefulStop <- true - return - } - }() - } - - startTime := time.Now() - - switch flag.Arg(0) { - case "create": - args := flag.Args()[1:] - - createFlagSet := flag.NewFlagSet("create", flag.ExitOnError) - extPtr := createFlagSet.String("ext", "", "File extension") - dirPtr := createFlagSet.String("dir", "", "Directory to place file in (default: current working directory)") - createFlagSet.Parse(args) - - if createFlagSet.NArg() == 0 { - log.fatal("error: please specify name") - } - name := createFlagSet.Arg(0) - - if *extPtr != "" { - *extPtr = "."
+ strings.TrimPrefix(*extPtr, ".") - } - if *dirPtr != "" { - *dirPtr = strings.Trim(*dirPtr, "/") + "/" - } - - timestamp := startTime.Unix() - - createCmd(*dirPtr, timestamp, name, *extPtr) - - case "goto": - if migraterErr != nil { - log.fatalErr(migraterErr) - } - - if flag.Arg(1) == "" { - log.fatal("error: please specify version argument V") - } - - v, err := strconv.ParseUint(flag.Arg(1), 10, 64) - if err != nil { - log.fatal("error: can't read version argument V") - } - - gotoCmd(migrater, uint(v)) - - if log.verbose { - log.Println("Finished after", time.Now().Sub(startTime)) - } - - case "up": - if migraterErr != nil { - log.fatalErr(migraterErr) - } - - limit := -1 - if flag.Arg(1) != "" { - n, err := strconv.ParseUint(flag.Arg(1), 10, 64) - if err != nil { - log.fatal("error: can't read limit argument N") - } - limit = int(n) - } - - upCmd(migrater, limit) - - if log.verbose { - log.Println("Finished after", time.Now().Sub(startTime)) - } - - case "down": - if migraterErr != nil { - log.fatalErr(migraterErr) - } - - limit := -1 - if flag.Arg(1) != "" { - n, err := strconv.ParseUint(flag.Arg(1), 10, 64) - if err != nil { - log.fatal("error: can't read limit argument N") - } - limit = int(n) - } - - downCmd(migrater, limit) - - if log.verbose { - log.Println("Finished after", time.Now().Sub(startTime)) - } - - case "drop": - if migraterErr != nil { - log.fatalErr(migraterErr) - } - - dropCmd(migrater) - - if log.verbose { - log.Println("Finished after", time.Now().Sub(startTime)) - } - - case "force": - if migraterErr != nil { - log.fatalErr(migraterErr) - } - - if flag.Arg(1) == "" { - log.fatal("error: please specify version argument V") - } - - v, err := strconv.ParseInt(flag.Arg(1), 10, 64) - if err != nil { - log.fatal("error: can't read version argument V") - } - - if v < -1 { - log.fatal("error: argument V must be >= -1") - } - - forceCmd(migrater, int(v)) - - if log.verbose { - log.Println("Finished after", time.Now().Sub(startTime)) - } - - case "version": - if migraterErr != nil { - log.fatalErr(migraterErr) - } - - versionCmd(migrater) - - default: - flag.Usage() - os.Exit(0) - } -} diff --git a/vendor/github.com/rdallman/migrate/cli/version.go b/vendor/github.com/rdallman/migrate/cli/version.go deleted file mode 100644 index 6c3ec49fe..000000000 --- a/vendor/github.com/rdallman/migrate/cli/version.go +++ /dev/null @@ -1,4 +0,0 @@ -package main - -// Version is set in Makefile with build flags -var Version = "dev" diff --git a/vendor/github.com/rdallman/migrate/database/cassandra/README.md b/vendor/github.com/rdallman/migrate/database/cassandra/README.md deleted file mode 100644 index f99b1105e..000000000 --- a/vendor/github.com/rdallman/migrate/database/cassandra/README.md +++ /dev/null @@ -1,31 +0,0 @@ -# Cassandra - -* Drop command will not work on Cassandra 2.X because it relies on the -system_schema table, which comes with 3.X -* Other commands should work properly but are **not tested** - - -## Usage -`cassandra://host:port/keyspace?param1=value&param2=value2` - - -| URL Query | Default value | Description | -|------------|-------------|-----------| -| `x-migrations-table` | schema_migrations | Name of the migrations table | -| `port` | 9042 | The port to bind to | -| `consistency` | ALL | Migration consistency | -| `protocol` | | Cassandra protocol version (3 or 4) | -| `timeout` | 1 minute | Migration timeout | -| `username` | nil | Username to use when authenticating. | -| `password` | nil | Password to use when authenticating.
| - - -`timeout` is parsed using [time.ParseDuration(s string)](https://golang.org/pkg/time/#ParseDuration) - - -## Upgrading from v1 - -1. Write down the current migration version from schema_migrations -2. `DROP TABLE schema_migrations` -3. Download and install the latest migrate version. -4. Force the current migration version with `migrate force V`. diff --git a/vendor/github.com/rdallman/migrate/database/cassandra/cassandra.go b/vendor/github.com/rdallman/migrate/database/cassandra/cassandra.go deleted file mode 100644 index 42563fdbe..000000000 --- a/vendor/github.com/rdallman/migrate/database/cassandra/cassandra.go +++ /dev/null @@ -1,228 +0,0 @@ -package cassandra - -import ( - "fmt" - "io" - "io/ioutil" - nurl "net/url" - "strconv" - "time" - - "github.com/gocql/gocql" - "github.com/mattes/migrate/database" -) - -func init() { - db := new(Cassandra) - database.Register("cassandra", db) -} - -var DefaultMigrationsTable = "schema_migrations" -var dbLocked = false - -var ( - ErrNilConfig = fmt.Errorf("no config") - ErrNoKeyspace = fmt.Errorf("no keyspace provided") - ErrDatabaseDirty = fmt.Errorf("database is dirty") -) - -type Config struct { - MigrationsTable string - KeyspaceName string -} - -type Cassandra struct { - session *gocql.Session - isLocked bool - - // Open and WithInstance need to guarantee that config is never nil - config *Config -} - -func (p *Cassandra) Open(url string) (database.Driver, error) { - u, err := nurl.Parse(url) - if err != nil { - return nil, err - } - - // Check for missing mandatory attributes - if len(u.Path) == 0 { - return nil, ErrNoKeyspace - } - - migrationsTable := u.Query().Get("x-migrations-table") - if len(migrationsTable) == 0 { - migrationsTable = DefaultMigrationsTable - } - - p.config = &Config{ - KeyspaceName: u.Path, - MigrationsTable: migrationsTable, - } - - cluster := gocql.NewCluster(u.Host) - cluster.Keyspace = u.Path[1:len(u.Path)] - cluster.Consistency = gocql.All - cluster.Timeout = 1 * time.Minute - - if len(u.Query().Get("username")) > 0 && len(u.Query().Get("password")) > 0 { - authenticator := gocql.PasswordAuthenticator{ - Username: u.Query().Get("username"), - Password: u.Query().Get("password"), - } - cluster.Authenticator = authenticator - } - - // Retrieve query string configuration - if len(u.Query().Get("consistency")) > 0 { - var consistency gocql.Consistency - consistency, err = parseConsistency(u.Query().Get("consistency")) - if err != nil { - return nil, err - } - - cluster.Consistency = consistency - } - if len(u.Query().Get("protocol")) > 0 { - var protoversion int - protoversion, err = strconv.Atoi(u.Query().Get("protocol")) - if err != nil { - return nil, err - } - cluster.ProtoVersion = protoversion - } - if len(u.Query().Get("timeout")) > 0 { - var timeout time.Duration - timeout, err = time.ParseDuration(u.Query().Get("timeout")) - if err != nil { - return nil, err - } - cluster.Timeout = timeout - } - - p.session, err = cluster.CreateSession() - - if err != nil { - return nil, err - } - - if err := p.ensureVersionTable(); err != nil { - return nil, err - } - - return p, nil -} - -func (p *Cassandra) Close() error { - p.session.Close() - return nil -} - -func (p *Cassandra) Lock() error { - if dbLocked { - return database.ErrLocked - } - dbLocked = true - return nil -} - -func (p *Cassandra) Unlock() error { - dbLocked = false - return nil -} - -func (p *Cassandra) Run(migration io.Reader) error { - migr, err := ioutil.ReadAll(migration) - if err != nil { - return err - } - // run migration - query := string(migr[:]) - if err := p.session.Query(query).Exec(); err != nil { - // TODO: cast to Cassandra error and get line number - return database.Error{OrigErr: err, Err: "migration failed", Query: migr} - } - - return nil -} - -func (p *Cassandra) SetVersion(version int, dirty bool) error { - query := `TRUNCATE "` + p.config.MigrationsTable + `"` - if err := p.session.Query(query).Exec(); err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - if version >= 0 { - query = `INSERT INTO "` + p.config.MigrationsTable + `" (version, dirty) VALUES (?, ?)` - if err := p.session.Query(query, version, dirty).Exec(); err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - } - - return nil -} - -// Return current keyspace version -func (p *Cassandra) Version() (version int, dirty bool, err error) { - query := `SELECT version, dirty FROM "` + p.config.MigrationsTable + `" LIMIT 1` - err = p.session.Query(query).Scan(&version, &dirty) - switch { - case err == gocql.ErrNotFound: - return database.NilVersion, false, nil - - case err != nil: - if _, ok := err.(*gocql.Error); ok { - return database.NilVersion, false, nil - } - return 0, false, &database.Error{OrigErr: err, Query: []byte(query)} - - default: - return version, dirty, nil - } -} - -func (p *Cassandra) Drop() error { - // select all tables in current schema - query := fmt.Sprintf(`SELECT table_name from system_schema.tables WHERE keyspace_name='%s'`, p.config.KeyspaceName[1:]) // Skip '/' character - iter := p.session.Query(query).Iter() - var tableName string - for iter.Scan(&tableName) { - err := p.session.Query(fmt.Sprintf(`DROP TABLE %s`, tableName)).Exec() - if err != nil { - return err - } - } - // Re-create the version table - if err := p.ensureVersionTable(); err != nil { - return err - } - return nil -} - -// Ensure version table exists -func (p *Cassandra) ensureVersionTable() error { - err := p.session.Query(fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (version bigint, dirty boolean, PRIMARY KEY(version))", p.config.MigrationsTable)).Exec() - if err != nil { - return err - } - if _, _, err = p.Version(); err != nil { - return err - } - return nil -} - -// ParseConsistency wraps gocql.ParseConsistency -// to return an error instead of panicking.
-func parseConsistency(consistencyStr string) (consistency gocql.Consistency, err error) { - defer func() { - if r := recover(); r != nil { - var ok bool - err, ok = r.(error) - if !ok { - err = fmt.Errorf("Failed to parse consistency \"%s\": %v", consistencyStr, r) - } - } - }() - consistency = gocql.ParseConsistency(consistencyStr) - - return consistency, nil -} diff --git a/vendor/github.com/rdallman/migrate/database/cassandra/cassandra_test.go b/vendor/github.com/rdallman/migrate/database/cassandra/cassandra_test.go deleted file mode 100644 index 4ca764a04..000000000 --- a/vendor/github.com/rdallman/migrate/database/cassandra/cassandra_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package cassandra - -import ( - "fmt" - "testing" - dt "github.com/mattes/migrate/database/testing" - mt "github.com/mattes/migrate/testing" - "github.com/gocql/gocql" - "time" - "strconv" -) - -var versions = []mt.Version{ - {Image: "cassandra:3.0.10"}, - {Image: "cassandra:3.0"}, -} - -func isReady(i mt.Instance) bool { - // Cassandra exposes 5 ports (7000, 7001, 7199, 9042 & 9160) - // We only need the port bound to 9042, but we can only access to the first one - // through 'i.Port()' (which calls DockerContainer.firstPortMapping()) - // So we need to get port mapping to retrieve correct port number bound to 9042 - portMap := i.NetworkSettings().Ports - port, _ := strconv.Atoi(portMap["9042/tcp"][0].HostPort) - - cluster := gocql.NewCluster(i.Host()) - cluster.Port = port - //cluster.ProtoVersion = 4 - cluster.Consistency = gocql.All - cluster.Timeout = 1 * time.Minute - p, err := cluster.CreateSession() - if err != nil { - return false - } - // Create keyspace for tests - p.Query("CREATE KEYSPACE testks WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor':1}").Exec() - return true -} - -func Test(t *testing.T) { - mt.ParallelTest(t, versions, isReady, - func(t *testing.T, i mt.Instance) { - p := &Cassandra{} - portMap := i.NetworkSettings().Ports - port, _ := strconv.Atoi(portMap["9042/tcp"][0].HostPort) - addr := fmt.Sprintf("cassandra://%v:%v/testks", i.Host(), port) - d, err := p.Open(addr) - if err != nil { - t.Fatalf("%v", err) - } - dt.Test(t, d, []byte("SELECT table_name from system_schema.tables")) - }) -} diff --git a/vendor/github.com/rdallman/migrate/database/clickhouse/README.md b/vendor/github.com/rdallman/migrate/database/clickhouse/README.md deleted file mode 100644 index 16dbbf965..000000000 --- a/vendor/github.com/rdallman/migrate/database/clickhouse/README.md +++ /dev/null @@ -1,12 +0,0 @@ -# ClickHouse - -`clickhouse://host:port?username=user&password=qwerty&database=clicks` - -| URL Query | Description | -|------------|-------------| -| `x-migrations-table`| Name of the migrations table | -| `database` | The name of the database to connect to | -| `username` | The user to sign in as | -| `password` | The user's password | -| `host` | The host to connect to. | -| `port` | The port to bind to. 
| diff --git a/vendor/github.com/rdallman/migrate/database/clickhouse/clickhouse.go b/vendor/github.com/rdallman/migrate/database/clickhouse/clickhouse.go deleted file mode 100644 index fffc5585d..000000000 --- a/vendor/github.com/rdallman/migrate/database/clickhouse/clickhouse.go +++ /dev/null @@ -1,196 +0,0 @@ -package clickhouse - -import ( - "database/sql" - "fmt" - "io" - "io/ioutil" - "net/url" - "time" - - "github.com/mattes/migrate" - "github.com/mattes/migrate/database" -) - -var DefaultMigrationsTable = "schema_migrations" - -var ErrNilConfig = fmt.Errorf("no config") - -type Config struct { - DatabaseName string - MigrationsTable string -} - -func init() { - database.Register("clickhouse", &ClickHouse{}) -} - -func WithInstance(conn *sql.DB, config *Config) (database.Driver, error) { - if config == nil { - return nil, ErrNilConfig - } - - if err := conn.Ping(); err != nil { - return nil, err - } - - ch := &ClickHouse{ - conn: conn, - config: config, - } - - if err := ch.init(); err != nil { - return nil, err - } - - return ch, nil -} - -type ClickHouse struct { - conn *sql.DB - config *Config -} - -func (ch *ClickHouse) Open(dsn string) (database.Driver, error) { - purl, err := url.Parse(dsn) - if err != nil { - return nil, err - } - q := migrate.FilterCustomQuery(purl) - q.Scheme = "tcp" - conn, err := sql.Open("clickhouse", q.String()) - if err != nil { - return nil, err - } - - ch = &ClickHouse{ - conn: conn, - config: &Config{ - MigrationsTable: purl.Query().Get("x-migrations-table"), - DatabaseName: purl.Query().Get("database"), - }, - } - - if err := ch.init(); err != nil { - return nil, err - } - - return ch, nil -} - -func (ch *ClickHouse) init() error { - if len(ch.config.DatabaseName) == 0 { - if err := ch.conn.QueryRow("SELECT currentDatabase()").Scan(&ch.config.DatabaseName); err != nil { - return err - } - } - - if len(ch.config.MigrationsTable) == 0 { - ch.config.MigrationsTable = DefaultMigrationsTable - } - - return ch.ensureVersionTable() -} - -func (ch *ClickHouse) Run(r io.Reader) error { - migration, err := ioutil.ReadAll(r) - if err != nil { - return err - } - if _, err := ch.conn.Exec(string(migration)); err != nil { - return database.Error{OrigErr: err, Err: "migration failed", Query: migration} - } - - return nil -} -func (ch *ClickHouse) Version() (int, bool, error) { - var ( - version int - dirty uint8 - query = "SELECT version, dirty FROM `" + ch.config.MigrationsTable + "` ORDER BY sequence DESC LIMIT 1" - ) - if err := ch.conn.QueryRow(query).Scan(&version, &dirty); err != nil { - if err == sql.ErrNoRows { - return database.NilVersion, false, nil - } - return 0, false, &database.Error{OrigErr: err, Query: []byte(query)} - } - return version, dirty == 1, nil -} - -func (ch *ClickHouse) SetVersion(version int, dirty bool) error { - var ( - bool = func(v bool) uint8 { - if v { - return 1 - } - return 0 - } - tx, err = ch.conn.Begin() - ) - if err != nil { - return err - } - - query := "INSERT INTO " + ch.config.MigrationsTable + " (version, dirty, sequence) VALUES (?, ?, ?)" - if _, err := tx.Exec(query, version, bool(dirty), time.Now().UnixNano()); err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - - return tx.Commit() -} - -func (ch *ClickHouse) ensureVersionTable() error { - var ( - table string - query = "SHOW TABLES FROM " + ch.config.DatabaseName + " LIKE '" + ch.config.MigrationsTable + "'" - ) - // check if migration table exists - if err := ch.conn.QueryRow(query).Scan(&table); err != nil { - if err != sql.ErrNoRows 
{ - return &database.Error{OrigErr: err, Query: []byte(query)} - } - } else { - return nil - } - // if not, create the empty migration table - query = ` - CREATE TABLE ` + ch.config.MigrationsTable + ` ( - version UInt32, - dirty UInt8, - sequence UInt64 - ) Engine=TinyLog - ` - if _, err := ch.conn.Exec(query); err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - return nil -} - -func (ch *ClickHouse) Drop() error { - var ( - query = "SHOW TABLES FROM " + ch.config.DatabaseName - tables, err = ch.conn.Query(query) - ) - if err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - defer tables.Close() - for tables.Next() { - var table string - if err := tables.Scan(&table); err != nil { - return err - } - - query = "DROP TABLE IF EXISTS " + ch.config.DatabaseName + "." + table - - if _, err := ch.conn.Exec(query); err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - } - return ch.ensureVersionTable() -} - -func (ch *ClickHouse) Lock() error { return nil } -func (ch *ClickHouse) Unlock() error { return nil } -func (ch *ClickHouse) Close() error { return ch.conn.Close() } diff --git a/vendor/github.com/rdallman/migrate/database/clickhouse/examples/migrations/001_init.down.sql b/vendor/github.com/rdallman/migrate/database/clickhouse/examples/migrations/001_init.down.sql deleted file mode 100644 index 51cd8bfb5..000000000 --- a/vendor/github.com/rdallman/migrate/database/clickhouse/examples/migrations/001_init.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE IF EXISTS test_1; \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/clickhouse/examples/migrations/001_init.up.sql b/vendor/github.com/rdallman/migrate/database/clickhouse/examples/migrations/001_init.up.sql deleted file mode 100644 index 5436b6fdd..000000000 --- a/vendor/github.com/rdallman/migrate/database/clickhouse/examples/migrations/001_init.up.sql +++ /dev/null @@ -1,3 +0,0 @@ -CREATE TABLE test_1 ( - Date Date -) Engine=Memory; \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/clickhouse/examples/migrations/002_create_table.down.sql b/vendor/github.com/rdallman/migrate/database/clickhouse/examples/migrations/002_create_table.down.sql deleted file mode 100644 index 9d7712233..000000000 --- a/vendor/github.com/rdallman/migrate/database/clickhouse/examples/migrations/002_create_table.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE IF EXISTS test_2; \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/clickhouse/examples/migrations/002_create_table.up.sql b/vendor/github.com/rdallman/migrate/database/clickhouse/examples/migrations/002_create_table.up.sql deleted file mode 100644 index 6b49ed99d..000000000 --- a/vendor/github.com/rdallman/migrate/database/clickhouse/examples/migrations/002_create_table.up.sql +++ /dev/null @@ -1,3 +0,0 @@ -CREATE TABLE test_2 ( - Date Date -) Engine=Memory; \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/README.md b/vendor/github.com/rdallman/migrate/database/cockroachdb/README.md deleted file mode 100644 index 7931c2791..000000000 --- a/vendor/github.com/rdallman/migrate/database/cockroachdb/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# cockroachdb - -`cockroachdb://user:password@host:port/dbname?query` (`cockroach://`, and `crdb-postgres://` work, too) - -| URL Query | WithInstance Config | Description | -|------------|---------------------|-------------| -| `x-migrations-table` 
| `MigrationsTable` | Name of the migrations table | -| `x-lock-table` | `LockTable` | Name of the table which maintains the migration lock | -| `x-force-lock` | `ForceLock` | Force lock acquisition to fix faulty migrations which may not have released the schema lock (Boolean, default is `false`) | -| `dbname` | `DatabaseName` | The name of the database to connect to | -| `user` | | The user to sign in as | -| `password` | | The user's password | -| `host` | | The host to connect to. Values that start with / are for unix domain sockets. (default is localhost) | -| `port` | | The port to bind to. (default is 5432) | -| `connect_timeout` | | Maximum wait for connection, in seconds. Zero or not specified means wait indefinitely. | -| `sslcert` | | Cert file location. The file must contain PEM encoded data. | -| `sslkey` | | Key file location. The file must contain PEM encoded data. | -| `sslrootcert` | | The location of the root certificate file. The file must contain PEM encoded data. | -| `sslmode` | | Whether or not to use SSL (disable\|require\|verify-ca\|verify-full) | diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/cockroachdb.go b/vendor/github.com/rdallman/migrate/database/cockroachdb/cockroachdb.go deleted file mode 100644 index 8da31d378..000000000 --- a/vendor/github.com/rdallman/migrate/database/cockroachdb/cockroachdb.go +++ /dev/null @@ -1,338 +0,0 @@ -package cockroachdb - -import ( - "database/sql" - "fmt" - "io" - "io/ioutil" - nurl "net/url" - - "github.com/cockroachdb/cockroach-go/crdb" - "github.com/lib/pq" - "github.com/mattes/migrate" - "github.com/mattes/migrate/database" - "regexp" - "strconv" - "context" -) - -func init() { - db := CockroachDb{} - database.Register("cockroach", &db) - database.Register("cockroachdb", &db) - database.Register("crdb-postgres", &db) -} - -var DefaultMigrationsTable = "schema_migrations" -var DefaultLockTable = "schema_lock" - -var ( - ErrNilConfig = fmt.Errorf("no config") - ErrNoDatabaseName = fmt.Errorf("no database name") -) - -type Config struct { - MigrationsTable string - LockTable string - ForceLock bool - DatabaseName string -} - -type CockroachDb struct { - db *sql.DB - isLocked bool - - // Open and WithInstance need to guarantee that config is never nil - config *Config -} - -func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) { - if config == nil { - return nil, ErrNilConfig - } - - if err := instance.Ping(); err != nil { - return nil, err - } - - query := `SELECT current_database()` - var databaseName string - if err := instance.QueryRow(query).Scan(&databaseName); err != nil { - return nil, &database.Error{OrigErr: err, Query: []byte(query)} - } - - if len(databaseName) == 0 { - return nil, ErrNoDatabaseName - } - - config.DatabaseName = databaseName - - if len(config.MigrationsTable) == 0 { - config.MigrationsTable = DefaultMigrationsTable - } - - if len(config.LockTable) == 0 { - config.LockTable = DefaultLockTable - } - - px := &CockroachDb{ - db: instance, - config: config, - } - - if err := px.ensureVersionTable(); err != nil { - return nil, err - } - - if err := px.ensureLockTable(); err != nil { - return nil, err - } - - return px, nil -} - -func (c *CockroachDb) Open(url string) (database.Driver, error) { - purl, err := nurl.Parse(url) - if err != nil { - return nil, err - } - - // As Cockroach uses the postgres protocol, and 'postgres' is already a registered database, we need to replace the - // connect prefix, with the actual protocol, so that the library can 
differentiate between the implementations - re := regexp.MustCompile("^(cockroach(db)?|crdb-postgres)") - connectString := re.ReplaceAllString(migrate.FilterCustomQuery(purl).String(), "postgres") - - db, err := sql.Open("postgres", connectString) - if err != nil { - return nil, err - } - - migrationsTable := purl.Query().Get("x-migrations-table") - if len(migrationsTable) == 0 { - migrationsTable = DefaultMigrationsTable - } - - lockTable := purl.Query().Get("x-lock-table") - if len(lockTable) == 0 { - lockTable = DefaultLockTable - } - - forceLockQuery := purl.Query().Get("x-force-lock") - forceLock, err := strconv.ParseBool(forceLockQuery) - if err != nil { - forceLock = false - } - - px, err := WithInstance(db, &Config{ - DatabaseName: purl.Path, - MigrationsTable: migrationsTable, - LockTable: lockTable, - ForceLock: forceLock, - }) - if err != nil { - return nil, err - } - - return px, nil -} - -func (c *CockroachDb) Close() error { - return c.db.Close() -} - -// Locking is done manually with a separate lock table. Implementing advisory locks in CRDB is being discussed -// See: https://github.com/cockroachdb/cockroach/issues/13546 -func (c *CockroachDb) Lock() error { - err := crdb.ExecuteTx(context.Background(), c.db, nil, func(tx *sql.Tx) error { - aid, err := database.GenerateAdvisoryLockId(c.config.DatabaseName) - if err != nil { - return err - } - - query := "SELECT * FROM " + c.config.LockTable + " WHERE lock_id = $1" - rows, err := tx.Query(query, aid) - if err != nil { - return database.Error{OrigErr: err, Err: "failed to fetch migration lock", Query: []byte(query)} - } - defer rows.Close() - - // If row exists at all, lock is present - locked := rows.Next() - if locked && !c.config.ForceLock { - return database.Error{Err: "lock could not be acquired; already locked", Query: []byte(query)} - } - - query = "INSERT INTO " + c.config.LockTable + " (lock_id) VALUES ($1)" - if _, err := tx.Exec(query, aid) ; err != nil { - return database.Error{OrigErr: err, Err: "failed to set migration lock", Query: []byte(query)} - } - - return nil - }) - - if err != nil { - return err - } else { - c.isLocked = true - return nil - } -} - -// Locking is done manually with a separate lock table. Implementing advisory locks in CRDB is being discussed -// See: https://github.com/cockroachdb/cockroach/issues/13546 -func (c *CockroachDb) Unlock() error { - aid, err := database.GenerateAdvisoryLockId(c.config.DatabaseName) - if err != nil { - return err - } - - // In the event of an implementation (non-migration) error, it is possible for the lock to not be released. 
Until - // a better locking mechanism is added, a manual purging of the lock table may be required in such circumstances - query := "DELETE FROM " + c.config.LockTable + " WHERE lock_id = $1" - if _, err := c.db.Exec(query, aid); err != nil { - if e, ok := err.(*pq.Error); ok { - // 42P01 is "UndefinedTableError" in CockroachDB - // https://github.com/cockroachdb/cockroach/blob/master/pkg/sql/pgwire/pgerror/codes.go - if e.Code == "42P01" { - // On drops, the lock table is fully removed; This is fine, and is a valid "unlocked" state for the schema - c.isLocked = false - return nil - } - } - return database.Error{OrigErr: err, Err: "failed to release migration lock", Query: []byte(query)} - } - - c.isLocked = false - return nil -} - -func (c *CockroachDb) Run(migration io.Reader) error { - migr, err := ioutil.ReadAll(migration) - if err != nil { - return err - } - - // run migration - query := string(migr[:]) - if _, err := c.db.Exec(query); err != nil { - return database.Error{OrigErr: err, Err: "migration failed", Query: migr} - } - - return nil -} - -func (c *CockroachDb) SetVersion(version int, dirty bool) error { - return crdb.ExecuteTx(context.Background(), c.db, nil, func(tx *sql.Tx) error { - if _, err := tx.Exec( `TRUNCATE "` + c.config.MigrationsTable + `"`); err != nil { - return err - } - - if version >= 0 { - if _, err := tx.Exec(`INSERT INTO "` + c.config.MigrationsTable + `" (version, dirty) VALUES ($1, $2)`, version, dirty); err != nil { - return err - } - } - - return nil - }) -} - -func (c *CockroachDb) Version() (version int, dirty bool, err error) { - query := `SELECT version, dirty FROM "` + c.config.MigrationsTable + `" LIMIT 1` - err = c.db.QueryRow(query).Scan(&version, &dirty) - - switch { - case err == sql.ErrNoRows: - return database.NilVersion, false, nil - - case err != nil: - if e, ok := err.(*pq.Error); ok { - // 42P01 is "UndefinedTableError" in CockroachDB - // https://github.com/cockroachdb/cockroach/blob/master/pkg/sql/pgwire/pgerror/codes.go - if e.Code == "42P01" { - return database.NilVersion, false, nil - } - } - return 0, false, &database.Error{OrigErr: err, Query: []byte(query)} - - default: - return version, dirty, nil - } -} - -func (c *CockroachDb) Drop() error { - // select all tables in current schema - query := `SELECT table_name FROM information_schema.tables WHERE table_schema=(SELECT current_schema())` - tables, err := c.db.Query(query) - if err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - defer tables.Close() - - // delete one table after another - tableNames := make([]string, 0) - for tables.Next() { - var tableName string - if err := tables.Scan(&tableName); err != nil { - return err - } - if len(tableName) > 0 { - tableNames = append(tableNames, tableName) - } - } - - if len(tableNames) > 0 { - // delete one by one ... 
- for _, t := range tableNames { - query = `DROP TABLE IF EXISTS ` + t + ` CASCADE` - if _, err := c.db.Exec(query); err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - } - if err := c.ensureVersionTable(); err != nil { - return err - } - } - - return nil -} - -func (c *CockroachDb) ensureVersionTable() error { - // check if migration table exists - var count int - query := `SELECT COUNT(1) FROM information_schema.tables WHERE table_name = $1 AND table_schema = (SELECT current_schema()) LIMIT 1` - if err := c.db.QueryRow(query, c.config.MigrationsTable).Scan(&count); err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - if count == 1 { - return nil - } - - // if not, create the empty migration table - query = `CREATE TABLE "` + c.config.MigrationsTable + `" (version INT NOT NULL PRIMARY KEY, dirty BOOL NOT NULL)` - if _, err := c.db.Exec(query); err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - return nil -} - - -func (c *CockroachDb) ensureLockTable() error { - // check if lock table exists - var count int - query := `SELECT COUNT(1) FROM information_schema.tables WHERE table_name = $1 AND table_schema = (SELECT current_schema()) LIMIT 1` - if err := c.db.QueryRow(query, c.config.LockTable).Scan(&count); err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - if count == 1 { - return nil - } - - // if not, create the empty lock table - query = `CREATE TABLE "` + c.config.LockTable + `" (lock_id INT NOT NULL PRIMARY KEY)` - if _, err := c.db.Exec(query); err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - - return nil -} diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/cockroachdb_test.go b/vendor/github.com/rdallman/migrate/database/cockroachdb/cockroachdb_test.go deleted file mode 100644 index e2dc1f86e..000000000 --- a/vendor/github.com/rdallman/migrate/database/cockroachdb/cockroachdb_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package cockroachdb - -// error codes https://github.com/lib/pq/blob/master/error.go - -import ( - //"bytes" - "database/sql" - "fmt" - "io" - "testing" - - "github.com/lib/pq" - dt "github.com/mattes/migrate/database/testing" - mt "github.com/mattes/migrate/testing" - "bytes" -) - -var versions = []mt.Version{ - {Image: "cockroachdb/cockroach:v1.0.2", Cmd: []string{"start", "--insecure"}}, -} - -func isReady(i mt.Instance) bool { - db, err := sql.Open("postgres", fmt.Sprintf("postgres://root@%v:%v?sslmode=disable", i.Host(), i.PortFor(26257))) - if err != nil { - return false - } - defer db.Close() - err = db.Ping() - if err == io.EOF { - _, err = db.Exec("CREATE DATABASE migrate") - return err == nil; - } else if e, ok := err.(*pq.Error); ok { - if e.Code.Name() == "cannot_connect_now" { - return false - } - } - - _, err = db.Exec("CREATE DATABASE migrate") - return err == nil; - - return true -} - -func Test(t *testing.T) { - mt.ParallelTest(t, versions, isReady, - func(t *testing.T, i mt.Instance) { - c := &CockroachDb{} - addr := fmt.Sprintf("cockroach://root@%v:%v/migrate?sslmode=disable", i.Host(), i.PortFor(26257)) - d, err := c.Open(addr) - if err != nil { - t.Fatalf("%v", err) - } - dt.Test(t, d, []byte("SELECT 1")) - }) -} - -func TestMultiStatement(t *testing.T) { - mt.ParallelTest(t, versions, isReady, - func(t *testing.T, i mt.Instance) { - c := &CockroachDb{} - addr := fmt.Sprintf("cockroach://root@%v:%v/migrate?sslmode=disable", i.Host(), i.Port()) - d, err := c.Open(addr) - if err != nil { - 
t.Fatalf("%v", err) - } - if err := d.Run(bytes.NewReader([]byte("CREATE TABLE foo (foo text); CREATE TABLE bar (bar text);"))); err != nil { - t.Fatalf("expected err to be nil, got %v", err) - } - - // make sure second table exists - var exists bool - if err := d.(*CockroachDb).db.QueryRow("SELECT EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = 'bar' AND table_schema = (SELECT current_schema()))").Scan(&exists); err != nil { - t.Fatal(err) - } - if !exists { - t.Fatalf("expected table bar to exist") - } - }) -} - -func TestFilterCustomQuery(t *testing.T) { - mt.ParallelTest(t, versions, isReady, - func(t *testing.T, i mt.Instance) { - c := &CockroachDb{} - addr := fmt.Sprintf("cockroach://root@%v:%v/migrate?sslmode=disable&x-custom=foobar", i.Host(), i.PortFor(26257)) - _, err := c.Open(addr) - if err != nil { - t.Fatalf("%v", err) - } - }) -} diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1085649617_create_users_table.down.sql b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1085649617_create_users_table.down.sql deleted file mode 100644 index c99ddcdc8..000000000 --- a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1085649617_create_users_table.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE IF EXISTS users; diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1085649617_create_users_table.up.sql b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1085649617_create_users_table.up.sql deleted file mode 100644 index fc3210181..000000000 --- a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1085649617_create_users_table.up.sql +++ /dev/null @@ -1,5 +0,0 @@ -CREATE TABLE users ( - user_id INT UNIQUE, - name STRING(40), - email STRING(40) -); diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1185749658_add_city_to_users.down.sql b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1185749658_add_city_to_users.down.sql deleted file mode 100644 index 940c60712..000000000 --- a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1185749658_add_city_to_users.down.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE users DROP COLUMN IF EXISTS city; diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1185749658_add_city_to_users.up.sql b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1185749658_add_city_to_users.up.sql deleted file mode 100644 index 46204b0f8..000000000 --- a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1185749658_add_city_to_users.up.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE users ADD COLUMN city TEXT; diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1285849751_add_index_on_user_emails.down.sql b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1285849751_add_index_on_user_emails.down.sql deleted file mode 100644 index 3e87dd229..000000000 --- a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1285849751_add_index_on_user_emails.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP INDEX IF EXISTS users_email_index; diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1285849751_add_index_on_user_emails.up.sql 
b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1285849751_add_index_on_user_emails.up.sql deleted file mode 100644 index 61f8ba0b9..000000000 --- a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1285849751_add_index_on_user_emails.up.sql +++ /dev/null @@ -1,3 +0,0 @@ -CREATE UNIQUE INDEX IF NOT EXISTS users_email_index ON users (email); - --- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1385949617_create_books_table.down.sql b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1385949617_create_books_table.down.sql deleted file mode 100644 index 1a0b1a214..000000000 --- a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1385949617_create_books_table.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE IF EXISTS books; diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1385949617_create_books_table.up.sql b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1385949617_create_books_table.up.sql deleted file mode 100644 index 0d3b99928..000000000 --- a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1385949617_create_books_table.up.sql +++ /dev/null @@ -1,5 +0,0 @@ -CREATE TABLE books ( - user_id INT, - name STRING(40), - author STRING(40) -); diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1485949617_create_movies_table.down.sql b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1485949617_create_movies_table.down.sql deleted file mode 100644 index 3a5187689..000000000 --- a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1485949617_create_movies_table.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE IF EXISTS movies; diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1485949617_create_movies_table.up.sql b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1485949617_create_movies_table.up.sql deleted file mode 100644 index d533be900..000000000 --- a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1485949617_create_movies_table.up.sql +++ /dev/null @@ -1,5 +0,0 @@ -CREATE TABLE movies ( - user_id INT, - name STRING(40), - director STRING(40) -); diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1585849751_just_a_comment.up.sql b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1585849751_just_a_comment.up.sql deleted file mode 100644 index 9b6b57a61..000000000 --- a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1585849751_just_a_comment.up.sql +++ /dev/null @@ -1 +0,0 @@ --- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. 
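The example migrations removed above come in `<version>_<name>.up.sql` / `<version>_<name>.down.sql` pairs, resolved through a source driver at run time. As a minimal sketch of how such files were consumed, assuming the `mattes/migrate` API with the `file` source and the `cockroachdb` database driver (the URL shape follows the deleted tests; the host, credentials, and migrations path are placeholders):

```go
package main

import (
	"log"

	"github.com/mattes/migrate"
	// Blank imports register the drivers under their URL schemes.
	_ "github.com/mattes/migrate/database/cockroachdb"
	_ "github.com/mattes/migrate/source/file"
)

func main() {
	// migrate.New pairs a migration source with a database URL.
	m, err := migrate.New(
		"file://examples/migrations",
		"cockroach://root@localhost:26257/migrate?sslmode=disable",
	)
	if err != nil {
		log.Fatal(err)
	}
	// Up applies every pending *.up.sql; Down would walk back through the *.down.sql files.
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		log.Fatal(err)
	}
}
```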
diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1685849751_another_comment.up.sql b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1685849751_another_comment.up.sql deleted file mode 100644 index 9b6b57a61..000000000 --- a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1685849751_another_comment.up.sql +++ /dev/null @@ -1 +0,0 @@ --- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1785849751_another_comment.up.sql b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1785849751_another_comment.up.sql deleted file mode 100644 index 9b6b57a61..000000000 --- a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1785849751_another_comment.up.sql +++ /dev/null @@ -1 +0,0 @@ --- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1885849751_another_comment.up.sql b/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1885849751_another_comment.up.sql deleted file mode 100644 index 9b6b57a61..000000000 --- a/vendor/github.com/rdallman/migrate/database/cockroachdb/examples/migrations/1885849751_another_comment.up.sql +++ /dev/null @@ -1 +0,0 @@ --- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/rdallman/migrate/database/crate/README.md b/vendor/github.com/rdallman/migrate/database/crate/README.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/rdallman/migrate/database/driver.go b/vendor/github.com/rdallman/migrate/database/driver.go deleted file mode 100644 index 016eedcba..000000000 --- a/vendor/github.com/rdallman/migrate/database/driver.go +++ /dev/null @@ -1,112 +0,0 @@ -// Package database provides the Database interface. -// All database drivers must implement this interface, register themselves, -// optionally provide a `WithInstance` function and pass the tests -// in package database/testing. -package database - -import ( - "fmt" - "io" - nurl "net/url" - "sync" -) - -var ( - ErrLocked = fmt.Errorf("can't acquire lock") -) - -const NilVersion int = -1 - -var driversMu sync.RWMutex -var drivers = make(map[string]Driver) - -// Driver is the interface every database driver must implement. -// -// How to implement a database driver? -// 1. Implement this interface. -// 2. Optionally, add a function named `WithInstance`. -// This function should accept an existing DB instance and a Config{} struct -// and return a driver instance. -// 3. Add a test that calls database/testing.go:Test() -// 4. Add own tests for Open(), WithInstance() (when provided) and Close(). -// All other functions are tested by tests in database/testing. -// Saves you some time and makes sure all database drivers behave the same way. -// 5. Call Register in init(). -// 6. Create a migrate/cli/build_.go file -// 7. 
Add driver name in 'DATABASE' variable in Makefile -// -// Guidelines: -// * Don't try to correct user input. Don't assume things. -// When in doubt, return an error and explain the situation to the user. -// * All configuration input must come from the URL string in func Open() -// or the Config{} struct in WithInstance. Don't os.Getenv(). -type Driver interface { - // Open returns a new driver instance configured with parameters - // coming from the URL string. Migrate will call this function - // only once per instance. - Open(url string) (Driver, error) - - // Close closes the underlying database instance managed by the driver. - // Migrate will call this function only once per instance. - Close() error - - // Lock should acquire a database lock so that only one migration process - // can run at a time. Migrate will call this function before Run is called. - // If the implementation can't provide this functionality, return nil. - // Return database.ErrLocked if database is already locked. - Lock() error - - // Unlock should release the lock. Migrate will call this function after - // all migrations have been run. - Unlock() error - - // Run applies a migration to the database. migration is guaranteed to be non-nil. - Run(migration io.Reader) error - - // SetVersion saves version and dirty state. - // Migrate will call this function before and after each call to Run. - // version must be >= -1. -1 means NilVersion. - SetVersion(version int, dirty bool) error - - // Version returns the currently active version and if the database is dirty. - // When no migration has been applied, it must return version -1. - // Dirty means a previous migration failed and user interaction is required. - Version() (version int, dirty bool, err error) - - // Drop deletes everything in the database. - Drop() error -} - -// Open returns a new driver instance. -func Open(url string) (Driver, error) { - u, err := nurl.Parse(url) - if err != nil { - return nil, err - } - - if u.Scheme == "" { - return nil, fmt.Errorf("database driver: invalid URL scheme") - } - - driversMu.RLock() - d, ok := drivers[u.Scheme] - driversMu.RUnlock() - if !ok { - return nil, fmt.Errorf("database driver: unknown driver %v (forgotten import?)", u.Scheme) - } - - return d.Open(url) -} - -// Register globally registers a driver.
-func Register(name string, driver Driver) { - driversMu.Lock() - defer driversMu.Unlock() - if driver == nil { - panic("Register driver is nil") - } - if _, dup := drivers[name]; dup { - panic("Register called twice for driver " + name) - } - drivers[name] = driver -} diff --git a/vendor/github.com/rdallman/migrate/database/driver_test.go b/vendor/github.com/rdallman/migrate/database/driver_test.go deleted file mode 100644 index c0a29304f..000000000 --- a/vendor/github.com/rdallman/migrate/database/driver_test.go +++ /dev/null @@ -1,8 +0,0 @@ -package database - -func ExampleDriver() { - // see database/stub for an example - - // database/stub/stub.go has the driver implementation - // database/stub/stub_test.go runs database/testing/test.go:Test -} diff --git a/vendor/github.com/rdallman/migrate/database/error.go b/vendor/github.com/rdallman/migrate/database/error.go deleted file mode 100644 index eb802c753..000000000 --- a/vendor/github.com/rdallman/migrate/database/error.go +++ /dev/null @@ -1,27 +0,0 @@ -package database - -import ( - "fmt" -) - -// Error should be used for errors involving queries run against the database -type Error struct { - // Optional: the line number - Line uint - - // Query is a query excerpt - Query []byte - - // Err is a helpful error message for humans - Err string - - // OrigErr is the underlying error - OrigErr error -} - -func (e Error) Error() string { - if len(e.Err) == 0 { - return fmt.Sprintf("%v in line %v: %s", e.OrigErr, e.Line, e.Query) - } - return fmt.Sprintf("%v in line %v: %s (details: %v)", e.Err, e.Line, e.Query, e.OrigErr) -} diff --git a/vendor/github.com/rdallman/migrate/database/mongodb/README.md b/vendor/github.com/rdallman/migrate/database/mongodb/README.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/rdallman/migrate/database/mysql/README.md b/vendor/github.com/rdallman/migrate/database/mysql/README.md deleted file mode 100644 index 490e90b21..000000000 --- a/vendor/github.com/rdallman/migrate/database/mysql/README.md +++ /dev/null @@ -1,53 +0,0 @@ -# MySQL - -`mysql://user:password@tcp(host:port)/dbname?query` - -| URL Query | WithInstance Config | Description | -|------------|---------------------|-------------| -| `x-migrations-table` | `MigrationsTable` | Name of the migrations table | -| `dbname` | `DatabaseName` | The name of the database to connect to | -| `user` | | The user to sign in as | -| `password` | | The user's password | -| `host` | | The host to connect to. | -| `port` | | The port to bind to. | -| `x-tls-ca` | | The location of the root certificate file. | -| `x-tls-cert` | | Cert file location. | -| `x-tls-key` | | Key file location. | -| `x-tls-insecure-skip-verify` | | Whether to skip server certificate verification (true\|false) | - -## Use with existing client - -If you use the MySQL driver with an existing database client, you must create the client with the parameter `multiStatements=true`: - -```go -package main - -import ( - "database/sql" - - _ "github.com/go-sql-driver/mysql" - "github.com/mattes/migrate" - "github.com/mattes/migrate/database/mysql" - _ "github.com/mattes/migrate/source/file" -) - -func main() { - db, _ := sql.Open("mysql", "user:password@tcp(host:port)/dbname?multiStatements=true") - driver, _ := mysql.WithInstance(db, &mysql.Config{}) - m, _ := migrate.NewWithDatabaseInstance( - "file:///migrations", - "mysql", - driver, - ) - - m.Steps(2) -} -``` - -## Upgrading from v1 - -1. Write down the current migration version from schema_migrations -1.
`DROP TABLE schema_migrations` -2. Wrap your existing migrations in transactions ([BEGIN/COMMIT](https://dev.mysql.com/doc/refman/5.7/en/commit.html)) if you use multiple statements within one migration. -3. Download and install the latest migrate version. -4. Force the current migration version with `migrate force `. diff --git a/vendor/github.com/rdallman/migrate/database/mysql/mysql.go b/vendor/github.com/rdallman/migrate/database/mysql/mysql.go deleted file mode 100644 index 3aecfe19c..000000000 --- a/vendor/github.com/rdallman/migrate/database/mysql/mysql.go +++ /dev/null @@ -1,344 +0,0 @@ -// +build go1.9 - -package mysql - -import ( - "context" - "crypto/tls" - "crypto/x509" - "database/sql" - "fmt" - "io" - "io/ioutil" - nurl "net/url" - "strconv" - "strings" - - "github.com/go-sql-driver/mysql" - "github.com/mattes/migrate" - "github.com/mattes/migrate/database" -) - -func init() { - database.Register("mysql", &Mysql{}) -} - -var DefaultMigrationsTable = "schema_migrations" - -var ( - ErrDatabaseDirty = fmt.Errorf("database is dirty") - ErrNilConfig = fmt.Errorf("no config") - ErrNoDatabaseName = fmt.Errorf("no database name") - ErrAppendPEM = fmt.Errorf("failed to append PEM") -) - -type Config struct { - MigrationsTable string - DatabaseName string -} - -type Mysql struct { - // mysql RELEASE_LOCK must be called from the same conn, so - // just do everything over a single conn anyway. - db *sql.Conn - isLocked bool - - config *Config -} - -// instance must have `multiStatements` set to true -func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) { - if config == nil { - return nil, ErrNilConfig - } - - if err := instance.Ping(); err != nil { - return nil, err - } - - query := `SELECT DATABASE()` - var databaseName sql.NullString - if err := instance.QueryRow(query).Scan(&databaseName); err != nil { - return nil, &database.Error{OrigErr: err, Query: []byte(query)} - } - - if len(databaseName.String) == 0 { - return nil, ErrNoDatabaseName - } - - config.DatabaseName = databaseName.String - - if len(config.MigrationsTable) == 0 { - config.MigrationsTable = DefaultMigrationsTable - } - - conn, err := instance.Conn(context.Background()) - if err != nil { - return nil, err - } - - mx := &Mysql{ - db: conn, - config: config, - } - - if err := mx.ensureVersionTable(); err != nil { - return nil, err - } - - return mx, nil -} - -func (m *Mysql) Open(url string) (database.Driver, error) { - purl, err := nurl.Parse(url) - if err != nil { - return nil, err - } - - q := purl.Query() - q.Set("multiStatements", "true") - purl.RawQuery = q.Encode() - - db, err := sql.Open("mysql", strings.Replace( - migrate.FilterCustomQuery(purl).String(), "mysql://", "", 1)) - if err != nil { - return nil, err - } - - migrationsTable := purl.Query().Get("x-migrations-table") - if len(migrationsTable) == 0 { - migrationsTable = DefaultMigrationsTable - } - - // use custom TLS? 
- ctls := purl.Query().Get("tls") - if len(ctls) > 0 { - if _, isBool := readBool(ctls); !isBool && strings.ToLower(ctls) != "skip-verify" { - rootCertPool := x509.NewCertPool() - pem, err := ioutil.ReadFile(purl.Query().Get("x-tls-ca")) - if err != nil { - return nil, err - } - - if ok := rootCertPool.AppendCertsFromPEM(pem); !ok { - return nil, ErrAppendPEM - } - - certs, err := tls.LoadX509KeyPair(purl.Query().Get("x-tls-cert"), purl.Query().Get("x-tls-key")) - if err != nil { - return nil, err - } - - insecureSkipVerify := false - if len(purl.Query().Get("x-tls-insecure-skip-verify")) > 0 { - x, err := strconv.ParseBool(purl.Query().Get("x-tls-insecure-skip-verify")) - if err != nil { - return nil, err - } - insecureSkipVerify = x - } - - mysql.RegisterTLSConfig(ctls, &tls.Config{ - RootCAs: rootCertPool, - Certificates: []tls.Certificate{certs}, - InsecureSkipVerify: insecureSkipVerify, - }) - } - } - - mx, err := WithInstance(db, &Config{ - DatabaseName: purl.Path, - MigrationsTable: migrationsTable, - }) - if err != nil { - return nil, err - } - - return mx, nil -} - -func (m *Mysql) Close() error { - return m.db.Close() -} - -func (m *Mysql) Lock() error { - if m.isLocked { - return database.ErrLocked - } - - aid, err := database.GenerateAdvisoryLockId( - fmt.Sprintf("%s:%s", m.config.DatabaseName, m.config.MigrationsTable)) - if err != nil { - return err - } - - query := "SELECT GET_LOCK(?, 10)" - var success bool - if err := m.db.QueryRowContext(context.Background(), query, aid).Scan(&success); err != nil { - return &database.Error{OrigErr: err, Err: "try lock failed", Query: []byte(query)} - } - - if success { - m.isLocked = true - return nil - } - - return database.ErrLocked -} - -func (m *Mysql) Unlock() error { - if !m.isLocked { - return nil - } - - aid, err := database.GenerateAdvisoryLockId( - fmt.Sprintf("%s:%s", m.config.DatabaseName, m.config.MigrationsTable)) - if err != nil { - return err - } - - query := `SELECT RELEASE_LOCK(?)` - if _, err := m.db.ExecContext(context.Background(), query, aid); err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - - // NOTE: RELEASE_LOCK could return NULL (or 0 if the code is changed), - // in which case isLocked should be true until the timeout expires -- synchronizing - // these states is likely not worth trying to do; reconsider the necessity of isLocked.
- - m.isLocked = false - return nil -} - -func (m *Mysql) Run(migration io.Reader) error { - migr, err := ioutil.ReadAll(migration) - if err != nil { - return err - } - - query := string(migr[:]) - if _, err := m.db.ExecContext(context.Background(), query); err != nil { - return database.Error{OrigErr: err, Err: "migration failed", Query: migr} - } - - return nil -} - -func (m *Mysql) SetVersion(version int, dirty bool) error { - tx, err := m.db.BeginTx(context.Background(), &sql.TxOptions{}) - if err != nil { - return &database.Error{OrigErr: err, Err: "transaction start failed"} - } - - query := "TRUNCATE `" + m.config.MigrationsTable + "`" - if _, err := tx.ExecContext(context.Background(), query); err != nil { - tx.Rollback() - return &database.Error{OrigErr: err, Query: []byte(query)} - } - - if version >= 0 { - query := "INSERT INTO `" + m.config.MigrationsTable + "` (version, dirty) VALUES (?, ?)" - if _, err := tx.ExecContext(context.Background(), query, version, dirty); err != nil { - tx.Rollback() - return &database.Error{OrigErr: err, Query: []byte(query)} - } - } - - if err := tx.Commit(); err != nil { - return &database.Error{OrigErr: err, Err: "transaction commit failed"} - } - - return nil -} - -func (m *Mysql) Version() (version int, dirty bool, err error) { - query := "SELECT version, dirty FROM `" + m.config.MigrationsTable + "` LIMIT 1" - err = m.db.QueryRowContext(context.Background(), query).Scan(&version, &dirty) - switch { - case err == sql.ErrNoRows: - return database.NilVersion, false, nil - - case err != nil: - if e, ok := err.(*mysql.MySQLError); ok { - if e.Number == 0 { - return database.NilVersion, false, nil - } - } - return 0, false, &database.Error{OrigErr: err, Query: []byte(query)} - - default: - return version, dirty, nil - } -} - -func (m *Mysql) Drop() error { - // select all tables - query := `SHOW TABLES LIKE '%'` - tables, err := m.db.QueryContext(context.Background(), query) - if err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - defer tables.Close() - - // delete one table after another - tableNames := make([]string, 0) - for tables.Next() { - var tableName string - if err := tables.Scan(&tableName); err != nil { - return err - } - if len(tableName) > 0 { - tableNames = append(tableNames, tableName) - } - } - - if len(tableNames) > 0 { - // delete one by one ... - for _, t := range tableNames { - query = "DROP TABLE IF EXISTS `" + t + "` CASCADE" - if _, err := m.db.ExecContext(context.Background(), query); err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - } - if err := m.ensureVersionTable(); err != nil { - return err - } - } - - return nil -} - -func (m *Mysql) ensureVersionTable() error { - // check if migration table exists - var result string - query := `SHOW TABLES LIKE "` + m.config.MigrationsTable + `"` - if err := m.db.QueryRowContext(context.Background(), query).Scan(&result); err != nil { - if err != sql.ErrNoRows { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - } else { - return nil - } - - // if not, create the empty migration table - query = "CREATE TABLE `" + m.config.MigrationsTable + "` (version bigint not null primary key, dirty boolean not null)" - if _, err := m.db.ExecContext(context.Background(), query); err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - return nil -} - -// Returns the bool value of the input. 
-// The 2nd return value indicates if the input was a valid bool value -// See https://github.com/go-sql-driver/mysql/blob/a059889267dc7170331388008528b3b44479bffb/utils.go#L71 -func readBool(input string) (value bool, valid bool) { - switch input { - case "1", "true", "TRUE", "True": - return true, true - case "0", "false", "FALSE", "False": - return false, true - } - - // Not a valid bool value - return -} diff --git a/vendor/github.com/rdallman/migrate/database/mysql/mysql_test.go b/vendor/github.com/rdallman/migrate/database/mysql/mysql_test.go deleted file mode 100644 index 64176f64a..000000000 --- a/vendor/github.com/rdallman/migrate/database/mysql/mysql_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package mysql - -import ( - "database/sql" - sqldriver "database/sql/driver" - "fmt" - // "io/ioutil" - // "log" - "testing" - - // "github.com/go-sql-driver/mysql" - dt "github.com/mattes/migrate/database/testing" - mt "github.com/mattes/migrate/testing" -) - -var versions = []mt.Version{ - {Image: "mysql:8", ENV: []string{"MYSQL_ROOT_PASSWORD=root", "MYSQL_DATABASE=public"}}, - {Image: "mysql:5.7", ENV: []string{"MYSQL_ROOT_PASSWORD=root", "MYSQL_DATABASE=public"}}, - {Image: "mysql:5.6", ENV: []string{"MYSQL_ROOT_PASSWORD=root", "MYSQL_DATABASE=public"}}, - {Image: "mysql:5.5", ENV: []string{"MYSQL_ROOT_PASSWORD=root", "MYSQL_DATABASE=public"}}, -} - -func isReady(i mt.Instance) bool { - db, err := sql.Open("mysql", fmt.Sprintf("root:root@tcp(%v:%v)/public", i.Host(), i.Port())) - if err != nil { - return false - } - defer db.Close() - err = db.Ping() - - if err == sqldriver.ErrBadConn { - return false - } - - return true -} - -func Test(t *testing.T) { - // mysql.SetLogger(mysql.Logger(log.New(ioutil.Discard, "", log.Ltime))) - - mt.ParallelTest(t, versions, isReady, - func(t *testing.T, i mt.Instance) { - p := &Mysql{} - addr := fmt.Sprintf("mysql://root:root@tcp(%v:%v)/public", i.Host(), i.Port()) - d, err := p.Open(addr) - if err != nil { - t.Fatalf("%v", err) - } - dt.Test(t, d, []byte("SELECT 1")) - - // check ensureVersionTable - if err := d.(*Mysql).ensureVersionTable(); err != nil { - t.Fatal(err) - } - // check again - if err := d.(*Mysql).ensureVersionTable(); err != nil { - t.Fatal(err) - } - }) -} - -func TestLockWorks(t *testing.T) { - mt.ParallelTest(t, versions, isReady, - func(t *testing.T, i mt.Instance) { - p := &Mysql{} - addr := fmt.Sprintf("mysql://root:root@tcp(%v:%v)/public", i.Host(), i.Port()) - d, err := p.Open(addr) - if err != nil { - t.Fatalf("%v", err) - } - dt.Test(t, d, []byte("SELECT 1")) - - ms := d.(*Mysql) - - err = ms.Lock() - if err != nil { - t.Fatal(err) - } - err = ms.Unlock() - if err != nil { - t.Fatal(err) - } - - // make sure the 2nd lock works (RELEASE_LOCK is very finicky) - err = ms.Lock() - if err != nil { - t.Fatal(err) - } - err = ms.Unlock() - if err != nil { - t.Fatal(err) - } - }) -} diff --git a/vendor/github.com/rdallman/migrate/database/neo4j/README.md b/vendor/github.com/rdallman/migrate/database/neo4j/README.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/rdallman/migrate/database/postgres/README.md b/vendor/github.com/rdallman/migrate/database/postgres/README.md deleted file mode 100644 index f6312392b..000000000 --- a/vendor/github.com/rdallman/migrate/database/postgres/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# postgres - -`postgres://user:password@host:port/dbname?query` (`postgresql://` works, too) - -| URL Query | WithInstance Config | Description | 
-|------------|---------------------|-------------| -| `x-migrations-table` | `MigrationsTable` | Name of the migrations table | -| `dbname` | `DatabaseName` | The name of the database to connect to | -| `search_path` | | This variable specifies the order in which schemas are searched when an object is referenced by a simple name with no schema specified. | -| `user` | | The user to sign in as | -| `password` | | The user's password | -| `host` | | The host to connect to. Values that start with / are for unix domain sockets. (default is localhost) | -| `port` | | The port to bind to. (default is 5432) | -| `fallback_application_name` | | An application_name to fall back to if one isn't provided. | -| `connect_timeout` | | Maximum wait for connection, in seconds. Zero or not specified means wait indefinitely. | -| `sslcert` | | Cert file location. The file must contain PEM encoded data. | -| `sslkey` | | Key file location. The file must contain PEM encoded data. | -| `sslrootcert` | | The location of the root certificate file. The file must contain PEM encoded data. | -| `sslmode` | | Whether or not to use SSL (disable\|require\|verify-ca\|verify-full) | - - -## Upgrading from v1 - -1. Write down the current migration version from schema_migrations -1. `DROP TABLE schema_migrations` -2. Wrap your existing migrations in transactions ([BEGIN/COMMIT](https://www.postgresql.org/docs/current/static/transaction-iso.html)) if you use multiple statements within one migration. -3. Download and install the latest migrate version. -4. Force the current migration version with `migrate force `. diff --git a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1085649617_create_users_table.down.sql b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1085649617_create_users_table.down.sql deleted file mode 100644 index c99ddcdc8..000000000 --- a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1085649617_create_users_table.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE IF EXISTS users; diff --git a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1085649617_create_users_table.up.sql b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1085649617_create_users_table.up.sql deleted file mode 100644 index 92897dcab..000000000 --- a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1085649617_create_users_table.up.sql +++ /dev/null @@ -1,5 +0,0 @@ -CREATE TABLE users ( - user_id integer unique, - name varchar(40), - email varchar(40) -); diff --git a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.down.sql b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.down.sql deleted file mode 100644 index 940c60712..000000000 --- a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.down.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE users DROP COLUMN IF EXISTS city; diff --git a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.up.sql b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.up.sql deleted file mode 100644 index 67823edc9..000000000 --- a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.up.sql +++ /dev/null @@ -1,3 +0,0 @@ -ALTER TABLE users ADD COLUMN city 
varchar(100); - - diff --git a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.down.sql b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.down.sql deleted file mode 100644 index 3e87dd229..000000000 --- a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP INDEX IF EXISTS users_email_index; diff --git a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.up.sql b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.up.sql deleted file mode 100644 index fbeb4ab4e..000000000 --- a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.up.sql +++ /dev/null @@ -1,3 +0,0 @@ -CREATE UNIQUE INDEX CONCURRENTLY users_email_index ON users (email); - --- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1385949617_create_books_table.down.sql b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1385949617_create_books_table.down.sql deleted file mode 100644 index 1a0b1a214..000000000 --- a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1385949617_create_books_table.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE IF EXISTS books; diff --git a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1385949617_create_books_table.up.sql b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1385949617_create_books_table.up.sql deleted file mode 100644 index f1503b518..000000000 --- a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1385949617_create_books_table.up.sql +++ /dev/null @@ -1,5 +0,0 @@ -CREATE TABLE books ( - user_id integer, - name varchar(40), - author varchar(40) -); diff --git a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.down.sql b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.down.sql deleted file mode 100644 index 3a5187689..000000000 --- a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE IF EXISTS movies; diff --git a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.up.sql b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.up.sql deleted file mode 100644 index f0ef5943b..000000000 --- a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.up.sql +++ /dev/null @@ -1,5 +0,0 @@ -CREATE TABLE movies ( - user_id integer, - name varchar(40), - director varchar(40) -); diff --git a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1585849751_just_a_comment.up.sql b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1585849751_just_a_comment.up.sql deleted file mode 100644 index 9b6b57a61..000000000 --- 
a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1585849751_just_a_comment.up.sql +++ /dev/null @@ -1 +0,0 @@ --- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1685849751_another_comment.up.sql b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1685849751_another_comment.up.sql deleted file mode 100644 index 9b6b57a61..000000000 --- a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1685849751_another_comment.up.sql +++ /dev/null @@ -1 +0,0 @@ --- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1785849751_another_comment.up.sql b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1785849751_another_comment.up.sql deleted file mode 100644 index 9b6b57a61..000000000 --- a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1785849751_another_comment.up.sql +++ /dev/null @@ -1 +0,0 @@ --- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1885849751_another_comment.up.sql b/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1885849751_another_comment.up.sql deleted file mode 100644 index 9b6b57a61..000000000 --- a/vendor/github.com/rdallman/migrate/database/postgres/examples/migrations/1885849751_another_comment.up.sql +++ /dev/null @@ -1 +0,0 @@ --- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. 
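The `WithInstance Config` column in the postgres README above maps URL query parameters onto a `Config` struct for callers that already hold a `*sql.DB`. A minimal sketch of that path, assuming the same `mattes/migrate` API as in the MySQL README example earlier; the DSN, table name, and migrations path are placeholders:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // SQL driver used by the postgres database driver
	"github.com/mattes/migrate"
	"github.com/mattes/migrate/database/postgres"
	_ "github.com/mattes/migrate/source/file"
)

func main() {
	// Placeholder DSN; sslmode and friends come straight from the README's query table.
	db, err := sql.Open("postgres", "postgres://user:password@localhost:5432/dbname?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}

	// MigrationsTable mirrors the x-migrations-table URL query parameter.
	driver, err := postgres.WithInstance(db, &postgres.Config{MigrationsTable: "schema_migrations"})
	if err != nil {
		log.Fatal(err)
	}

	m, err := migrate.NewWithDatabaseInstance("file://examples/migrations", "postgres", driver)
	if err != nil {
		log.Fatal(err)
	}
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		log.Fatal(err)
	}
}
```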
diff --git a/vendor/github.com/rdallman/migrate/database/postgres/postgres.go b/vendor/github.com/rdallman/migrate/database/postgres/postgres.go deleted file mode 100644 index fb2d61c28..000000000 --- a/vendor/github.com/rdallman/migrate/database/postgres/postgres.go +++ /dev/null @@ -1,273 +0,0 @@ -package postgres - -import ( - "database/sql" - "fmt" - "io" - "io/ioutil" - nurl "net/url" - - "github.com/lib/pq" - "github.com/mattes/migrate" - "github.com/mattes/migrate/database" -) - -func init() { - db := Postgres{} - database.Register("postgres", &db) - database.Register("postgresql", &db) -} - -var DefaultMigrationsTable = "schema_migrations" - -var ( - ErrNilConfig = fmt.Errorf("no config") - ErrNoDatabaseName = fmt.Errorf("no database name") - ErrNoSchema = fmt.Errorf("no schema") - ErrDatabaseDirty = fmt.Errorf("database is dirty") -) - -type Config struct { - MigrationsTable string - DatabaseName string -} - -type Postgres struct { - db *sql.DB - isLocked bool - - // Open and WithInstance need to guarantee that config is never nil - config *Config -} - -func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) { - if config == nil { - return nil, ErrNilConfig - } - - if err := instance.Ping(); err != nil { - return nil, err - } - - query := `SELECT CURRENT_DATABASE()` - var databaseName string - if err := instance.QueryRow(query).Scan(&databaseName); err != nil { - return nil, &database.Error{OrigErr: err, Query: []byte(query)} - } - - if len(databaseName) == 0 { - return nil, ErrNoDatabaseName - } - - config.DatabaseName = databaseName - - if len(config.MigrationsTable) == 0 { - config.MigrationsTable = DefaultMigrationsTable - } - - px := &Postgres{ - db: instance, - config: config, - } - - if err := px.ensureVersionTable(); err != nil { - return nil, err - } - - return px, nil -} - -func (p *Postgres) Open(url string) (database.Driver, error) { - purl, err := nurl.Parse(url) - if err != nil { - return nil, err - } - - db, err := sql.Open("postgres", migrate.FilterCustomQuery(purl).String()) - if err != nil { - return nil, err - } - - migrationsTable := purl.Query().Get("x-migrations-table") - if len(migrationsTable) == 0 { - migrationsTable = DefaultMigrationsTable - } - - px, err := WithInstance(db, &Config{ - DatabaseName: purl.Path, - MigrationsTable: migrationsTable, - }) - if err != nil { - return nil, err - } - - return px, nil -} - -func (p *Postgres) Close() error { - return p.db.Close() -} - -// https://www.postgresql.org/docs/9.6/static/explicit-locking.html#ADVISORY-LOCKS -func (p *Postgres) Lock() error { - if p.isLocked { - return database.ErrLocked - } - - aid, err := database.GenerateAdvisoryLockId(p.config.DatabaseName) - if err != nil { - return err - } - - // This will either obtain the lock immediately and return true, - // or return false if the lock cannot be acquired immediately.
- query := `SELECT pg_try_advisory_lock($1)` - var success bool - if err := p.db.QueryRow(query, aid).Scan(&success); err != nil { - return &database.Error{OrigErr: err, Err: "try lock failed", Query: []byte(query)} - } - - if success { - p.isLocked = true - return nil - } - - return database.ErrLocked -} - -func (p *Postgres) Unlock() error { - if !p.isLocked { - return nil - } - - aid, err := database.GenerateAdvisoryLockId(p.config.DatabaseName) - if err != nil { - return err - } - - query := `SELECT pg_advisory_unlock($1)` - if _, err := p.db.Exec(query, aid); err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - p.isLocked = false - return nil -} - -func (p *Postgres) Run(migration io.Reader) error { - migr, err := ioutil.ReadAll(migration) - if err != nil { - return err - } - - // run migration - query := string(migr[:]) - if _, err := p.db.Exec(query); err != nil { - // TODO: cast to postgres error and get line number - return database.Error{OrigErr: err, Err: "migration failed", Query: migr} - } - - return nil -} - -func (p *Postgres) SetVersion(version int, dirty bool) error { - tx, err := p.db.Begin() - if err != nil { - return &database.Error{OrigErr: err, Err: "transaction start failed"} - } - - query := `TRUNCATE "` + p.config.MigrationsTable + `"` - if _, err := tx.Exec(query); err != nil { - tx.Rollback() - return &database.Error{OrigErr: err, Query: []byte(query)} - } - - if version >= 0 { - query = `INSERT INTO "` + p.config.MigrationsTable + `" (version, dirty) VALUES ($1, $2)` - if _, err := tx.Exec(query, version, dirty); err != nil { - tx.Rollback() - return &database.Error{OrigErr: err, Query: []byte(query)} - } - } - - if err := tx.Commit(); err != nil { - return &database.Error{OrigErr: err, Err: "transaction commit failed"} - } - - return nil -} - -func (p *Postgres) Version() (version int, dirty bool, err error) { - query := `SELECT version, dirty FROM "` + p.config.MigrationsTable + `" LIMIT 1` - err = p.db.QueryRow(query).Scan(&version, &dirty) - switch { - case err == sql.ErrNoRows: - return database.NilVersion, false, nil - - case err != nil: - if e, ok := err.(*pq.Error); ok { - if e.Code.Name() == "undefined_table" { - return database.NilVersion, false, nil - } - } - return 0, false, &database.Error{OrigErr: err, Query: []byte(query)} - - default: - return version, dirty, nil - } -} - -func (p *Postgres) Drop() error { - // select all tables in current schema - query := `SELECT table_name FROM information_schema.tables WHERE table_schema=(SELECT current_schema())` - tables, err := p.db.Query(query) - if err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - defer tables.Close() - - // delete one table after another - tableNames := make([]string, 0) - for tables.Next() { - var tableName string - if err := tables.Scan(&tableName); err != nil { - return err - } - if len(tableName) > 0 { - tableNames = append(tableNames, tableName) - } - } - - if len(tableNames) > 0 { - // delete one by one ...
- for _, t := range tableNames { - query = `DROP TABLE IF EXISTS ` + t + ` CASCADE` - if _, err := p.db.Exec(query); err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - } - if err := p.ensureVersionTable(); err != nil { - return err - } - } - - return nil -} - -func (p *Postgres) ensureVersionTable() error { - // check if migration table exists - var count int - query := `SELECT COUNT(1) FROM information_schema.tables WHERE table_name = $1 AND table_schema = (SELECT current_schema()) LIMIT 1` - if err := p.db.QueryRow(query, p.config.MigrationsTable).Scan(&count); err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - if count == 1 { - return nil - } - - // if not, create the empty migration table - query = `CREATE TABLE "` + p.config.MigrationsTable + `" (version bigint not null primary key, dirty boolean not null)` - if _, err := p.db.Exec(query); err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - return nil -} diff --git a/vendor/github.com/rdallman/migrate/database/postgres/postgres_test.go b/vendor/github.com/rdallman/migrate/database/postgres/postgres_test.go deleted file mode 100644 index 9a367a059..000000000 --- a/vendor/github.com/rdallman/migrate/database/postgres/postgres_test.go +++ /dev/null @@ -1,150 +0,0 @@ -package postgres - -// error codes https://github.com/lib/pq/blob/master/error.go - -import ( - "bytes" - "database/sql" - "fmt" - "io" - "testing" - - "github.com/lib/pq" - dt "github.com/mattes/migrate/database/testing" - mt "github.com/mattes/migrate/testing" -) - -var versions = []mt.Version{ - {Image: "postgres:9.6"}, - {Image: "postgres:9.5"}, - {Image: "postgres:9.4"}, - {Image: "postgres:9.3"}, - {Image: "postgres:9.2"}, -} - -func isReady(i mt.Instance) bool { - db, err := sql.Open("postgres", fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable", i.Host(), i.Port())) - if err != nil { - return false - } - defer db.Close() - err = db.Ping() - if err == io.EOF { - return false - - } else if e, ok := err.(*pq.Error); ok { - if e.Code.Name() == "cannot_connect_now" { - return false - } - } - - return true -} - -func Test(t *testing.T) { - mt.ParallelTest(t, versions, isReady, - func(t *testing.T, i mt.Instance) { - p := &Postgres{} - addr := fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable", i.Host(), i.Port()) - d, err := p.Open(addr) - if err != nil { - t.Fatalf("%v", err) - } - dt.Test(t, d, []byte("SELECT 1")) - }) -} - -func TestMultiStatement(t *testing.T) { - mt.ParallelTest(t, versions, isReady, - func(t *testing.T, i mt.Instance) { - p := &Postgres{} - addr := fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable", i.Host(), i.Port()) - d, err := p.Open(addr) - if err != nil { - t.Fatalf("%v", err) - } - if err := d.Run(bytes.NewReader([]byte("CREATE TABLE foo (foo text); CREATE TABLE bar (bar text);"))); err != nil { - t.Fatalf("expected err to be nil, got %v", err) - } - - // make sure second table exists - var exists bool - if err := d.(*Postgres).db.QueryRow("SELECT EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = 'bar' AND table_schema = (SELECT current_schema()))").Scan(&exists); err != nil { - t.Fatal(err) - } - if !exists { - t.Fatalf("expected table bar to exist") - } - }) -} - -func TestFilterCustomQuery(t *testing.T) { - mt.ParallelTest(t, versions, isReady, - func(t *testing.T, i mt.Instance) { - p := &Postgres{} - addr := fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable&x-custom=foobar", 
i.Host(), i.Port()) - _, err := p.Open(addr) - if err != nil { - t.Fatalf("%v", err) - } - }) -} - -func TestWithSchema(t *testing.T) { - mt.ParallelTest(t, versions, isReady, - func(t *testing.T, i mt.Instance) { - p := &Postgres{} - addr := fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable", i.Host(), i.Port()) - d, err := p.Open(addr) - if err != nil { - t.Fatalf("%v", err) - } - - // create foobar schema - if err := d.Run(bytes.NewReader([]byte("CREATE SCHEMA foobar AUTHORIZATION postgres"))); err != nil { - t.Fatal(err) - } - if err := d.SetVersion(1, false); err != nil { - t.Fatal(err) - } - - // re-connect using that schema - d2, err := p.Open(fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable&search_path=foobar", i.Host(), i.Port())) - if err != nil { - t.Fatalf("%v", err) - } - - version, _, err := d2.Version() - if err != nil { - t.Fatal(err) - } - if version != -1 { - t.Fatal("expected NilVersion") - } - - // now update version and compare - if err := d2.SetVersion(2, false); err != nil { - t.Fatal(err) - } - version, _, err = d2.Version() - if err != nil { - t.Fatal(err) - } - if version != 2 { - t.Fatal("expected version 2") - } - - // meanwhile, the public schema still has the other version - version, _, err = d.Version() - if err != nil { - t.Fatal(err) - } - if version != 1 { - t.Fatal("expected version 1") - } - }) -} - -func TestWithInstance(t *testing.T) { - -} diff --git a/vendor/github.com/rdallman/migrate/database/ql/README.md b/vendor/github.com/rdallman/migrate/database/ql/README.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/rdallman/migrate/database/ql/migration/33_create_table.down.sql b/vendor/github.com/rdallman/migrate/database/ql/migration/33_create_table.down.sql deleted file mode 100644 index 72d18c554..000000000 --- a/vendor/github.com/rdallman/migrate/database/ql/migration/33_create_table.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE IF EXISTS pets; \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/ql/migration/33_create_table.up.sql b/vendor/github.com/rdallman/migrate/database/ql/migration/33_create_table.up.sql deleted file mode 100644 index 5ad3404d1..000000000 --- a/vendor/github.com/rdallman/migrate/database/ql/migration/33_create_table.up.sql +++ /dev/null @@ -1,3 +0,0 @@ -CREATE TABLE pets ( - name string -); \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/ql/migration/44_alter_table.down.sql b/vendor/github.com/rdallman/migrate/database/ql/migration/44_alter_table.down.sql deleted file mode 100644 index 72d18c554..000000000 --- a/vendor/github.com/rdallman/migrate/database/ql/migration/44_alter_table.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE IF EXISTS pets; \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/ql/migration/44_alter_table.up.sql b/vendor/github.com/rdallman/migrate/database/ql/migration/44_alter_table.up.sql deleted file mode 100644 index 3993698de..000000000 --- a/vendor/github.com/rdallman/migrate/database/ql/migration/44_alter_table.up.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE pets ADD predator bool; \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/ql/ql.go b/vendor/github.com/rdallman/migrate/database/ql/ql.go deleted file mode 100644 index 46722a9c2..000000000 --- a/vendor/github.com/rdallman/migrate/database/ql/ql.go +++ /dev/null @@ -1,212 +0,0 @@ -package ql - -import ( - "database/sql" - "fmt" - "io" -
"io/ioutil" - "strings" - - nurl "net/url" - - _ "github.com/cznic/ql/driver" - "github.com/mattes/migrate" - "github.com/mattes/migrate/database" -) - -func init() { - database.Register("ql", &Ql{}) -} - -var DefaultMigrationsTable = "schema_migrations" -var ( - ErrDatabaseDirty = fmt.Errorf("database is dirty") - ErrNilConfig = fmt.Errorf("no config") - ErrNoDatabaseName = fmt.Errorf("no database name") - ErrAppendPEM = fmt.Errorf("failed to append PEM") -) - -type Config struct { - MigrationsTable string - DatabaseName string -} - -type Ql struct { - db *sql.DB - isLocked bool - - config *Config -} - -func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) { - if config == nil { - return nil, ErrNilConfig - } - - if err := instance.Ping(); err != nil { - return nil, err - } - if len(config.MigrationsTable) == 0 { - config.MigrationsTable = DefaultMigrationsTable - } - - mx := &Ql{ - db: instance, - config: config, - } - if err := mx.ensureVersionTable(); err != nil { - return nil, err - } - return mx, nil -} -func (m *Ql) ensureVersionTable() error { - tx, err := m.db.Begin() - if err != nil { - return err - } - if _, err := tx.Exec(fmt.Sprintf(` - CREATE TABLE IF NOT EXISTS %s (version uint64,dirty bool); - CREATE UNIQUE INDEX IF NOT EXISTS version_unique ON %s (version); -`, m.config.MigrationsTable, m.config.MigrationsTable)); err != nil { - if err := tx.Rollback(); err != nil { - return err - } - return err - } - if err := tx.Commit(); err != nil { - return err - } - return nil -} - -func (m *Ql) Open(url string) (database.Driver, error) { - purl, err := nurl.Parse(url) - if err != nil { - return nil, err - } - dbfile := strings.Replace(migrate.FilterCustomQuery(purl).String(), "ql://", "", 1) - db, err := sql.Open("ql", dbfile) - if err != nil { - return nil, err - } - migrationsTable := purl.Query().Get("x-migrations-table") - if len(migrationsTable) == 0 { - migrationsTable = DefaultMigrationsTable - } - mx, err := WithInstance(db, &Config{ - DatabaseName: purl.Path, - MigrationsTable: migrationsTable, - }) - if err != nil { - return nil, err - } - return mx, nil -} -func (m *Ql) Close() error { - return m.db.Close() -} -func (m *Ql) Drop() error { - query := `SELECT Name FROM __Table` - tables, err := m.db.Query(query) - if err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - defer tables.Close() - tableNames := make([]string, 0) - for tables.Next() { - var tableName string - if err := tables.Scan(&tableName); err != nil { - return err - } - if len(tableName) > 0 { - if strings.HasPrefix(tableName, "__") == false { - tableNames = append(tableNames, tableName) - } - } - } - if len(tableNames) > 0 { - for _, t := range tableNames { - query := "DROP TABLE " + t - err = m.executeQuery(query) - if err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - } - if err := m.ensureVersionTable(); err != nil { - return err - } - } - - return nil -} -func (m *Ql) Lock() error { - if m.isLocked { - return database.ErrLocked - } - m.isLocked = true - return nil -} -func (m *Ql) Unlock() error { - if !m.isLocked { - return nil - } - m.isLocked = false - return nil -} -func (m *Ql) Run(migration io.Reader) error { - migr, err := ioutil.ReadAll(migration) - if err != nil { - return err - } - query := string(migr[:]) - - return m.executeQuery(query) -} -func (m *Ql) executeQuery(query string) error { - tx, err := m.db.Begin() - if err != nil { - return &database.Error{OrigErr: err, Err: "transaction start failed"} - } - if _, 
err := tx.Exec(query); err != nil { - tx.Rollback() - return &database.Error{OrigErr: err, Query: []byte(query)} - } - if err := tx.Commit(); err != nil { - return &database.Error{OrigErr: err, Err: "transaction commit failed"} - } - return nil -} -func (m *Ql) SetVersion(version int, dirty bool) error { - tx, err := m.db.Begin() - if err != nil { - return &database.Error{OrigErr: err, Err: "transaction start failed"} - } - - query := "TRUNCATE TABLE " + m.config.MigrationsTable - if _, err := tx.Exec(query); err != nil { - tx.Rollback() - return &database.Error{OrigErr: err, Query: []byte(query)} - } - - if version >= 0 { - query := fmt.Sprintf(`INSERT INTO %s (version, dirty) VALUES (%d, %t)`, m.config.MigrationsTable, version, dirty) - if _, err := tx.Exec(query); err != nil { - tx.Rollback() - return &database.Error{OrigErr: err, Query: []byte(query)} - } - } - - if err := tx.Commit(); err != nil { - return &database.Error{OrigErr: err, Err: "transaction commit failed"} - } - - return nil -} - -func (m *Ql) Version() (version int, dirty bool, err error) { - query := "SELECT version, dirty FROM " + m.config.MigrationsTable + " LIMIT 1" - err = m.db.QueryRow(query).Scan(&version, &dirty) - if err != nil { - return database.NilVersion, false, nil - } - return version, dirty, nil -} diff --git a/vendor/github.com/rdallman/migrate/database/ql/ql_test.go b/vendor/github.com/rdallman/migrate/database/ql/ql_test.go deleted file mode 100644 index f04383fa2..000000000 --- a/vendor/github.com/rdallman/migrate/database/ql/ql_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package ql - -import ( - "database/sql" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" - - _ "github.com/cznic/ql/driver" - "github.com/mattes/migrate" - dt "github.com/mattes/migrate/database/testing" - _ "github.com/mattes/migrate/source/file" -) - -func Test(t *testing.T) { - dir, err := ioutil.TempDir("", "ql-driver-test") - if err != nil { - t.Fatal(err) - } - defer func() { - os.RemoveAll(dir) - }() - fmt.Printf("DB path : %s\n", filepath.Join(dir, "ql.db")) - p := &Ql{} - addr := fmt.Sprintf("ql://%s", filepath.Join(dir, "ql.db")) - d, err := p.Open(addr) - if err != nil { - t.Fatalf("%v", err) - } - - db, err := sql.Open("ql", filepath.Join(dir, "ql.db")) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := db.Close(); err != nil { - return - } - }() - dt.Test(t, d, []byte("CREATE TABLE t (Qty int, Name string);")) - driver, err := WithInstance(db, &Config{}) - if err != nil { - t.Fatalf("%v", err) - } - if err := d.Drop(); err != nil { - t.Fatal(err) - } - - m, err := migrate.NewWithDatabaseInstance( - "file://./migration", - "ql", driver) - if err != nil { - t.Fatalf("%v", err) - } - fmt.Println("UP") - err = m.Up() - if err != nil { - t.Fatalf("%v", err) - } -} diff --git a/vendor/github.com/rdallman/migrate/database/redshift/README.md b/vendor/github.com/rdallman/migrate/database/redshift/README.md deleted file mode 100644 index a03d109ae..000000000 --- a/vendor/github.com/rdallman/migrate/database/redshift/README.md +++ /dev/null @@ -1,6 +0,0 @@ -Redshift -=== - -This provides a Redshift driver for migrations. It is used whenever the URL of the database starts with `redshift://`. - -Redshift is PostgreSQL compatible but has some specific features (or lack thereof) that require slightly different behavior.
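Because driver selection is keyed off the URL scheme, pointing migrations at Redshift only requires swapping the scheme; the wrapper below rewrites `redshift://` to `postgres://` before delegating to the postgres driver. A hypothetical usage sketch under the same `mattes/migrate` API (the cluster endpoint and credentials are placeholders):

```go
package main

import (
	"log"

	"github.com/mattes/migrate"
	// Registers the "redshift" scheme, the lock-free wrapper around postgres.
	_ "github.com/mattes/migrate/database/redshift"
	_ "github.com/mattes/migrate/source/file"
)

func main() {
	// Same URL shape as postgres, but the redshift:// scheme selects the wrapper.
	m, err := migrate.New(
		"file://examples/migrations",
		"redshift://user:password@example-cluster.redshift.amazonaws.com:5439/dbname",
	)
	if err != nil {
		log.Fatal(err)
	}
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		log.Fatal(err)
	}
}
```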
diff --git a/vendor/github.com/rdallman/migrate/database/redshift/redshift.go b/vendor/github.com/rdallman/migrate/database/redshift/redshift.go deleted file mode 100644 index 99cdde725..000000000 --- a/vendor/github.com/rdallman/migrate/database/redshift/redshift.go +++ /dev/null @@ -1,46 +0,0 @@ -package redshift - -import ( - "net/url" - - "github.com/mattes/migrate/database" - "github.com/mattes/migrate/database/postgres" -) - -// init registers the driver under the name 'redshift' -func init() { - db := new(Redshift) - db.Driver = new(postgres.Postgres) - - database.Register("redshift", db) -} - -// Redshift is a wrapper around the PostgreSQL driver which implements Redshift-specific behavior. -// -// Currently, the only different behaviour is the lack of locking in Redshift. The (Un)Lock() method(s) have been overridden from the PostgreSQL adapter to simply return nil. -type Redshift struct { - // The wrapped PostgreSQL driver. - database.Driver -} - -// Open implements the database.Driver interface by parsing the URL, switching the scheme from "redshift" to "postgres", and delegating to the underlying PostgreSQL driver. -func (driver *Redshift) Open(dsn string) (database.Driver, error) { - parsed, err := url.Parse(dsn) - if err != nil { - return nil, err - } - - parsed.Scheme = "postgres" - psql, err := driver.Driver.Open(parsed.String()) - if err != nil { - return nil, err - } - - return &Redshift{Driver: psql}, nil -} - -// Lock implements the database.Driver interface by not locking and returning nil. -func (driver *Redshift) Lock() error { return nil } - -// Unlock implements the database.Driver interface by not unlocking and returning nil. -func (driver *Redshift) Unlock() error { return nil } diff --git a/vendor/github.com/rdallman/migrate/database/shell/README.md b/vendor/github.com/rdallman/migrate/database/shell/README.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/rdallman/migrate/database/spanner/README.md b/vendor/github.com/rdallman/migrate/database/spanner/README.md deleted file mode 100644 index 0de867a8d..000000000 --- a/vendor/github.com/rdallman/migrate/database/spanner/README.md +++ /dev/null @@ -1,35 +0,0 @@ -# Google Cloud Spanner - -## Usage - -The DSN must be given in the following format. - -`spanner://projects/{projectId}/instances/{instanceId}/databases/{databaseName}` - -See [Google Spanner Documentation](https://cloud.google.com/spanner/docs) for details. - - -| Param | WithInstance Config | Description | -| ----- | ------------------- | ----------- | -| `x-migrations-table` | `MigrationsTable` | Name of the migrations table | -| `url` | `DatabaseName` | The full path to the Spanner database resource. If provided as part of `Config` it must not contain a scheme or query string to match the format `projects/{projectId}/instances/{instanceId}/databases/{databaseName}`| -| `projectId` || The Google Cloud Platform project id -| `instanceId` || The id of the instance running Spanner -| `databaseName` || The name of the Spanner database - - -> **Note:** Google Cloud Spanner migrations can take a considerable amount of -> time. The migrations provided as part of the example take about 6 minutes to -> run on a small instance. 
-> -> ```log -> 1481574547/u create_users_table (21.354507597s) -> 1496539702/u add_city_to_users (41.647359754s) -> 1496601752/u add_index_on_user_emails (2m12.155787369s) -> 1496602638/u create_books_table (2m30.77299181s) -> ``` - -## Testing - -To unit test the `spanner` driver, `SPANNER_DATABASE` needs to be set. You'll -need to sign up for Google Cloud Platform (GCP) and have a running Spanner -instance since it is not possible to run Google Spanner outside GCP. \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1481574547_create_users_table.down.sql b/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1481574547_create_users_table.down.sql deleted file mode 100644 index 7bd522c12..000000000 --- a/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1481574547_create_users_table.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE Users diff --git a/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1481574547_create_users_table.up.sql b/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1481574547_create_users_table.up.sql deleted file mode 100644 index 97b8bdb74..000000000 --- a/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1481574547_create_users_table.up.sql +++ /dev/null @@ -1,5 +0,0 @@ -CREATE TABLE Users ( - UserId INT64, - Name STRING(40), - Email STRING(83) -) PRIMARY KEY(UserId) \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496539702_add_city_to_users.down.sql b/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496539702_add_city_to_users.down.sql deleted file mode 100644 index f0fcd0854..000000000 --- a/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496539702_add_city_to_users.down.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE Users DROP COLUMN city \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496539702_add_city_to_users.up.sql b/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496539702_add_city_to_users.up.sql deleted file mode 100644 index b2d6c02bf..000000000 --- a/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496539702_add_city_to_users.up.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE Users ADD COLUMN city STRING(100) \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496601752_add_index_on_user_emails.down.sql b/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496601752_add_index_on_user_emails.down.sql deleted file mode 100644 index 29f92559d..000000000 --- a/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496601752_add_index_on_user_emails.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP INDEX UsersEmailIndex diff --git a/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496601752_add_index_on_user_emails.up.sql b/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496601752_add_index_on_user_emails.up.sql deleted file mode 100644 index e77b7f2db..000000000 --- a/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496601752_add_index_on_user_emails.up.sql +++ /dev/null @@ -1 +0,0 @@ -CREATE UNIQUE INDEX UsersEmailIndex ON Users (Email) diff --git
a/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496602638_create_books_table.down.sql b/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496602638_create_books_table.down.sql deleted file mode 100644 index bd2ce054c..000000000 --- a/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496602638_create_books_table.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE Books \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496602638_create_books_table.up.sql b/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496602638_create_books_table.up.sql deleted file mode 100644 index 0bfa0d484..000000000 --- a/vendor/github.com/rdallman/migrate/database/spanner/examples/migrations/1496602638_create_books_table.up.sql +++ /dev/null @@ -1,6 +0,0 @@ -CREATE TABLE Books ( - UserId INT64, - Name STRING(40), - Author STRING(40) -) PRIMARY KEY(UserId, Name), -INTERLEAVE IN PARENT Users ON DELETE CASCADE diff --git a/vendor/github.com/rdallman/migrate/database/spanner/spanner.go b/vendor/github.com/rdallman/migrate/database/spanner/spanner.go deleted file mode 100644 index 6c65bab3f..000000000 --- a/vendor/github.com/rdallman/migrate/database/spanner/spanner.go +++ /dev/null @@ -1,294 +0,0 @@ -package spanner - -import ( - "fmt" - "io" - "io/ioutil" - "log" - nurl "net/url" - "regexp" - "strings" - - "golang.org/x/net/context" - - "cloud.google.com/go/spanner" - sdb "cloud.google.com/go/spanner/admin/database/apiv1" - - "github.com/mattes/migrate" - "github.com/mattes/migrate/database" - - "google.golang.org/api/iterator" - adminpb "google.golang.org/genproto/googleapis/spanner/admin/database/v1" -) - -func init() { - db := Spanner{} - database.Register("spanner", &db) -} - -// DefaultMigrationsTable is used if no custom table is specified -const DefaultMigrationsTable = "SchemaMigrations" - -// Driver errors -var ( - ErrNilConfig = fmt.Errorf("no config") - ErrNoDatabaseName = fmt.Errorf("no database name") - ErrNoSchema = fmt.Errorf("no schema") - ErrDatabaseDirty = fmt.Errorf("database is dirty") -) - -// Config used for a Spanner instance -type Config struct { - MigrationsTable string - DatabaseName string -} - -// Spanner implements database.Driver for Google Cloud Spanner -type Spanner struct { - db *DB - - config *Config -} - -type DB struct { - admin *sdb.DatabaseAdminClient - data *spanner.Client -} - -// WithInstance implements database.Driver -func WithInstance(instance *DB, config *Config) (database.Driver, error) { - if config == nil { - return nil, ErrNilConfig - } - - if len(config.DatabaseName) == 0 { - return nil, ErrNoDatabaseName - } - - if len(config.MigrationsTable) == 0 { - config.MigrationsTable = DefaultMigrationsTable - } - - sx := &Spanner{ - db: instance, - config: config, - } - - if err := sx.ensureVersionTable(); err != nil { - return nil, err - } - - return sx, nil -} - -// Open implements database.Driver -func (s *Spanner) Open(url string) (database.Driver, error) { - purl, err := nurl.Parse(url) - if err != nil { - return nil, err - } - - ctx := context.Background() - - adminClient, err := sdb.NewDatabaseAdminClient(ctx) - if err != nil { - return nil, err - } - dbname := strings.Replace(migrate.FilterCustomQuery(purl).String(), "spanner://", "", 1) - dataClient, err := spanner.NewClient(ctx, dbname) - if err != nil { - log.Fatal(err) - } - - migrationsTable := purl.Query().Get("x-migrations-table") - if len(migrationsTable) 
== 0 { - migrationsTable = DefaultMigrationsTable - } - - db := &DB{admin: adminClient, data: dataClient} - return WithInstance(db, &Config{ - DatabaseName: dbname, - MigrationsTable: migrationsTable, - }) -} - -// Close implements database.Driver -func (s *Spanner) Close() error { - s.db.data.Close() - return s.db.admin.Close() -} - -// Lock implements database.Driver but doesn't do anything because Spanner only -// enqueues the UpdateDatabaseDdlRequest. -func (s *Spanner) Lock() error { - return nil -} - -// Unlock implements database.Driver but no action required, see Lock. -func (s *Spanner) Unlock() error { - return nil -} - -// Run implements database.Driver -func (s *Spanner) Run(migration io.Reader) error { - migr, err := ioutil.ReadAll(migration) - if err != nil { - return err - } - - // run migration - stmts := migrationStatements(migr) - ctx := context.Background() - - op, err := s.db.admin.UpdateDatabaseDdl(ctx, &adminpb.UpdateDatabaseDdlRequest{ - Database: s.config.DatabaseName, - Statements: stmts, - }) - - if err != nil { - return &database.Error{OrigErr: err, Err: "migration failed", Query: migr} - } - - if err := op.Wait(ctx); err != nil { - return &database.Error{OrigErr: err, Err: "migration failed", Query: migr} - } - - return nil -} - -// SetVersion implements database.Driver -func (s *Spanner) SetVersion(version int, dirty bool) error { - ctx := context.Background() - - _, err := s.db.data.ReadWriteTransaction(ctx, - func(ctx context.Context, txn *spanner.ReadWriteTransaction) error { - m := []*spanner.Mutation{ - spanner.Delete(s.config.MigrationsTable, spanner.AllKeys()), - spanner.Insert(s.config.MigrationsTable, - []string{"Version", "Dirty"}, - []interface{}{version, dirty}, - )} - return txn.BufferWrite(m) - }) - if err != nil { - return &database.Error{OrigErr: err} - } - - return nil -} - -// Version implements database.Driver -func (s *Spanner) Version() (version int, dirty bool, err error) { - ctx := context.Background() - - stmt := spanner.Statement{ - SQL: `SELECT Version, Dirty FROM ` + s.config.MigrationsTable + ` LIMIT 1`, - } - iter := s.db.data.Single().Query(ctx, stmt) - defer iter.Stop() - - row, err := iter.Next() - switch err { - case iterator.Done: - return database.NilVersion, false, nil - case nil: - var v int64 - if err = row.Columns(&v, &dirty); err != nil { - return 0, false, &database.Error{OrigErr: err, Query: []byte(stmt.SQL)} - } - version = int(v) - default: - return 0, false, &database.Error{OrigErr: err, Query: []byte(stmt.SQL)} - } - - return version, dirty, nil -} - -// Drop implements database.Driver. Retrieves the database schema first and -// creates statements to drop the indexes and tables accordingly. -// Note: The drop statements are created in reverse order to how they're -// provided in the schema. Assuming the schema describes how the database can -// be "built up", it seems logical to "unbuild" the database simply by going the -// opposite direction.
More testing is needed. -func (s *Spanner) Drop() error { - ctx := context.Background() - res, err := s.db.admin.GetDatabaseDdl(ctx, &adminpb.GetDatabaseDdlRequest{ - Database: s.config.DatabaseName, - }) - if err != nil { - return &database.Error{OrigErr: err, Err: "drop failed"} - } - if len(res.Statements) == 0 { - return nil - } - - r := regexp.MustCompile(`(CREATE TABLE\s(\S+)\s)|(CREATE.+INDEX\s(\S+)\s)`) - stmts := make([]string, 0) - for i := len(res.Statements) - 1; i >= 0; i-- { - s := res.Statements[i] - m := r.FindSubmatch([]byte(s)) - - if len(m) == 0 { - continue - } else if tbl := m[2]; len(tbl) > 0 { - stmts = append(stmts, fmt.Sprintf(`DROP TABLE %s`, tbl)) - } else if idx := m[4]; len(idx) > 0 { - stmts = append(stmts, fmt.Sprintf(`DROP INDEX %s`, idx)) - } - } - - op, err := s.db.admin.UpdateDatabaseDdl(ctx, &adminpb.UpdateDatabaseDdlRequest{ - Database: s.config.DatabaseName, - Statements: stmts, - }) - if err != nil { - return &database.Error{OrigErr: err, Query: []byte(strings.Join(stmts, "; "))} - } - if err := op.Wait(ctx); err != nil { - return &database.Error{OrigErr: err, Query: []byte(strings.Join(stmts, "; "))} - } - - if err := s.ensureVersionTable(); err != nil { - return err - } - - return nil -} - -func (s *Spanner) ensureVersionTable() error { - ctx := context.Background() - tbl := s.config.MigrationsTable - iter := s.db.data.Single().Read(ctx, tbl, spanner.AllKeys(), []string{"Version"}) - if err := iter.Do(func(r *spanner.Row) error { return nil }); err == nil { - return nil - } - - stmt := fmt.Sprintf(`CREATE TABLE %s ( - Version INT64 NOT NULL, - Dirty BOOL NOT NULL - ) PRIMARY KEY(Version)`, tbl) - - op, err := s.db.admin.UpdateDatabaseDdl(ctx, &adminpb.UpdateDatabaseDdlRequest{ - Database: s.config.DatabaseName, - Statements: []string{stmt}, - }) - - if err != nil { - return &database.Error{OrigErr: err, Query: []byte(stmt)} - } - if err := op.Wait(ctx); err != nil { - return &database.Error{OrigErr: err, Query: []byte(stmt)} - } - - return nil -} - -func migrationStatements(migration []byte) []string { - regex := regexp.MustCompile(";$") - migrationString := string(migration[:]) - migrationString = strings.TrimSpace(migrationString) - migrationString = regex.ReplaceAllString(migrationString, "") - - statements := strings.Split(migrationString, ";") - return statements -} diff --git a/vendor/github.com/rdallman/migrate/database/spanner/spanner_test.go b/vendor/github.com/rdallman/migrate/database/spanner/spanner_test.go deleted file mode 100644 index 43d475ca4..000000000 --- a/vendor/github.com/rdallman/migrate/database/spanner/spanner_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package spanner - -import ( - "fmt" - "os" - "testing" - - dt "github.com/mattes/migrate/database/testing" -) - -func Test(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - db, ok := os.LookupEnv("SPANNER_DATABASE") - if !ok { - t.Skip("SPANNER_DATABASE not set, skipping test.") - } - - s := &Spanner{} - addr := fmt.Sprintf("spanner://%v", db) - d, err := s.Open(addr) - if err != nil { - t.Fatalf("%v", err) - } - dt.Test(t, d, []byte("SELECT 1")) -} diff --git a/vendor/github.com/rdallman/migrate/database/sqlite3/README.md b/vendor/github.com/rdallman/migrate/database/sqlite3/README.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/rdallman/migrate/database/sqlite3/migration/33_create_table.down.sql b/vendor/github.com/rdallman/migrate/database/sqlite3/migration/33_create_table.down.sql deleted file mode 100644 index
72d18c554..000000000 --- a/vendor/github.com/rdallman/migrate/database/sqlite3/migration/33_create_table.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE IF EXISTS pets; \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/sqlite3/migration/33_create_table.up.sql b/vendor/github.com/rdallman/migrate/database/sqlite3/migration/33_create_table.up.sql deleted file mode 100644 index 5ad3404d1..000000000 --- a/vendor/github.com/rdallman/migrate/database/sqlite3/migration/33_create_table.up.sql +++ /dev/null @@ -1,3 +0,0 @@ -CREATE TABLE pets ( - name string -); \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/sqlite3/migration/44_alter_table.down.sql b/vendor/github.com/rdallman/migrate/database/sqlite3/migration/44_alter_table.down.sql deleted file mode 100644 index 72d18c554..000000000 --- a/vendor/github.com/rdallman/migrate/database/sqlite3/migration/44_alter_table.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE IF EXISTS pets; \ No newline at end of file diff --git a/vendor/github.com/rdallman/migrate/database/sqlite3/migration/44_alter_table.up.sql b/vendor/github.com/rdallman/migrate/database/sqlite3/migration/44_alter_table.up.sql deleted file mode 100644 index f0682fcca..000000000 --- a/vendor/github.com/rdallman/migrate/database/sqlite3/migration/44_alter_table.up.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE pets ADD predator bool; diff --git a/vendor/github.com/rdallman/migrate/database/sqlite3/sqlite3.go b/vendor/github.com/rdallman/migrate/database/sqlite3/sqlite3.go deleted file mode 100644 index bfd1a5b81..000000000 --- a/vendor/github.com/rdallman/migrate/database/sqlite3/sqlite3.go +++ /dev/null @@ -1,214 +0,0 @@ -package sqlite3 - -import ( - "database/sql" - "fmt" - "github.com/mattes/migrate" - "github.com/mattes/migrate/database" - _ "github.com/mattn/go-sqlite3" - "io" - "io/ioutil" - nurl "net/url" - "strings" -) - -func init() { - database.Register("sqlite3", &Sqlite{}) -} - -var DefaultMigrationsTable = "schema_migrations" -var ( - ErrDatabaseDirty = fmt.Errorf("database is dirty") - ErrNilConfig = fmt.Errorf("no config") - ErrNoDatabaseName = fmt.Errorf("no database name") -) - -type Config struct { - MigrationsTable string - DatabaseName string -} - -type Sqlite struct { - db *sql.DB - isLocked bool - - config *Config -} - -func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) { - if config == nil { - return nil, ErrNilConfig - } - - if err := instance.Ping(); err != nil { - return nil, err - } - if len(config.MigrationsTable) == 0 { - config.MigrationsTable = DefaultMigrationsTable - } - - mx := &Sqlite{ - db: instance, - config: config, - } - if err := mx.ensureVersionTable(); err != nil { - return nil, err - } - return mx, nil -} - -func (m *Sqlite) ensureVersionTable() error { - - query := fmt.Sprintf(` - CREATE TABLE IF NOT EXISTS %s (version uint64,dirty bool); - CREATE UNIQUE INDEX IF NOT EXISTS version_unique ON %s (version); - `, DefaultMigrationsTable, DefaultMigrationsTable) - - if _, err := m.db.Exec(query); err != nil { - return err - } - return nil -} - -func (m *Sqlite) Open(url string) (database.Driver, error) { - purl, err := nurl.Parse(url) - if err != nil { - return nil, err - } - dbfile := strings.Replace(migrate.FilterCustomQuery(purl).String(), "sqlite3://", "", 1) - db, err := sql.Open("sqlite3", dbfile) - if err != nil { - return nil, err - } - - migrationsTable := purl.Query().Get("x-migrations-table") - if len(migrationsTable) == 0 { - migrationsTable 
= DefaultMigrationsTable - } - mx, err := WithInstance(db, &Config{ - DatabaseName: purl.Path, - MigrationsTable: migrationsTable, - }) - if err != nil { - return nil, err - } - return mx, nil -} - -func (m *Sqlite) Close() error { - return m.db.Close() -} - -func (m *Sqlite) Drop() error { - query := `SELECT name FROM sqlite_master WHERE type = 'table';` - tables, err := m.db.Query(query) - if err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - defer tables.Close() - tableNames := make([]string, 0) - for tables.Next() { - var tableName string - if err := tables.Scan(&tableName); err != nil { - return err - } - if len(tableName) > 0 { - tableNames = append(tableNames, tableName) - } - } - if len(tableNames) > 0 { - for _, t := range tableNames { - query := "DROP TABLE " + t - err = m.executeQuery(query) - if err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - } - if err := m.ensureVersionTable(); err != nil { - return err - } - query := "VACUUM" - _, err = m.db.Query(query) - if err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - } - - return nil -} - -func (m *Sqlite) Lock() error { - if m.isLocked { - return database.ErrLocked - } - m.isLocked = true - return nil -} - -func (m *Sqlite) Unlock() error { - if !m.isLocked { - return nil - } - m.isLocked = false - return nil -} - -func (m *Sqlite) Run(migration io.Reader) error { - migr, err := ioutil.ReadAll(migration) - if err != nil { - return err - } - query := string(migr[:]) - - return m.executeQuery(query) -} - -func (m *Sqlite) executeQuery(query string) error { - tx, err := m.db.Begin() - if err != nil { - return &database.Error{OrigErr: err, Err: "transaction start failed"} - } - if _, err := tx.Exec(query); err != nil { - tx.Rollback() - return &database.Error{OrigErr: err, Query: []byte(query)} - } - if err := tx.Commit(); err != nil { - return &database.Error{OrigErr: err, Err: "transaction commit failed"} - } - return nil -} - -func (m *Sqlite) SetVersion(version int, dirty bool) error { - tx, err := m.db.Begin() - if err != nil { - return &database.Error{OrigErr: err, Err: "transaction start failed"} - } - - query := "DELETE FROM " + m.config.MigrationsTable - if _, err := tx.Exec(query); err != nil { - return &database.Error{OrigErr: err, Query: []byte(query)} - } - - if version >= 0 { - query := fmt.Sprintf(`INSERT INTO %s (version, dirty) VALUES (%d, '%t')`, m.config.MigrationsTable, version, dirty) - if _, err := tx.Exec(query); err != nil { - tx.Rollback() - return &database.Error{OrigErr: err, Query: []byte(query)} - } - } - - if err := tx.Commit(); err != nil { - return &database.Error{OrigErr: err, Err: "transaction commit failed"} - } - - return nil -} - -func (m *Sqlite) Version() (version int, dirty bool, err error) { - query := "SELECT version, dirty FROM " + m.config.MigrationsTable + " LIMIT 1" - err = m.db.QueryRow(query).Scan(&version, &dirty) - if err != nil { - return database.NilVersion, false, nil - } - return version, dirty, nil -} diff --git a/vendor/github.com/rdallman/migrate/database/sqlite3/sqlite3_test.go b/vendor/github.com/rdallman/migrate/database/sqlite3/sqlite3_test.go deleted file mode 100644 index 6a5c5c864..000000000 --- a/vendor/github.com/rdallman/migrate/database/sqlite3/sqlite3_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package sqlite3 - -import ( - "database/sql" - "fmt" - "github.com/mattes/migrate" - dt "github.com/mattes/migrate/database/testing" - _ "github.com/mattes/migrate/source/file" - _ 
"github.com/mattn/go-sqlite3" - "io/ioutil" - "os" - "path/filepath" - "testing" -) - -func Test(t *testing.T) { - dir, err := ioutil.TempDir("", "sqlite3-driver-test") - if err != nil { - return - } - defer func() { - os.RemoveAll(dir) - }() - fmt.Printf("DB path : %s\n", filepath.Join(dir, "sqlite3.db")) - p := &Sqlite{} - addr := fmt.Sprintf("sqlite3://%s", filepath.Join(dir, "sqlite3.db")) - d, err := p.Open(addr) - if err != nil { - t.Fatalf("%v", err) - } - - db, err := sql.Open("sqlite3", filepath.Join(dir, "sqlite3.db")) - if err != nil { - return - } - defer func() { - if err := db.Close(); err != nil { - return - } - }() - dt.Test(t, d, []byte("CREATE TABLE t (Qty int, Name string);")) - driver, err := WithInstance(db, &Config{}) - if err != nil { - t.Fatalf("%v", err) - } - if err := d.Drop(); err != nil { - t.Fatal(err) - } - - m, err := migrate.NewWithDatabaseInstance( - "file://./migration", - "ql", driver) - if err != nil { - t.Fatalf("%v", err) - } - fmt.Println("UP") - err = m.Up() - if err != nil { - t.Fatalf("%v", err) - } -} diff --git a/vendor/github.com/rdallman/migrate/database/stub/stub.go b/vendor/github.com/rdallman/migrate/database/stub/stub.go deleted file mode 100644 index 172bcd37b..000000000 --- a/vendor/github.com/rdallman/migrate/database/stub/stub.go +++ /dev/null @@ -1,95 +0,0 @@ -package stub - -import ( - "io" - "io/ioutil" - "reflect" - - "github.com/mattes/migrate/database" -) - -func init() { - database.Register("stub", &Stub{}) -} - -type Stub struct { - Url string - Instance interface{} - CurrentVersion int - MigrationSequence []string - LastRunMigration []byte // todo: make []string - IsDirty bool - IsLocked bool - - Config *Config -} - -func (s *Stub) Open(url string) (database.Driver, error) { - return &Stub{ - Url: url, - CurrentVersion: -1, - MigrationSequence: make([]string, 0), - Config: &Config{}, - }, nil -} - -type Config struct{} - -func WithInstance(instance interface{}, config *Config) (database.Driver, error) { - return &Stub{ - Instance: instance, - CurrentVersion: -1, - MigrationSequence: make([]string, 0), - Config: config, - }, nil -} - -func (s *Stub) Close() error { - return nil -} - -func (s *Stub) Lock() error { - if s.IsLocked { - return database.ErrLocked - } - s.IsLocked = true - return nil -} - -func (s *Stub) Unlock() error { - s.IsLocked = false - return nil -} - -func (s *Stub) Run(migration io.Reader) error { - m, err := ioutil.ReadAll(migration) - if err != nil { - return err - } - s.LastRunMigration = m - s.MigrationSequence = append(s.MigrationSequence, string(m[:])) - return nil -} - -func (s *Stub) SetVersion(version int, state bool) error { - s.CurrentVersion = version - s.IsDirty = state - return nil -} - -func (s *Stub) Version() (version int, dirty bool, err error) { - return s.CurrentVersion, s.IsDirty, nil -} - -const DROP = "DROP" - -func (s *Stub) Drop() error { - s.CurrentVersion = -1 - s.LastRunMigration = nil - s.MigrationSequence = append(s.MigrationSequence, DROP) - return nil -} - -func (s *Stub) EqualSequence(seq []string) bool { - return reflect.DeepEqual(seq, s.MigrationSequence) -} diff --git a/vendor/github.com/rdallman/migrate/database/stub/stub_test.go b/vendor/github.com/rdallman/migrate/database/stub/stub_test.go deleted file mode 100644 index 3d8b8926c..000000000 --- a/vendor/github.com/rdallman/migrate/database/stub/stub_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package stub - -import ( - "testing" - - dt "github.com/mattes/migrate/database/testing" -) - -func Test(t *testing.T) { - s := 
&Stub{} - d, err := s.Open("") - if err != nil { - t.Fatal(err) - } - dt.Test(t, d, []byte("/* foobar migration */")) -} diff --git a/vendor/github.com/rdallman/migrate/database/testing/testing.go b/vendor/github.com/rdallman/migrate/database/testing/testing.go deleted file mode 100644 index 4ab090d1a..000000000 --- a/vendor/github.com/rdallman/migrate/database/testing/testing.go +++ /dev/null @@ -1,138 +0,0 @@ -// Package testing has the database tests. -// All database drivers must pass the Test function. -// This lives in its own package so it stays a test dependency. -package testing - -import ( - "bytes" - "fmt" - "io" - "testing" - "time" - - "github.com/mattes/migrate/database" -) - -// Test runs tests against database implementations. -func Test(t *testing.T, d database.Driver, migration []byte) { - if migration == nil { - panic("test must provide migration reader") - } - - TestNilVersion(t, d) // test first - TestLockAndUnlock(t, d) - TestRun(t, d, bytes.NewReader(migration)) - TestDrop(t, d) - TestSetVersion(t, d) // also tests Version() -} - -func TestNilVersion(t *testing.T, d database.Driver) { - v, _, err := d.Version() - if err != nil { - t.Fatal(err) - } - if v != database.NilVersion { - t.Fatalf("Version: expected version to be NilVersion (-1), got %v", v) - } -} - -func TestLockAndUnlock(t *testing.T, d database.Driver) { - // add a timeout, in case there is a deadlock - done := make(chan bool, 1) - go func() { - timeout := time.After(15 * time.Second) - for { - select { - case <-done: - return - case <-timeout: - panic(fmt.Sprintf("Timeout after 15 seconds. Looks like a deadlock in Lock/UnLock.\n%#v", d)) - } - } - }() - defer func() { - done <- true - }() - - // run the locking test ... - - if err := d.Lock(); err != nil { - t.Fatal(err) - } - - // try to acquire lock again - if err := d.Lock(); err == nil { - t.Fatal("Lock: expected err not to be nil") - } - - // unlock - if err := d.Unlock(); err != nil { - t.Fatal(err) - } - - // try to lock - if err := d.Lock(); err != nil { - t.Fatal(err) - } - if err := d.Unlock(); err != nil { - t.Fatal(err) - } -} - -func TestRun(t *testing.T, d database.Driver, migration io.Reader) { - if migration == nil { - panic("migration can't be nil") - } - - if err := d.Run(migration); err != nil { - t.Fatal(err) - } -} - -func TestDrop(t *testing.T, d database.Driver) { - if err := d.Drop(); err != nil { - t.Fatal(err) - } -} - -func TestSetVersion(t *testing.T, d database.Driver) { - if err := d.SetVersion(1, true); err != nil { - t.Fatal(err) - } - - // call again - if err := d.SetVersion(1, true); err != nil { - t.Fatal(err) - } - - v, dirty, err := d.Version() - if err != nil { - t.Fatal(err) - } - if !dirty { - t.Fatal("expected dirty") - } - if v != 1 { - t.Fatal("expected version to be 1") - } - - if err := d.SetVersion(2, false); err != nil { - t.Fatal(err) - } - - // call again - if err := d.SetVersion(2, false); err != nil { - t.Fatal(err) - } - - v, dirty, err = d.Version() - if err != nil { - t.Fatal(err) - } - if dirty { - t.Fatal("expected not dirty") - } - if v != 2 { - t.Fatal("expected version to be 2") - } -} diff --git a/vendor/github.com/rdallman/migrate/database/util.go b/vendor/github.com/rdallman/migrate/database/util.go deleted file mode 100644 index c636a7abe..000000000 --- a/vendor/github.com/rdallman/migrate/database/util.go +++ /dev/null @@ -1,15 +0,0 @@ -package database - -import ( - "fmt" - "hash/crc32" -) - -const advisoryLockIdSalt uint = 1486364155 - -// inspired by rails migrations, see
https://goo.gl/8o9bCT -func GenerateAdvisoryLockId(databaseName string) (string, error) { - sum := crc32.ChecksumIEEE([]byte(databaseName)) - sum = sum * uint32(advisoryLockIdSalt) - return fmt.Sprintf("%v", sum), nil -} diff --git a/vendor/github.com/rdallman/migrate/database/util_test.go b/vendor/github.com/rdallman/migrate/database/util_test.go deleted file mode 100644 index 905c840b9..000000000 --- a/vendor/github.com/rdallman/migrate/database/util_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package database - -import ( - "testing" -) - -func TestGenerateAdvisoryLockId(t *testing.T) { - id, err := GenerateAdvisoryLockId("database_name") - if err != nil { - t.Errorf("expected err to be nil, got %v", err) - } - if len(id) == 0 { - t.Errorf("expected generated id not to be empty") - } - t.Logf("generated id: %v", id) -} diff --git a/vendor/github.com/rdallman/migrate/log.go b/vendor/github.com/rdallman/migrate/log.go deleted file mode 100644 index cb00b7798..000000000 --- a/vendor/github.com/rdallman/migrate/log.go +++ /dev/null @@ -1,12 +0,0 @@ -package migrate - -// Logger is an interface so you can pass in your own -// logging implementation. -type Logger interface { - - // Printf is like fmt.Printf - Printf(format string, v ...interface{}) - - // Verbose should return true when verbose logging output is wanted - Verbose() bool -} diff --git a/vendor/github.com/rdallman/migrate/migrate.go b/vendor/github.com/rdallman/migrate/migrate.go deleted file mode 100644 index 58414e8fc..000000000 --- a/vendor/github.com/rdallman/migrate/migrate.go +++ /dev/null @@ -1,920 +0,0 @@ -// Package migrate reads migrations from sources and runs them against databases. -// Sources are defined by the `source.Driver` and databases by the `database.Driver` -// interface. The driver interfaces are kept "dumb", all migration logic is kept -// in this package. -package migrate - -import ( - "fmt" - "os" - "sync" - "time" - - "github.com/mattes/migrate/database" - "github.com/mattes/migrate/source" -) - -// DefaultPrefetchMigrations sets the number of migrations to pre-read -// from the source. This is helpful if the source is remote, but has little -// effect for a local source (i.e. file system). -// Please note that this setting has a major impact on the memory usage, -// since each pre-read migration is buffered in memory. See DefaultBufferSize. -var DefaultPrefetchMigrations = uint(10) - -// DefaultLockTimeout sets the max time a database driver has to acquire a lock. -var DefaultLockTimeout = 15 * time.Second - -var ( - ErrNoChange = fmt.Errorf("no change") - ErrNilVersion = fmt.Errorf("no migration") - ErrLocked = fmt.Errorf("database locked") - ErrLockTimeout = fmt.Errorf("timeout: can't acquire database lock") -) - -// ErrShortLimit is an error returned when not enough migrations -// can be returned by a source for a given limit. -type ErrShortLimit struct { - Short uint -} - -// Error implements the error interface. -func (e ErrShortLimit) Error() string { - return fmt.Sprintf("limit %v short", e.Short) -} - -type ErrDirty struct { - Version int -} - -func (e ErrDirty) Error() string { - return fmt.Sprintf("Dirty database version %v. Fix and force version.", e.Version) -} - -type Migrate struct { - sourceName string - sourceDrv source.Driver - databaseName string - databaseDrv database.Driver - - // Log accepts a Logger interface - Log Logger - - // GracefulStop accepts `true` and will stop executing migrations - // as soon as possible at a safe break point, so that the database - // is not corrupted.
- GracefulStop chan bool - isGracefulStop bool - - isLockedMu *sync.Mutex - isLocked bool - - // PrefetchMigrations defaults to DefaultPrefetchMigrations, - // but can be set per Migrate instance. - PrefetchMigrations uint - - // LockTimeout defaults to DefaultLockTimeout, - // but can be set per Migrate instance. - LockTimeout time.Duration -} - -// New returns a new Migrate instance from a source URL and a database URL. -// The URL scheme is defined by each driver. -func New(sourceUrl, databaseUrl string) (*Migrate, error) { - m := newCommon() - - sourceName, err := schemeFromUrl(sourceUrl) - if err != nil { - return nil, err - } - m.sourceName = sourceName - - databaseName, err := schemeFromUrl(databaseUrl) - if err != nil { - return nil, err - } - m.databaseName = databaseName - - sourceDrv, err := source.Open(sourceUrl) - if err != nil { - return nil, err - } - m.sourceDrv = sourceDrv - - databaseDrv, err := database.Open(databaseUrl) - if err != nil { - return nil, err - } - m.databaseDrv = databaseDrv - - return m, nil -} - -// NewWithDatabaseInstance returns a new Migrate instance from a source URL -// and an existing database instance. The source URL scheme is defined by each driver. -// Use any string that can serve as an identifier during logging as databaseName. -// You are responsible for closing the underlying database client if necessary. -func NewWithDatabaseInstance(sourceUrl string, databaseName string, databaseInstance database.Driver) (*Migrate, error) { - m := newCommon() - - sourceName, err := schemeFromUrl(sourceUrl) - if err != nil { - return nil, err - } - m.sourceName = sourceName - - m.databaseName = databaseName - - sourceDrv, err := source.Open(sourceUrl) - if err != nil { - return nil, err - } - m.sourceDrv = sourceDrv - - m.databaseDrv = databaseInstance - - return m, nil -} - -// NewWithSourceInstance returns a new Migrate instance from an existing source instance -// and a database URL. The database URL scheme is defined by each driver. -// Use any string that can serve as an identifier during logging as sourceName. -// You are responsible for closing the underlying source client if necessary. -func NewWithSourceInstance(sourceName string, sourceInstance source.Driver, databaseUrl string) (*Migrate, error) { - m := newCommon() - - databaseName, err := schemeFromUrl(databaseUrl) - if err != nil { - return nil, err - } - m.databaseName = databaseName - - m.sourceName = sourceName - - databaseDrv, err := database.Open(databaseUrl) - if err != nil { - return nil, err - } - m.databaseDrv = databaseDrv - - m.sourceDrv = sourceInstance - - return m, nil -} - -// NewWithInstance returns a new Migrate instance from an existing source and -// database instance. Use any string that can serve as an identifier during logging -// as sourceName and databaseName. You are responsible for closing down -// the underlying source and database client if necessary. -func NewWithInstance(sourceName string, sourceInstance source.Driver, databaseName string, databaseInstance database.Driver) (*Migrate, error) { - m := newCommon() - - m.sourceName = sourceName - m.databaseName = databaseName - - m.sourceDrv = sourceInstance - m.databaseDrv = databaseInstance - - return m, nil -} - -func newCommon() *Migrate { - return &Migrate{ - GracefulStop: make(chan bool, 1), - PrefetchMigrations: DefaultPrefetchMigrations, - LockTimeout: DefaultLockTimeout, - isLockedMu: &sync.Mutex{}, - } -} - -// Close closes the source and the database.
-func (m *Migrate) Close() (source error, database error) { - databaseSrvClose := make(chan error) - sourceSrvClose := make(chan error) - - m.logVerbosePrintf("Closing source and database\n") - - go func() { - databaseSrvClose <- m.databaseDrv.Close() - }() - - go func() { - sourceSrvClose <- m.sourceDrv.Close() - }() - - return <-sourceSrvClose, <-databaseSrvClose -} - -// Migrate looks at the currently active migration version, -// then migrates either up or down to the specified version. -func (m *Migrate) Migrate(version uint) error { - if err := m.lock(); err != nil { - return err - } - - curVersion, dirty, err := m.databaseDrv.Version() - if err != nil { - return m.unlockErr(err) - } - - if dirty { - return m.unlockErr(ErrDirty{curVersion}) - } - - ret := make(chan interface{}, m.PrefetchMigrations) - go m.read(curVersion, int(version), ret) - - return m.unlockErr(m.runMigrations(ret)) -} - -// Steps looks at the currently active migration version. -// It will migrate up if n > 0, and down if n < 0. -func (m *Migrate) Steps(n int) error { - if n == 0 { - return ErrNoChange - } - - if err := m.lock(); err != nil { - return err - } - - curVersion, dirty, err := m.databaseDrv.Version() - if err != nil { - return m.unlockErr(err) - } - - if dirty { - return m.unlockErr(ErrDirty{curVersion}) - } - - ret := make(chan interface{}, m.PrefetchMigrations) - - if n > 0 { - go m.readUp(curVersion, n, ret) - } else { - go m.readDown(curVersion, -n, ret) - } - - return m.unlockErr(m.runMigrations(ret)) -} - -// Up looks at the currently active migration version -// and will migrate all the way up (applying all up migrations). -func (m *Migrate) Up() error { - if err := m.lock(); err != nil { - return err - } - - curVersion, dirty, err := m.databaseDrv.Version() - if err != nil { - return m.unlockErr(err) - } - - if dirty { - return m.unlockErr(ErrDirty{curVersion}) - } - - ret := make(chan interface{}, m.PrefetchMigrations) - - go m.readUp(curVersion, -1, ret) - return m.unlockErr(m.runMigrations(ret)) -} - -// Down looks at the currently active migration version -// and will migrate all the way down (applying all down migrations). -func (m *Migrate) Down() error { - if err := m.lock(); err != nil { - return err - } - - curVersion, dirty, err := m.databaseDrv.Version() - if err != nil { - return m.unlockErr(err) - } - - if dirty { - return m.unlockErr(ErrDirty{curVersion}) - } - - ret := make(chan interface{}, m.PrefetchMigrations) - go m.readDown(curVersion, -1, ret) - return m.unlockErr(m.runMigrations(ret)) -} - -// Drop deletes everything in the database. -func (m *Migrate) Drop() error { - if err := m.lock(); err != nil { - return err - } - if err := m.databaseDrv.Drop(); err != nil { - return m.unlockErr(err) - } - return m.unlock() -} - -// Run runs any migration provided by you against the database. -// It does not check any currently active version in database. -// Usually you don't need this function at all. Use Migrate, -// Steps, Up or Down instead. 
-func (m *Migrate) Run(migration ...*Migration) error { - if len(migration) == 0 { - return ErrNoChange - } - - if err := m.lock(); err != nil { - return err - } - - curVersion, dirty, err := m.databaseDrv.Version() - if err != nil { - return m.unlockErr(err) - } - - if dirty { - return m.unlockErr(ErrDirty{curVersion}) - } - - ret := make(chan interface{}, m.PrefetchMigrations) - - go func() { - defer close(ret) - for _, migr := range migration { - if m.PrefetchMigrations > 0 && migr.Body != nil { - m.logVerbosePrintf("Start buffering %v\n", migr.LogString()) - } else { - m.logVerbosePrintf("Scheduled %v\n", migr.LogString()) - } - - ret <- migr - go migr.Buffer() - } - }() - - return m.unlockErr(m.runMigrations(ret)) -} - -// Force sets a migration version. -// It does not check any currently active version in database. -// It resets the dirty state to false. -func (m *Migrate) Force(version int) error { - if version < -1 { - panic("version must be >= -1") - } - - if err := m.lock(); err != nil { - return err - } - - if err := m.databaseDrv.SetVersion(version, false); err != nil { - return m.unlockErr(err) - } - - return m.unlock() -} - -// Version returns the currently active migration version. -// If no migration has been applied, yet, it will return ErrNilVersion. -func (m *Migrate) Version() (version uint, dirty bool, err error) { - v, d, err := m.databaseDrv.Version() - if err != nil { - return 0, false, err - } - - if v == database.NilVersion { - return 0, false, ErrNilVersion - } - - return suint(v), d, nil -} - -// read reads either up or down migrations from source `from` to `to`. -// Each migration is then written to the ret channel. -// If an error occurs during reading, that error is written to the ret channel, too. -// Once read is done reading it will close the ret channel. -func (m *Migrate) read(from int, to int, ret chan<- interface{}) { - defer close(ret) - - // check if from version exists - if from >= 0 { - if m.versionExists(suint(from)) != nil { - ret <- os.ErrNotExist - return - } - } - - // check if to version exists - if to >= 0 { - if m.versionExists(suint(to)) != nil { - ret <- os.ErrNotExist - return - } - } - - // no change? - if from == to { - ret <- ErrNoChange - return - } - - if from < to { - // it's going up - // apply first migration if from is nil version - if from == -1 { - firstVersion, err := m.sourceDrv.First() - if err != nil { - ret <- err - return - } - - migr, err := m.newMigration(firstVersion, int(firstVersion)) - if err != nil { - ret <- err - return - } - - ret <- migr - go migr.Buffer() - from = int(firstVersion) - } - - // run until we reach target ... - for from < to { - if m.stop() { - return - } - - next, err := m.sourceDrv.Next(suint(from)) - if err != nil { - ret <- err - return - } - - migr, err := m.newMigration(next, int(next)) - if err != nil { - ret <- err - return - } - - ret <- migr - go migr.Buffer() - from = int(next) - } - - } else { - // it's going down - // run until we reach target ... 
- for from > to && from >= 0 { - if m.stop() { - return - } - - prev, err := m.sourceDrv.Prev(suint(from)) - if os.IsNotExist(err) && to == -1 { - // apply nil migration - migr, err := m.newMigration(suint(from), -1) - if err != nil { - ret <- err - return - } - ret <- migr - go migr.Buffer() - return - - } else if err != nil { - ret <- err - return - } - - migr, err := m.newMigration(suint(from), int(prev)) - if err != nil { - ret <- err - return - } - - ret <- migr - go migr.Buffer() - from = int(prev) - } - } -} - -// readUp reads up migrations from `from` limited by `limit`. -// limit can be -1, implying no limit and reading until there are no more migrations. -// Each migration is then written to the ret channel. -// If an error occurs during reading, that error is written to the ret channel, too. -// Once readUp is done reading it will close the ret channel. -func (m *Migrate) readUp(from int, limit int, ret chan<- interface{}) { - defer close(ret) - - // check if from version exists - if from >= 0 { - if m.versionExists(suint(from)) != nil { - ret <- os.ErrNotExist - return - } - } - - if limit == 0 { - ret <- ErrNoChange - return - } - - count := 0 - for count < limit || limit == -1 { - if m.stop() { - return - } - - // apply first migration if from is nil version - if from == -1 { - firstVersion, err := m.sourceDrv.First() - if err != nil { - ret <- err - return - } - - migr, err := m.newMigration(firstVersion, int(firstVersion)) - if err != nil { - ret <- err - return - } - - ret <- migr - go migr.Buffer() - from = int(firstVersion) - count++ - continue - } - - // apply next migration - next, err := m.sourceDrv.Next(suint(from)) - if os.IsNotExist(err) { - // no limit, but no migrations applied? - if limit == -1 && count == 0 { - ret <- ErrNoChange - return - } - - // no limit, reached end - if limit == -1 { - return - } - - // reached end, and didn't apply any migrations - if limit > 0 && count == 0 { - ret <- os.ErrNotExist - return - } - - // applied fewer migrations than limit? - if count < limit { - ret <- ErrShortLimit{suint(limit - count)} - return - } - } - if err != nil { - ret <- err - return - } - - migr, err := m.newMigration(next, int(next)) - if err != nil { - ret <- err - return - } - - ret <- migr - go migr.Buffer() - from = int(next) - count++ - } -} - -// readDown reads down migrations from `from` limited by `limit`. -// limit can be -1, implying no limit and reading until there are no more migrations. -// Each migration is then written to the ret channel. -// If an error occurs during reading, that error is written to the ret channel, too. -// Once readDown is done reading it will close the ret channel.
-func (m *Migrate) readDown(from int, limit int, ret chan<- interface{}) { - defer close(ret) - - // check if from version exists - if from >= 0 { - if m.versionExists(suint(from)) != nil { - ret <- os.ErrNotExist - return - } - } - - if limit == 0 { - ret <- ErrNoChange - return - } - - // no change if already at nil version - if from == -1 && limit == -1 { - ret <- ErrNoChange - return - } - - // can't go over limit if already at nil version - if from == -1 && limit > 0 { - ret <- os.ErrNotExist - return - } - - count := 0 - for count < limit || limit == -1 { - if m.stop() { - return - } - - prev, err := m.sourceDrv.Prev(suint(from)) - if os.IsNotExist(err) { - // no limit or haven't reached limit, apply "first" migration - if limit == -1 || limit-count > 0 { - firstVersion, err := m.sourceDrv.First() - if err != nil { - ret <- err - return - } - - migr, err := m.newMigration(firstVersion, -1) - if err != nil { - ret <- err - return - } - ret <- migr - go migr.Buffer() - count++ - } - - if count < limit { - ret <- ErrShortLimit{suint(limit - count)} - } - return - } - if err != nil { - ret <- err - return - } - - migr, err := m.newMigration(suint(from), int(prev)) - if err != nil { - ret <- err - return - } - - ret <- migr - go migr.Buffer() - from = int(prev) - count++ - } -} - -// runMigrations reads *Migration and error from a channel. Any other type -// sent on this channel will result in a panic. Each migration is then -// proxied to the database driver and run against the database. -// Before running a newly received migration it will check if it's supposed -// to stop execution because it might have received a stop signal on the -// GracefulStop channel. -func (m *Migrate) runMigrations(ret <-chan interface{}) error { - for r := range ret { - - if m.stop() { - return nil - } - - switch r.(type) { - case error: - return r.(error) - - case *Migration: - migr := r.(*Migration) - - // set version with dirty state - if err := m.databaseDrv.SetVersion(migr.TargetVersion, true); err != nil { - return err - } - - if migr.Body != nil { - m.logVerbosePrintf("Read and execute %v\n", migr.LogString()) - if err := m.databaseDrv.Run(migr.BufferedBody); err != nil { - return err - } - } - - // set clean state - if err := m.databaseDrv.SetVersion(migr.TargetVersion, false); err != nil { - return err - } - - endTime := time.Now() - readTime := migr.FinishedReading.Sub(migr.StartedBuffering) - runTime := endTime.Sub(migr.FinishedReading) - - // log either verbose or normal - if m.Log != nil { - if m.Log.Verbose() { - m.logPrintf("Finished %v (read %v, ran %v)\n", migr.LogString(), readTime, runTime) - } else { - m.logPrintf("%v (%v)\n", migr.LogString(), readTime+runTime) - } - } - - default: - panic("unknown type") - } - } - return nil -} - -// versionExists checks the source if either the up or down migration for -// the specified migration version exists. -func (m *Migrate) versionExists(version uint) error { - // try up migration first - up, _, err := m.sourceDrv.ReadUp(version) - if err == nil { - defer up.Close() - } - if os.IsExist(err) { - return nil - } else if !os.IsNotExist(err) { - return err - } - - // then try down migration - down, _, err := m.sourceDrv.ReadDown(version) - if err == nil { - defer down.Close() - } - if os.IsExist(err) { - return nil - } else if !os.IsNotExist(err) { - return err - } - - return os.ErrNotExist -} - -// stop returns true if no more migrations should be run against the database -// because a stop signal was received on the GracefulStop channel. 
-// Calls are cheap and this function is not blocking. -func (m *Migrate) stop() bool { - if m.isGracefulStop { - return true - } - - select { - case <-m.GracefulStop: - m.isGracefulStop = true - return true - - default: - return false - } -} - -// newMigration is a helper func that returns a *Migration for the -// specified version and targetVersion. -func (m *Migrate) newMigration(version uint, targetVersion int) (*Migration, error) { - var migr *Migration - - if targetVersion >= int(version) { - r, identifier, err := m.sourceDrv.ReadUp(version) - if os.IsNotExist(err) { - // create "empty" migration - migr, err = NewMigration(nil, "", version, targetVersion) - if err != nil { - return nil, err - } - - } else if err != nil { - return nil, err - - } else { - // create migration from up source - migr, err = NewMigration(r, identifier, version, targetVersion) - if err != nil { - return nil, err - } - } - - } else { - r, identifier, err := m.sourceDrv.ReadDown(version) - if os.IsNotExist(err) { - // create "empty" migration - migr, err = NewMigration(nil, "", version, targetVersion) - if err != nil { - return nil, err - } - - } else if err != nil { - return nil, err - - } else { - // create migration from down source - migr, err = NewMigration(r, identifier, version, targetVersion) - if err != nil { - return nil, err - } - } - } - - if m.PrefetchMigrations > 0 && migr.Body != nil { - m.logVerbosePrintf("Start buffering %v\n", migr.LogString()) - } else { - m.logVerbosePrintf("Scheduled %v\n", migr.LogString()) - } - - return migr, nil -} - -// lock is a thread-safe helper function to lock the database. -// It should be called as late as possible when running migrations. -func (m *Migrate) lock() error { - m.isLockedMu.Lock() - defer m.isLockedMu.Unlock() - - if m.isLocked { - return ErrLocked - } - - // create done channel, used in the timeout goroutine - done := make(chan bool, 1) - defer func() { - done <- true - }() - - // use errchan to signal error back to this context - errchan := make(chan error, 2) - - // start timeout goroutine - timeout := time.After(m.LockTimeout) - go func() { - for { - select { - case <-done: - return - case <-timeout: - errchan <- ErrLockTimeout - return - } - } - }() - - // now try to acquire the lock - go func() { - if err := m.databaseDrv.Lock(); err != nil { - errchan <- err - } else { - errchan <- nil - } - return - }() - - // wait until we either receive ErrLockTimeout or an error from the Lock operation - err := <-errchan - if err == nil { - m.isLocked = true - } - return err -} - -// unlock is a thread-safe helper function to unlock the database. -// It should be called as early as possible when no more migrations are -// expected to be executed. -func (m *Migrate) unlock() error { - m.isLockedMu.Lock() - defer m.isLockedMu.Unlock() - - if err := m.databaseDrv.Unlock(); err != nil { - // BUG: Can potentially create a deadlock. Add a timeout. - return err - } - - m.isLocked = false - return nil -} - -// unlockErr calls unlock and returns a combined error -// if a prevErr is not nil. -func (m *Migrate) unlockErr(prevErr error) error { - if err := m.unlock(); err != nil { - return NewMultiError(prevErr, err) - } - return prevErr -} - -// logPrintf writes to m.Log if not nil -func (m *Migrate) logPrintf(format string, v ...interface{}) { - if m.Log != nil { - m.Log.Printf(format, v...) - } -} - -// logVerbosePrintf writes to m.Log if not nil. Use for verbose logging output.
-func (m *Migrate) logVerbosePrintf(format string, v ...interface{}) { - if m.Log != nil && m.Log.Verbose() { - m.Log.Printf(format, v...) - } -} diff --git a/vendor/github.com/rdallman/migrate/migrate_test.go b/vendor/github.com/rdallman/migrate/migrate_test.go deleted file mode 100644 index 0ec4bce21..000000000 --- a/vendor/github.com/rdallman/migrate/migrate_test.go +++ /dev/null @@ -1,941 +0,0 @@ -package migrate - -import ( - "bytes" - "database/sql" - "io/ioutil" - "log" - "os" - "testing" - - dStub "github.com/mattes/migrate/database/stub" - "github.com/mattes/migrate/source" - sStub "github.com/mattes/migrate/source/stub" -) - -// sourceStubMigrations hold the following migrations: -// u = up migration, d = down migration, n = version -// | 1 | - | 3 | 4 | 5 | - | 7 | -// | u d | - | u | u d | d | - | u d | -var sourceStubMigrations *source.Migrations - -func init() { - sourceStubMigrations = source.NewMigrations() - sourceStubMigrations.Append(&source.Migration{Version: 1, Direction: source.Up}) - sourceStubMigrations.Append(&source.Migration{Version: 1, Direction: source.Down}) - sourceStubMigrations.Append(&source.Migration{Version: 3, Direction: source.Up}) - sourceStubMigrations.Append(&source.Migration{Version: 4, Direction: source.Up}) - sourceStubMigrations.Append(&source.Migration{Version: 4, Direction: source.Down}) - sourceStubMigrations.Append(&source.Migration{Version: 5, Direction: source.Down}) - sourceStubMigrations.Append(&source.Migration{Version: 7, Direction: source.Up}) - sourceStubMigrations.Append(&source.Migration{Version: 7, Direction: source.Down}) -} - -type DummyInstance struct{ Name string } - -func TestNew(t *testing.T) { - m, err := New("stub://", "stub://") - if err != nil { - t.Fatal(err) - } - - if m.sourceName != "stub" { - t.Errorf("expected stub, got %v", m.sourceName) - } - if m.sourceDrv == nil { - t.Error("expected sourceDrv not to be nil") - } - - if m.databaseName != "stub" { - t.Errorf("expected stub, got %v", m.databaseName) - } - if m.databaseDrv == nil { - t.Error("expected databaseDrv not to be nil") - } -} - -func ExampleNew() { - // Read migrations from /home/mattes/migrations and connect to a local postgres database. - m, err := New("file:///home/mattes/migrations", "postgres://mattes:secret@localhost:5432/database?sslmode=disable") - if err != nil { - log.Fatal(err) - } - - // Migrate all the way up ... - if err := m.Up(); err != nil { - log.Fatal(err) - } -} - -func TestNewWithDatabaseInstance(t *testing.T) { - dummyDb := &DummyInstance{"database"} - dbInst, err := dStub.WithInstance(dummyDb, &dStub.Config{}) - if err != nil { - t.Fatal(err) - } - - m, err := NewWithDatabaseInstance("stub://", "stub", dbInst) - if err != nil { - t.Fatal(err) - } - - if m.sourceName != "stub" { - t.Errorf("expected stub, got %v", m.sourceName) - } - if m.sourceDrv == nil { - t.Error("expected sourceDrv not to be nil") - } - - if m.databaseName != "stub" { - t.Errorf("expected stub, got %v", m.databaseName) - } - if m.databaseDrv == nil { - t.Error("expected databaseDrv not to be nil") - } -} - -func ExampleNewWithDatabaseInstance() { - // Create and use an existing database instance. - db, err := sql.Open("postgres", "postgres://mattes:secret@localhost:5432/database?sslmode=disable") - if err != nil { - log.Fatal(err) - } - defer db.Close() - - // Create driver instance from db. - // Check each driver if it supports the WithInstance function. 
- // `import "github.com/mattes/migrate/database/postgres"` - instance, err := dStub.WithInstance(db, &dStub.Config{}) - if err != nil { - log.Fatal(err) - } - - // Read migrations from /home/mattes/migrations and connect to a local postgres database. - m, err := NewWithDatabaseInstance("file:///home/mattes/migrations", "postgres", instance) - if err != nil { - log.Fatal(err) - } - - // Migrate all the way up ... - if err := m.Up(); err != nil { - log.Fatal(err) - } -} - -func TestNewWithSourceInstance(t *testing.T) { - dummySource := &DummyInstance{"source"} - sInst, err := sStub.WithInstance(dummySource, &sStub.Config{}) - if err != nil { - t.Fatal(err) - } - - m, err := NewWithSourceInstance("stub", sInst, "stub://") - if err != nil { - t.Fatal(err) - } - - if m.sourceName != "stub" { - t.Errorf("expected stub, got %v", m.sourceName) - } - if m.sourceDrv == nil { - t.Error("expected sourceDrv not to be nil") - } - - if m.databaseName != "stub" { - t.Errorf("expected stub, got %v", m.databaseName) - } - if m.databaseDrv == nil { - t.Error("expected databaseDrv not to be nil") - } -} - -func ExampleNewWithSourceInstance() { - di := &DummyInstance{"think any client required for a source here"} - - // Create driver instance from DummyInstance di. - // Check each driver if it support the WithInstance function. - // `import "github.com/mattes/migrate/source/stub"` - instance, err := sStub.WithInstance(di, &sStub.Config{}) - if err != nil { - log.Fatal(err) - } - - // Read migrations from Stub and connect to a local postgres database. - m, err := NewWithSourceInstance("stub", instance, "postgres://mattes:secret@localhost:5432/database?sslmode=disable") - if err != nil { - log.Fatal(err) - } - - // Migrate all the way up ... - if err := m.Up(); err != nil { - log.Fatal(err) - } -} - -func TestNewWithInstance(t *testing.T) { - dummyDb := &DummyInstance{"database"} - dbInst, err := dStub.WithInstance(dummyDb, &dStub.Config{}) - if err != nil { - t.Fatal(err) - } - - dummySource := &DummyInstance{"source"} - sInst, err := sStub.WithInstance(dummySource, &sStub.Config{}) - if err != nil { - t.Fatal(err) - } - - m, err := NewWithInstance("stub", sInst, "stub", dbInst) - if err != nil { - t.Fatal(err) - } - - if m.sourceName != "stub" { - t.Errorf("expected stub, got %v", m.sourceName) - } - if m.sourceDrv == nil { - t.Error("expected sourceDrv not to be nil") - } - - if m.databaseName != "stub" { - t.Errorf("expected stub, got %v", m.databaseName) - } - if m.databaseDrv == nil { - t.Error("expected databaseDrv not to be nil") - } -} - -func ExampleNewWithInstance() { - // See NewWithDatabaseInstance and NewWithSourceInstance for an example. 
-} - -func TestClose(t *testing.T) { - m, _ := New("stub://", "stub://") - sourceErr, databaseErr := m.Close() - if sourceErr != nil { - t.Error(sourceErr) - } - if databaseErr != nil { - t.Error(databaseErr) - } -} - -func TestMigrate(t *testing.T) { - m, _ := New("stub://", "stub://") - m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations - dbDrv := m.databaseDrv.(*dStub.Stub) - seq := newMigSeq() - - tt := []struct { - version uint - expectErr error - expectVersion uint - expectSeq migrationSequence - }{ - // migrate all the way Up in single steps - {version: 0, expectErr: os.ErrNotExist}, - {version: 1, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(1))}, - {version: 2, expectErr: os.ErrNotExist}, - {version: 3, expectErr: nil, expectVersion: 3, expectSeq: seq.add(M(3))}, - {version: 4, expectErr: nil, expectVersion: 4, expectSeq: seq.add(M(4))}, - {version: 5, expectErr: nil, expectVersion: 5, expectSeq: seq.add()}, // 5 has no up migration - {version: 6, expectErr: os.ErrNotExist}, - {version: 7, expectErr: nil, expectVersion: 7, expectSeq: seq.add(M(7))}, - {version: 8, expectErr: os.ErrNotExist}, - - // migrate all the way Down in single steps - {version: 6, expectErr: os.ErrNotExist}, - {version: 5, expectErr: nil, expectVersion: 5, expectSeq: seq.add(M(7, 5))}, - {version: 4, expectErr: nil, expectVersion: 4, expectSeq: seq.add(M(5, 4))}, - {version: 3, expectErr: nil, expectVersion: 3, expectSeq: seq.add(M(4, 3))}, - {version: 2, expectErr: os.ErrNotExist}, - {version: 1, expectErr: nil, expectVersion: 1, expectSeq: seq.add()}, // 3 has no down migration - {version: 0, expectErr: os.ErrNotExist}, - - // migrate all the way Up in one step - {version: 7, expectErr: nil, expectVersion: 7, expectSeq: seq.add(M(3), M(4), M(7))}, - - // migrate all the way Down in one step - {version: 1, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(7, 5), M(5, 4), M(4, 3), M(3, 1))}, - - // can't migrate the same version twice - {version: 1, expectErr: ErrNoChange}, - } - - for i, v := range tt { - err := m.Migrate(v.version) - if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || - (v.expectErr != os.ErrNotExist && err != v.expectErr) { - t.Errorf("expected err %v, got %v, in %v", v.expectErr, err, i) - - } else if err == nil { - version, _, err := m.Version() - if err != nil { - t.Error(err) - } - if version != v.expectVersion { - t.Errorf("expected version %v, got %v, in %v", v.expectVersion, version, i) - } - equalDbSeq(t, i, v.expectSeq, dbDrv) - } - } -} - -func TestMigrateDirty(t *testing.T) { - m, _ := New("stub://", "stub://") - dbDrv := m.databaseDrv.(*dStub.Stub) - if err := dbDrv.SetVersion(0, true); err != nil { - t.Fatal(err) - } - - err := m.Migrate(1) - if _, ok := err.(ErrDirty); !ok { - t.Fatalf("expected ErrDirty, got %v", err) - } -} - -func TestSteps(t *testing.T) { - m, _ := New("stub://", "stub://") - m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations - dbDrv := m.databaseDrv.(*dStub.Stub) - seq := newMigSeq() - - tt := []struct { - n int - expectErr error - expectVersion int - expectSeq migrationSequence - }{ - // step must be != 0 - {n: 0, expectErr: ErrNoChange}, - - // can't go Down if ErrNilVersion - {n: -1, expectErr: os.ErrNotExist}, - - // migrate all the way Up - {n: 1, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(1))}, - {n: 1, expectErr: nil, expectVersion: 3, expectSeq: seq.add(M(3))}, - {n: 1, expectErr: nil, expectVersion: 4, expectSeq: seq.add(M(4))}, - {n: 1, expectErr: nil, expectVersion: 5, expectSeq: 
seq.add()}, - {n: 1, expectErr: nil, expectVersion: 7, expectSeq: seq.add(M(7))}, - {n: 1, expectErr: os.ErrNotExist}, - - // migrate all the way Down - {n: -1, expectErr: nil, expectVersion: 5, expectSeq: seq.add(M(7, 5))}, - {n: -1, expectErr: nil, expectVersion: 4, expectSeq: seq.add(M(5, 4))}, - {n: -1, expectErr: nil, expectVersion: 3, expectSeq: seq.add(M(4, 3))}, - {n: -1, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(3, 1))}, - {n: -1, expectErr: nil, expectVersion: -1, expectSeq: seq.add(M(1, -1))}, - - // migrate Up in bigger step - {n: 4, expectErr: nil, expectVersion: 5, expectSeq: seq.add(M(1), M(3), M(4), M(5))}, - - // apply one migration, then reaches out of boundary - {n: 2, expectErr: ErrShortLimit{1}, expectVersion: 7, expectSeq: seq.add(M(7))}, - - // migrate Down in bigger step - {n: -4, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(7, 5), M(5, 4), M(4, 3), M(3, 1))}, - - // apply one migration, then reaches out of boundary - {n: -2, expectErr: ErrShortLimit{1}, expectVersion: -1, expectSeq: seq.add(M(1, -1))}, - } - - for i, v := range tt { - err := m.Steps(v.n) - if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || - (v.expectErr != os.ErrNotExist && err != v.expectErr) { - t.Errorf("expected err %v, got %v, in %v", v.expectErr, err, i) - - } else if err == nil { - version, _, err := m.Version() - if err != ErrNilVersion && err != nil { - t.Error(err) - } - if v.expectVersion == -1 && err != ErrNilVersion { - t.Errorf("expected ErrNilVersion, got %v, in %v", version, i) - - } else if v.expectVersion >= 0 && version != uint(v.expectVersion) { - t.Errorf("expected version %v, got %v, in %v", v.expectVersion, version, i) - } - equalDbSeq(t, i, v.expectSeq, dbDrv) - } - } -} - -func TestStepsDirty(t *testing.T) { - m, _ := New("stub://", "stub://") - dbDrv := m.databaseDrv.(*dStub.Stub) - if err := dbDrv.SetVersion(0, true); err != nil { - t.Fatal(err) - } - - err := m.Steps(1) - if _, ok := err.(ErrDirty); !ok { - t.Fatalf("expected ErrDirty, got %v", err) - } -} - -func TestUpAndDown(t *testing.T) { - m, _ := New("stub://", "stub://") - m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations - dbDrv := m.databaseDrv.(*dStub.Stub) - seq := newMigSeq() - - // go Up first - if err := m.Up(); err != nil { - t.Fatal(err) - } - equalDbSeq(t, 0, seq.add(M(1), M(3), M(4), M(5), M(7)), dbDrv) - - // go Down - if err := m.Down(); err != nil { - t.Fatal(err) - } - equalDbSeq(t, 1, seq.add(M(7, 5), M(5, 4), M(4, 3), M(3, 1), M(1, -1)), dbDrv) - - // go 1 Up and then all the way Up - if err := m.Steps(1); err != nil { - t.Fatal(err) - } - if err := m.Up(); err != nil { - t.Fatal(err) - } - equalDbSeq(t, 2, seq.add(M(1), M(3), M(4), M(5), M(7)), dbDrv) - - // go 1 Down and then all the way Down - if err := m.Steps(-1); err != nil { - t.Fatal(err) - } - if err := m.Down(); err != nil { - t.Fatal(err) - } - equalDbSeq(t, 0, seq.add(M(7, 5), M(5, 4), M(4, 3), M(3, 1), M(1, -1)), dbDrv) -} - -func TestUpDirty(t *testing.T) { - m, _ := New("stub://", "stub://") - dbDrv := m.databaseDrv.(*dStub.Stub) - if err := dbDrv.SetVersion(0, true); err != nil { - t.Fatal(err) - } - - err := m.Up() - if _, ok := err.(ErrDirty); !ok { - t.Fatalf("expected ErrDirty, got %v", err) - } -} - -func TestDownDirty(t *testing.T) { - m, _ := New("stub://", "stub://") - dbDrv := m.databaseDrv.(*dStub.Stub) - if err := dbDrv.SetVersion(0, true); err != nil { - t.Fatal(err) - } - - err := m.Down() - if _, ok := err.(ErrDirty); !ok { - t.Fatalf("expected ErrDirty, got %v", err) - } 
-} - -func TestDrop(t *testing.T) { - m, _ := New("stub://", "stub://") - m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations - dbDrv := m.databaseDrv.(*dStub.Stub) - - if err := m.Drop(); err != nil { - t.Fatal(err) - } - - if dbDrv.MigrationSequence[len(dbDrv.MigrationSequence)-1] != dStub.DROP { - t.Fatalf("expected database to DROP, got sequence %v", dbDrv.MigrationSequence) - } -} - -func TestVersion(t *testing.T) { - m, _ := New("stub://", "stub://") - dbDrv := m.databaseDrv.(*dStub.Stub) - - _, _, err := m.Version() - if err != ErrNilVersion { - t.Fatalf("expected ErrNilVersion, got %v", err) - } - - if err := dbDrv.Run(bytes.NewBufferString("1_up")); err != nil { - t.Fatal(err) - } - - if err := dbDrv.SetVersion(1, false); err != nil { - t.Fatal(err) - } - - v, _, err := m.Version() - if err != nil { - t.Fatal(err) - } - - if v != 1 { - t.Fatalf("expected version 1, got %v", v) - } -} - -func TestRun(t *testing.T) { - m, _ := New("stub://", "stub://") - - mx, err := NewMigration(nil, "", 1, 2) - if err != nil { - t.Fatal(err) - } - - if err := m.Run(mx); err != nil { - t.Fatal(err) - } - - v, _, err := m.Version() - if err != nil { - t.Fatal(err) - } - - if v != 2 { - t.Errorf("expected version 2, got %v", v) - } -} - -func TestRunDirty(t *testing.T) { - m, _ := New("stub://", "stub://") - dbDrv := m.databaseDrv.(*dStub.Stub) - if err := dbDrv.SetVersion(0, true); err != nil { - t.Fatal(err) - } - - migr, err := NewMigration(nil, "", 1, 2) - if err != nil { - t.Fatal(err) - } - - err = m.Run(migr) - if _, ok := err.(ErrDirty); !ok { - t.Fatalf("expected ErrDirty, got %v", err) - } -} - -func TestForce(t *testing.T) { - m, _ := New("stub://", "stub://") - m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations - - if err := m.Force(7); err != nil { - t.Fatal(err) - } - - v, dirty, err := m.Version() - if err != nil { - t.Fatal(err) - } - if dirty { - t.Errorf("expected dirty to be false") - } - if v != 7 { - t.Errorf("expected version to be 7") - } -} - -func TestForceDirty(t *testing.T) { - m, _ := New("stub://", "stub://") - dbDrv := m.databaseDrv.(*dStub.Stub) - if err := dbDrv.SetVersion(0, true); err != nil { - t.Fatal(err) - } - - if err := m.Force(1); err != nil { - t.Fatal(err) - } -} - -func TestRead(t *testing.T) { - m, _ := New("stub://", "stub://") - m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations - - tt := []struct { - from int - to int - expectErr error - expectMigrations migrationSequence - }{ - {from: -1, to: -1, expectErr: ErrNoChange}, - {from: -1, to: 0, expectErr: os.ErrNotExist}, - {from: -1, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(1))}, - {from: -1, to: 2, expectErr: os.ErrNotExist}, - {from: -1, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3))}, - {from: -1, to: 4, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3), M(4))}, - {from: -1, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3), M(4), M(5))}, - {from: -1, to: 6, expectErr: os.ErrNotExist}, - {from: -1, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3), M(4), M(5), M(7))}, - {from: -1, to: 8, expectErr: os.ErrNotExist}, - - {from: 0, to: -1, expectErr: os.ErrNotExist}, - {from: 0, to: 0, expectErr: os.ErrNotExist}, - {from: 0, to: 1, expectErr: os.ErrNotExist}, - {from: 0, to: 2, expectErr: os.ErrNotExist}, - {from: 0, to: 3, expectErr: os.ErrNotExist}, - {from: 0, to: 4, expectErr: os.ErrNotExist}, - {from: 0, to: 5, expectErr: os.ErrNotExist}, - {from: 0, to: 6, expectErr: os.ErrNotExist}, - {from: 0, to: 7, expectErr: 
os.ErrNotExist}, - {from: 0, to: 8, expectErr: os.ErrNotExist}, - - {from: 1, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(1, -1))}, - {from: 1, to: 0, expectErr: os.ErrNotExist}, - {from: 1, to: 1, expectErr: ErrNoChange}, - {from: 1, to: 2, expectErr: os.ErrNotExist}, - {from: 1, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(3))}, - {from: 1, to: 4, expectErr: nil, expectMigrations: newMigSeq(M(3), M(4))}, - {from: 1, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(3), M(4), M(5))}, - {from: 1, to: 6, expectErr: os.ErrNotExist}, - {from: 1, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(3), M(4), M(5), M(7))}, - {from: 1, to: 8, expectErr: os.ErrNotExist}, - - {from: 2, to: -1, expectErr: os.ErrNotExist}, - {from: 2, to: 0, expectErr: os.ErrNotExist}, - {from: 2, to: 1, expectErr: os.ErrNotExist}, - {from: 2, to: 2, expectErr: os.ErrNotExist}, - {from: 2, to: 3, expectErr: os.ErrNotExist}, - {from: 2, to: 4, expectErr: os.ErrNotExist}, - {from: 2, to: 5, expectErr: os.ErrNotExist}, - {from: 2, to: 6, expectErr: os.ErrNotExist}, - {from: 2, to: 7, expectErr: os.ErrNotExist}, - {from: 2, to: 8, expectErr: os.ErrNotExist}, - - {from: 3, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(3, 1), M(1, -1))}, - {from: 3, to: 0, expectErr: os.ErrNotExist}, - {from: 3, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(3, 1))}, - {from: 3, to: 2, expectErr: os.ErrNotExist}, - {from: 3, to: 3, expectErr: ErrNoChange}, - {from: 3, to: 4, expectErr: nil, expectMigrations: newMigSeq(M(4))}, - {from: 3, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(4), M(5))}, - {from: 3, to: 6, expectErr: os.ErrNotExist}, - {from: 3, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(4), M(5), M(7))}, - {from: 3, to: 8, expectErr: os.ErrNotExist}, - - {from: 4, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(4, 3), M(3, 1), M(1, -1))}, - {from: 4, to: 0, expectErr: os.ErrNotExist}, - {from: 4, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(4, 3), M(3, 1))}, - {from: 4, to: 2, expectErr: os.ErrNotExist}, - {from: 4, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(4, 3))}, - {from: 4, to: 4, expectErr: ErrNoChange}, - {from: 4, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(5))}, - {from: 4, to: 6, expectErr: os.ErrNotExist}, - {from: 4, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(5), M(7))}, - {from: 4, to: 8, expectErr: os.ErrNotExist}, - - {from: 5, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3), M(3, 1), M(1, -1))}, - {from: 5, to: 0, expectErr: os.ErrNotExist}, - {from: 5, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3), M(3, 1))}, - {from: 5, to: 2, expectErr: os.ErrNotExist}, - {from: 5, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3))}, - {from: 5, to: 4, expectErr: nil, expectMigrations: newMigSeq(M(5, 4))}, - {from: 5, to: 5, expectErr: ErrNoChange}, - {from: 5, to: 6, expectErr: os.ErrNotExist}, - {from: 5, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(7))}, - {from: 5, to: 8, expectErr: os.ErrNotExist}, - - {from: 6, to: -1, expectErr: os.ErrNotExist}, - {from: 6, to: 0, expectErr: os.ErrNotExist}, - {from: 6, to: 1, expectErr: os.ErrNotExist}, - {from: 6, to: 2, expectErr: os.ErrNotExist}, - {from: 6, to: 3, expectErr: os.ErrNotExist}, - {from: 6, to: 4, expectErr: os.ErrNotExist}, - {from: 6, to: 5, expectErr: os.ErrNotExist}, - {from: 6, to: 6, expectErr: os.ErrNotExist}, - {from: 6, to: 7, expectErr: os.ErrNotExist}, - {from: 6, to: 8, expectErr: os.ErrNotExist}, 
- - {from: 7, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4), M(4, 3), M(3, 1), M(1, -1))}, - {from: 7, to: 0, expectErr: os.ErrNotExist}, - {from: 7, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4), M(4, 3), M(3, 1))}, - {from: 7, to: 2, expectErr: os.ErrNotExist}, - {from: 7, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4), M(4, 3))}, - {from: 7, to: 4, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4))}, - {from: 7, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(7, 5))}, - {from: 7, to: 6, expectErr: os.ErrNotExist}, - {from: 7, to: 7, expectErr: ErrNoChange}, - {from: 7, to: 8, expectErr: os.ErrNotExist}, - - {from: 8, to: -1, expectErr: os.ErrNotExist}, - {from: 8, to: 0, expectErr: os.ErrNotExist}, - {from: 8, to: 1, expectErr: os.ErrNotExist}, - {from: 8, to: 2, expectErr: os.ErrNotExist}, - {from: 8, to: 3, expectErr: os.ErrNotExist}, - {from: 8, to: 4, expectErr: os.ErrNotExist}, - {from: 8, to: 5, expectErr: os.ErrNotExist}, - {from: 8, to: 6, expectErr: os.ErrNotExist}, - {from: 8, to: 7, expectErr: os.ErrNotExist}, - {from: 8, to: 8, expectErr: os.ErrNotExist}, - } - - for i, v := range tt { - ret := make(chan interface{}) - go m.read(v.from, v.to, ret) - migrations, err := migrationsFromChannel(ret) - - if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || - (v.expectErr != os.ErrNotExist && v.expectErr != err) { - t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i) - t.Logf("%v, in %v", migrations, i) - } - if len(v.expectMigrations) > 0 { - equalMigSeq(t, i, v.expectMigrations, migrations) - } - } -} - -func TestReadUp(t *testing.T) { - m, _ := New("stub://", "stub://") - m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations - - tt := []struct { - from int - limit int // -1 means no limit - expectErr error - expectMigrations migrationSequence - }{ - {from: -1, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3), M(4), M(5), M(7))}, - {from: -1, limit: 0, expectErr: ErrNoChange}, - {from: -1, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(1))}, - {from: -1, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3))}, - - {from: 0, limit: -1, expectErr: os.ErrNotExist}, - {from: 0, limit: 0, expectErr: os.ErrNotExist}, - {from: 0, limit: 1, expectErr: os.ErrNotExist}, - {from: 0, limit: 2, expectErr: os.ErrNotExist}, - - {from: 1, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(3), M(4), M(5), M(7))}, - {from: 1, limit: 0, expectErr: ErrNoChange}, - {from: 1, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(3))}, - {from: 1, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(3), M(4))}, - - {from: 2, limit: -1, expectErr: os.ErrNotExist}, - {from: 2, limit: 0, expectErr: os.ErrNotExist}, - {from: 2, limit: 1, expectErr: os.ErrNotExist}, - {from: 2, limit: 2, expectErr: os.ErrNotExist}, - - {from: 3, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(4), M(5), M(7))}, - {from: 3, limit: 0, expectErr: ErrNoChange}, - {from: 3, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(4))}, - {from: 3, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(4), M(5))}, - - {from: 4, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(5), M(7))}, - {from: 4, limit: 0, expectErr: ErrNoChange}, - {from: 4, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(5))}, - {from: 4, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(5), M(7))}, - - {from: 5, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(7))}, - 
{from: 5, limit: 0, expectErr: ErrNoChange}, - {from: 5, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(7))}, - {from: 5, limit: 2, expectErr: ErrShortLimit{1}, expectMigrations: newMigSeq(M(7))}, - - {from: 6, limit: -1, expectErr: os.ErrNotExist}, - {from: 6, limit: 0, expectErr: os.ErrNotExist}, - {from: 6, limit: 1, expectErr: os.ErrNotExist}, - {from: 6, limit: 2, expectErr: os.ErrNotExist}, - - {from: 7, limit: -1, expectErr: ErrNoChange}, - {from: 7, limit: 0, expectErr: ErrNoChange}, - {from: 7, limit: 1, expectErr: os.ErrNotExist}, - {from: 7, limit: 2, expectErr: os.ErrNotExist}, - - {from: 8, limit: -1, expectErr: os.ErrNotExist}, - {from: 8, limit: 0, expectErr: os.ErrNotExist}, - {from: 8, limit: 1, expectErr: os.ErrNotExist}, - {from: 8, limit: 2, expectErr: os.ErrNotExist}, - } - - for i, v := range tt { - ret := make(chan interface{}) - go m.readUp(v.from, v.limit, ret) - migrations, err := migrationsFromChannel(ret) - - if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || - (v.expectErr != os.ErrNotExist && v.expectErr != err) { - t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i) - t.Logf("%v, in %v", migrations, i) - } - if len(v.expectMigrations) > 0 { - equalMigSeq(t, i, v.expectMigrations, migrations) - } - } -} - -func TestReadDown(t *testing.T) { - m, _ := New("stub://", "stub://") - m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations - - tt := []struct { - from int - limit int // -1 means no limit - expectErr error - expectMigrations migrationSequence - }{ - {from: -1, limit: -1, expectErr: ErrNoChange}, - {from: -1, limit: 0, expectErr: ErrNoChange}, - {from: -1, limit: 1, expectErr: os.ErrNotExist}, - {from: -1, limit: 2, expectErr: os.ErrNotExist}, - - {from: 0, limit: -1, expectErr: os.ErrNotExist}, - {from: 0, limit: 0, expectErr: os.ErrNotExist}, - {from: 0, limit: 1, expectErr: os.ErrNotExist}, - {from: 0, limit: 2, expectErr: os.ErrNotExist}, - - {from: 1, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(1, -1))}, - {from: 1, limit: 0, expectErr: ErrNoChange}, - {from: 1, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(1, -1))}, - {from: 1, limit: 2, expectErr: ErrShortLimit{1}, expectMigrations: newMigSeq(M(1, -1))}, - - {from: 2, limit: -1, expectErr: os.ErrNotExist}, - {from: 2, limit: 0, expectErr: os.ErrNotExist}, - {from: 2, limit: 1, expectErr: os.ErrNotExist}, - {from: 2, limit: 2, expectErr: os.ErrNotExist}, - - {from: 3, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(3, 1), M(1, -1))}, - {from: 3, limit: 0, expectErr: ErrNoChange}, - {from: 3, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(3, 1))}, - {from: 3, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(3, 1), M(1, -1))}, - - {from: 4, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(4, 3), M(3, 1), M(1, -1))}, - {from: 4, limit: 0, expectErr: ErrNoChange}, - {from: 4, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(4, 3))}, - {from: 4, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(4, 3), M(3, 1))}, - - {from: 5, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3), M(3, 1), M(1, -1))}, - {from: 5, limit: 0, expectErr: ErrNoChange}, - {from: 5, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(5, 4))}, - {from: 5, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3))}, - - {from: 6, limit: -1, expectErr: os.ErrNotExist}, - {from: 6, limit: 0, expectErr: os.ErrNotExist}, - {from: 6, limit: 1, expectErr: os.ErrNotExist}, - {from: 6, limit: 2, 
expectErr: os.ErrNotExist}, - - {from: 7, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4), M(4, 3), M(3, 1), M(1, -1))}, - {from: 7, limit: 0, expectErr: ErrNoChange}, - {from: 7, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(7, 5))}, - {from: 7, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4))}, - - {from: 8, limit: -1, expectErr: os.ErrNotExist}, - {from: 8, limit: 0, expectErr: os.ErrNotExist}, - {from: 8, limit: 1, expectErr: os.ErrNotExist}, - {from: 8, limit: 2, expectErr: os.ErrNotExist}, - } - - for i, v := range tt { - ret := make(chan interface{}) - go m.readDown(v.from, v.limit, ret) - migrations, err := migrationsFromChannel(ret) - - if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || - (v.expectErr != os.ErrNotExist && v.expectErr != err) { - t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i) - t.Logf("%v, in %v", migrations, i) - } - if len(v.expectMigrations) > 0 { - equalMigSeq(t, i, v.expectMigrations, migrations) - } - } -} - -func TestLock(t *testing.T) { - m, _ := New("stub://", "stub://") - if err := m.lock(); err != nil { - t.Fatal(err) - } - - if err := m.lock(); err == nil { - t.Fatal("should be locked already") - } -} - -func migrationsFromChannel(ret chan interface{}) ([]*Migration, error) { - slice := make([]*Migration, 0) - for r := range ret { - switch r.(type) { - case error: - return slice, r.(error) - - case *Migration: - slice = append(slice, r.(*Migration)) - } - } - return slice, nil -} - -type migrationSequence []*Migration - -func newMigSeq(migr ...*Migration) migrationSequence { - return migr -} - -func (m *migrationSequence) add(migr ...*Migration) migrationSequence { - *m = append(*m, migr...) - return *m -} - -func (m *migrationSequence) bodySequence() []string { - r := make([]string, 0) - for _, v := range *m { - if v.Body != nil { - body, err := ioutil.ReadAll(v.Body) - if err != nil { - panic(err) // that should never happen - } - - // reset body reader - // TODO: is there a better/nicer way? 
- v.Body = ioutil.NopCloser(bytes.NewReader(body)) - - r = append(r, string(body[:])) - } - } - return r -} - -// M is a convenience func to create a new *Migration -func M(version uint, targetVersion ...int) *Migration { - if len(targetVersion) > 1 { - panic("only one targetVersion allowed") - } - ts := int(version) - if len(targetVersion) == 1 { - ts = targetVersion[0] - } - - m, _ := New("stub://", "stub://") - m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations - migr, err := m.newMigration(version, ts) - if err != nil { - panic(err) - } - return migr -} - -func equalMigSeq(t *testing.T, i int, expected, got migrationSequence) { - if len(expected) != len(got) { - t.Errorf("expected migrations %v, got %v, in %v", expected, got, i) - - } else { - for ii := 0; ii < len(expected); ii++ { - if expected[ii].Version != got[ii].Version { - t.Errorf("expected version %v, got %v, in %v", expected[ii].Version, got[ii].Version, i) - } - - if expected[ii].TargetVersion != got[ii].TargetVersion { - t.Errorf("expected targetVersion %v, got %v, in %v", expected[ii].TargetVersion, got[ii].TargetVersion, i) - } - } - } -} - -func equalDbSeq(t *testing.T, i int, expected migrationSequence, got *dStub.Stub) { - bs := expected.bodySequence() - if !got.EqualSequence(bs) { - t.Fatalf("\nexpected sequence %v,\ngot %v, in %v", bs, got.MigrationSequence, i) - } -} diff --git a/vendor/github.com/rdallman/migrate/migration.go b/vendor/github.com/rdallman/migrate/migration.go deleted file mode 100644 index 069e7f038..000000000 --- a/vendor/github.com/rdallman/migrate/migration.go +++ /dev/null @@ -1,154 +0,0 @@ -package migrate - -import ( - "bufio" - "fmt" - "io" - "time" -) - -// DefaultBufferSize sets the in memory buffer size (in Bytes) for every -// pre-read migration (see DefaultPrefetchMigrations). -var DefaultBufferSize = uint(100000) - -// Migration holds information about a migration. -// It is initially created from data coming from the source and then -// used when run against the database. -type Migration struct { - // Identifier can be any string to help identifying - // the migration in the source. - Identifier string - - // Version is the version of this migration. - Version uint - - // TargetVersion is the migration version after this migration - // has been applied to the database. - // Can be -1, implying that this is a NilVersion. - TargetVersion int - - // Body holds an io.ReadCloser to the source. - Body io.ReadCloser - - // BufferedBody holds an buffered io.Reader to the underlying Body. - BufferedBody io.Reader - - // BufferSize defaults to DefaultBufferSize - BufferSize uint - - // bufferWriter holds an io.WriteCloser and pipes to BufferBody. - // It's an *Closer for flow control. - bufferWriter io.WriteCloser - - // Scheduled is the time when the migration was scheduled/ queued. - Scheduled time.Time - - // StartedBuffering is the time when buffering of the migration source started. - StartedBuffering time.Time - - // FinishedBuffering is the time when buffering of the migration source finished. - FinishedBuffering time.Time - - // FinishedReading is the time when the migration source is fully read. - FinishedReading time.Time - - // BytesRead holds the number of Bytes read from the migration source. - BytesRead int64 -} - -// NewMigration returns a new Migration and sets the body, identifier, -// version and targetVersion. Body can be nil, which turns this migration -// into a "NilMigration". If no identifier is provided, it will default to "". 
-// targetVersion can be -1, implying it is a NilVersion. -// -// What is a NilMigration? -// Usually each migration version coming from source is expected to have an -// Up and Down migration. This is not a hard requirement though, leading to -// a situation where only the Up or Down migration is present. So let's say -// the user wants to migrate up to a version that doesn't have the actual Up -// migration, in that case we still want to apply the version, but with an empty -// body. We are calling that a NilMigration, a migration with an empty body. -// -// What is a NilVersion? -// NilVersion is a const(-1). When running down migrations and we are at the -// last down migration, there is no next down migration, the targetVersion should -// be nil. Nil in this case is represented by -1 (because type int). -func NewMigration(body io.ReadCloser, identifier string, - version uint, targetVersion int) (*Migration, error) { - tnow := time.Now() - m := &Migration{ - Identifier: identifier, - Version: version, - TargetVersion: targetVersion, - Scheduled: tnow, - } - - if body == nil { - if len(identifier) == 0 { - m.Identifier = "" - } - - m.StartedBuffering = tnow - m.FinishedBuffering = tnow - m.FinishedReading = tnow - return m, nil - } - - br, bw := io.Pipe() - m.Body = body // want to simulate low latency? newSlowReader(body) - m.BufferSize = DefaultBufferSize - m.BufferedBody = br - m.bufferWriter = bw - return m, nil -} - -// String implements string.Stringer and is used in tests. -func (m *Migration) String() string { - return fmt.Sprintf("%v [%v=>%v]", m.Identifier, m.Version, m.TargetVersion) -} - -// LogString returns a string describing this migration to humans. -func (m *Migration) LogString() string { - directionStr := "u" - if m.TargetVersion < int(m.Version) { - directionStr = "d" - } - return fmt.Sprintf("%v/%v %v", m.Version, directionStr, m.Identifier) -} - -// Buffer buffers Body up to BufferSize. -// Calling this function blocks. Call with goroutine. -func (m *Migration) Buffer() error { - if m.Body == nil { - return nil - } - - m.StartedBuffering = time.Now() - - b := bufio.NewReaderSize(m.Body, int(m.BufferSize)) - - // start reading from body, peek won't move the read pointer though - // poor man's solution? - b.Peek(int(m.BufferSize)) - - m.FinishedBuffering = time.Now() - - // write to bufferWriter, this will block until - // something starts reading from m.Buffer - n, err := b.WriteTo(m.bufferWriter) - if err != nil { - return err - } - - m.FinishedReading = time.Now() - m.BytesRead = n - - // close bufferWriter so Buffer knows that there is no - // more data coming - m.bufferWriter.Close() - - // it's safe to close the Body too - m.Body.Close() - - return nil -} diff --git a/vendor/github.com/rdallman/migrate/migration_test.go b/vendor/github.com/rdallman/migrate/migration_test.go deleted file mode 100644 index b6589f938..000000000 --- a/vendor/github.com/rdallman/migrate/migration_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package migrate - -import ( - "fmt" - "io/ioutil" - "log" - "strings" -) - -func ExampleNewMigration() { - // Create a dummy migration body, this is coming from the source usually. - body := ioutil.NopCloser(strings.NewReader("dumy migration that creates users table")) - - // Create a new Migration that represents version 1486686016. - // Once this migration has been applied to the database, the new - // migration version will be 1486689359. 
- migr, err := NewMigration(body, "create_users_table", 1486686016, 1486689359) - if err != nil { - log.Fatal(err) - } - - fmt.Print(migr.LogString()) - // Output: - // 1486686016/u create_users_table -} - -func ExampleNewMigration_nilMigration() { - // Create a new Migration that represents a NilMigration. - // Once this migration has been applied to the database, the new - // migration version will be 1486689359. - migr, err := NewMigration(nil, "", 1486686016, 1486689359) - if err != nil { - log.Fatal(err) - } - - fmt.Print(migr.LogString()) - // Output: - // 1486686016/u -} - -func ExampleNewMigration_nilVersion() { - // Create a dummy migration body, this is coming from the source usually. - body := ioutil.NopCloser(strings.NewReader("dumy migration that deletes users table")) - - // Create a new Migration that represents version 1486686016. - // This is the last available down migration, so the migration version - // will be -1, meaning NilVersion once this migration ran. - migr, err := NewMigration(body, "drop_users_table", 1486686016, -1) - if err != nil { - log.Fatal(err) - } - - fmt.Print(migr.LogString()) - // Output: - // 1486686016/d drop_users_table -} diff --git a/vendor/github.com/rdallman/migrate/source/aws-s3/README.md b/vendor/github.com/rdallman/migrate/source/aws-s3/README.md deleted file mode 100644 index 3a59cfec9..000000000 --- a/vendor/github.com/rdallman/migrate/source/aws-s3/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# aws-s3 - -`s3:///` diff --git a/vendor/github.com/rdallman/migrate/source/aws-s3/s3.go b/vendor/github.com/rdallman/migrate/source/aws-s3/s3.go deleted file mode 100644 index 8b581402c..000000000 --- a/vendor/github.com/rdallman/migrate/source/aws-s3/s3.go +++ /dev/null @@ -1,125 +0,0 @@ -package awss3 - -import ( - "fmt" - "io" - "net/url" - "os" - "path" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/aws/aws-sdk-go/service/s3/s3iface" - "github.com/mattes/migrate/source" -) - -func init() { - source.Register("s3", &s3Driver{}) -} - -type s3Driver struct { - s3client s3iface.S3API - bucket string - prefix string - migrations *source.Migrations -} - -func (s *s3Driver) Open(folder string) (source.Driver, error) { - u, err := url.Parse(folder) - if err != nil { - return nil, err - } - sess, err := session.NewSession() - if err != nil { - return nil, err - } - driver := s3Driver{ - bucket: u.Host, - prefix: strings.Trim(u.Path, "/") + "/", - s3client: s3.New(sess), - migrations: source.NewMigrations(), - } - err = driver.loadMigrations() - if err != nil { - return nil, err - } - return &driver, nil -} - -func (s *s3Driver) loadMigrations() error { - output, err := s.s3client.ListObjects(&s3.ListObjectsInput{ - Bucket: aws.String(s.bucket), - Prefix: aws.String(s.prefix), - Delimiter: aws.String("/"), - }) - if err != nil { - return err - } - for _, object := range output.Contents { - _, fileName := path.Split(aws.StringValue(object.Key)) - m, err := source.DefaultParse(fileName) - if err != nil { - continue - } - if !s.migrations.Append(m) { - return fmt.Errorf("unable to parse file %v", aws.StringValue(object.Key)) - } - } - return nil -} - -func (s *s3Driver) Close() error { - return nil -} - -func (s *s3Driver) First() (uint, error) { - v, ok := s.migrations.First() - if !ok { - return 0, os.ErrNotExist - } - return v, nil -} - -func (s *s3Driver) Prev(version uint) (uint, error) { - v, ok := s.migrations.Prev(version) - if !ok { - return 0, 
os.ErrNotExist - } - return v, nil -} - -func (s *s3Driver) Next(version uint) (uint, error) { - v, ok := s.migrations.Next(version) - if !ok { - return 0, os.ErrNotExist - } - return v, nil -} - -func (s *s3Driver) ReadUp(version uint) (io.ReadCloser, string, error) { - if m, ok := s.migrations.Up(version); ok { - return s.open(m) - } - return nil, "", os.ErrNotExist -} - -func (s *s3Driver) ReadDown(version uint) (io.ReadCloser, string, error) { - if m, ok := s.migrations.Down(version); ok { - return s.open(m) - } - return nil, "", os.ErrNotExist -} - -func (s *s3Driver) open(m *source.Migration) (io.ReadCloser, string, error) { - key := path.Join(s.prefix, m.Raw) - object, err := s.s3client.GetObject(&s3.GetObjectInput{ - Bucket: aws.String(s.bucket), - Key: aws.String(key), - }) - if err != nil { - return nil, "", err - } - return object.Body, m.Identifier, nil -} diff --git a/vendor/github.com/rdallman/migrate/source/aws-s3/s3_test.go b/vendor/github.com/rdallman/migrate/source/aws-s3/s3_test.go deleted file mode 100644 index f07d7ff2c..000000000 --- a/vendor/github.com/rdallman/migrate/source/aws-s3/s3_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package awss3 - -import ( - "errors" - "io/ioutil" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/mattes/migrate/source" - st "github.com/mattes/migrate/source/testing" -) - -func Test(t *testing.T) { - s3Client := fakeS3{ - bucket: "some-bucket", - objects: map[string]string{ - "staging/migrations/1_foobar.up.sql": "1 up", - "staging/migrations/1_foobar.down.sql": "1 down", - "prod/migrations/1_foobar.up.sql": "1 up", - "prod/migrations/1_foobar.down.sql": "1 down", - "prod/migrations/3_foobar.up.sql": "3 up", - "prod/migrations/4_foobar.up.sql": "4 up", - "prod/migrations/4_foobar.down.sql": "4 down", - "prod/migrations/5_foobar.down.sql": "5 down", - "prod/migrations/7_foobar.up.sql": "7 up", - "prod/migrations/7_foobar.down.sql": "7 down", - "prod/migrations/not-a-migration.txt": "", - "prod/migrations/0-random-stuff/whatever.txt": "", - }, - } - driver := s3Driver{ - bucket: "some-bucket", - prefix: "prod/migrations/", - migrations: source.NewMigrations(), - s3client: &s3Client, - } - err := driver.loadMigrations() - if err != nil { - t.Fatal(err) - } - st.Test(t, &driver) -} - -type fakeS3 struct { - s3.S3 - bucket string - objects map[string]string -} - -func (s *fakeS3) ListObjects(input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) { - bucket := aws.StringValue(input.Bucket) - if bucket != s.bucket { - return nil, errors.New("bucket not found") - } - prefix := aws.StringValue(input.Prefix) - delimiter := aws.StringValue(input.Delimiter) - var output s3.ListObjectsOutput - for name := range s.objects { - if strings.HasPrefix(name, prefix) { - if delimiter == "" || !strings.Contains(strings.Replace(name, prefix, "", 1), delimiter) { - output.Contents = append(output.Contents, &s3.Object{ - Key: aws.String(name), - }) - } - } - } - return &output, nil -} - -func (s *fakeS3) GetObject(input *s3.GetObjectInput) (*s3.GetObjectOutput, error) { - bucket := aws.StringValue(input.Bucket) - if bucket != s.bucket { - return nil, errors.New("bucket not found") - } - if data, ok := s.objects[aws.StringValue(input.Key)]; ok { - body := ioutil.NopCloser(strings.NewReader(data)) - return &s3.GetObjectOutput{Body: body}, nil - } - return nil, errors.New("object not found") -} diff --git a/vendor/github.com/rdallman/migrate/source/driver.go 
b/vendor/github.com/rdallman/migrate/source/driver.go
deleted file mode 100644
index b9c052c16..000000000
--- a/vendor/github.com/rdallman/migrate/source/driver.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Package source provides the Source interface.
-// All source drivers must implement this interface, register themselves,
-// optionally provide a `WithInstance` function and pass the tests
-// in package source/testing.
-package source
-
-import (
-	"fmt"
-	"io"
-	nurl "net/url"
-	"sync"
-)
-
-var driversMu sync.RWMutex
-var drivers = make(map[string]Driver)
-
-// Driver is the interface every source driver must implement.
-//
-// How to implement a source driver?
-//   1. Implement this interface.
-//   2. Optionally, add a function named `WithInstance`.
-//      This function should accept an existing source instance and a Config{} struct
-//      and return a driver instance.
-//   3. Add a test that calls source/testing.go:Test().
-//   4. Add your own tests for Open(), WithInstance() (when provided) and Close().
-//      All other functions are tested by tests in source/testing.
-//      Saves you some time and makes sure all source drivers behave the same way.
-//   5. Call Register in init().
-//
-// Guidelines:
-//   * All configuration input must come from the URL string in func Open()
-//     or the Config{} struct in WithInstance. Don't os.Getenv().
-//   * Drivers are supposed to be read only.
-//   * Ideally don't load any contents (into memory) in Open or WithInstance.
-type Driver interface {
-	// Open returns a new driver instance configured with parameters
-	// coming from the URL string. Migrate will call this function
-	// only once per instance.
-	Open(url string) (Driver, error)
-
-	// Close closes the underlying source instance managed by the driver.
-	// Migrate will call this function only once per instance.
-	Close() error
-
-	// First returns the very first migration version available to the driver.
-	// Migrate will call this function multiple times.
-	// If there is no version available, it must return os.ErrNotExist.
-	First() (version uint, err error)
-
-	// Prev returns the previous version for a given version available to the driver.
-	// Migrate will call this function multiple times.
-	// If there is no previous version available, it must return os.ErrNotExist.
-	Prev(version uint) (prevVersion uint, err error)
-
-	// Next returns the next version for a given version available to the driver.
-	// Migrate will call this function multiple times.
-	// If there is no next version available, it must return os.ErrNotExist.
-	Next(version uint) (nextVersion uint, err error)
-
-	// ReadUp returns the UP migration body and an identifier that helps
-	// finding this migration in the source for a given version.
-	// If there is no up migration available for this version,
-	// it must return os.ErrNotExist.
-	// Do not start reading, just return the ReadCloser!
-	ReadUp(version uint) (r io.ReadCloser, identifier string, err error)
-
-	// ReadDown returns the DOWN migration body and an identifier that helps
-	// finding this migration in the source for a given version.
-	// If there is no down migration available for this version,
-	// it must return os.ErrNotExist.
-	// Do not start reading, just return the ReadCloser!
-	ReadDown(version uint) (r io.ReadCloser, identifier string, err error)
-}
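This interface is the contract that the deleted s3, file, github, and go-bindata sources further down all satisfy. As a compressed sketch of steps 1 and 5 of the checklist above: a hypothetical in-memory driver that registers itself under a made-up "mem" scheme. Every name here (memDriver, the "mem" scheme, the canned migration) is illustrative only and not part of the removed code:

```go
package mem

import (
	"bytes"
	"io"
	"io/ioutil"
	"os"

	"github.com/mattes/migrate/source"
)

// memDriver is a hypothetical read-only source holding migration
// bodies in memory, keyed by version.
type memDriver struct {
	ups        map[uint]string
	migrations *source.Migrations
}

func init() {
	// Step 5: register under a URL scheme so migrate can find the driver.
	source.Register("mem", &memDriver{})
}

func (m *memDriver) Open(url string) (source.Driver, error) {
	// Per the guidelines, all configuration should come from the URL;
	// this sketch just returns a canned single-migration driver.
	d := &memDriver{
		ups:        map[uint]string{1: "CREATE TABLE t (id int);"},
		migrations: source.NewMigrations(),
	}
	d.migrations.Append(&source.Migration{Version: 1, Identifier: "init", Direction: source.Up})
	return d, nil
}

func (m *memDriver) Close() error { return nil }

func (m *memDriver) First() (uint, error) {
	if v, ok := m.migrations.First(); ok {
		return v, nil
	}
	return 0, os.ErrNotExist // boundary: no versions at all
}

func (m *memDriver) Prev(version uint) (uint, error) {
	if v, ok := m.migrations.Prev(version); ok {
		return v, nil
	}
	return 0, os.ErrNotExist
}

func (m *memDriver) Next(version uint) (uint, error) {
	if v, ok := m.migrations.Next(version); ok {
		return v, nil
	}
	return 0, os.ErrNotExist
}

func (m *memDriver) ReadUp(version uint) (io.ReadCloser, string, error) {
	if body, ok := m.ups[version]; ok {
		return ioutil.NopCloser(bytes.NewReader([]byte(body))), "init", nil
	}
	return nil, "", os.ErrNotExist
}

func (m *memDriver) ReadDown(version uint) (io.ReadCloser, string, error) {
	return nil, "", os.ErrNotExist // this sketch carries no down migrations
}
```

-
-// Open returns a new driver instance.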
-func Open(url string) (Driver, error) { - u, err := nurl.Parse(url) - if err != nil { - return nil, err - } - - if u.Scheme == "" { - return nil, fmt.Errorf("source driver: invalid URL scheme") - } - - driversMu.RLock() - d, ok := drivers[u.Scheme] - driversMu.RUnlock() - if !ok { - return nil, fmt.Errorf("source driver: unknown driver %v (forgotten import?)", u.Scheme) - } - - return d.Open(url) -} - -// Register globally registers a driver. -func Register(name string, driver Driver) { - driversMu.Lock() - defer driversMu.Unlock() - if driver == nil { - panic("Register driver is nil") - } - if _, dup := drivers[name]; dup { - panic("Register called twice for driver " + name) - } - drivers[name] = driver -} diff --git a/vendor/github.com/rdallman/migrate/source/driver_test.go b/vendor/github.com/rdallman/migrate/source/driver_test.go deleted file mode 100644 index 82284a0b9..000000000 --- a/vendor/github.com/rdallman/migrate/source/driver_test.go +++ /dev/null @@ -1,8 +0,0 @@ -package source - -func ExampleDriver() { - // see source/stub for an example - - // source/stub/stub.go has the driver implementation - // source/stub/stub_test.go runs source/testing/test.go:Test -} diff --git a/vendor/github.com/rdallman/migrate/source/file/README.md b/vendor/github.com/rdallman/migrate/source/file/README.md deleted file mode 100644 index 7912eff66..000000000 --- a/vendor/github.com/rdallman/migrate/source/file/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# file - -`file:///absolute/path` -`file://relative/path` diff --git a/vendor/github.com/rdallman/migrate/source/file/file.go b/vendor/github.com/rdallman/migrate/source/file/file.go deleted file mode 100644 index b97d0aa3d..000000000 --- a/vendor/github.com/rdallman/migrate/source/file/file.go +++ /dev/null @@ -1,127 +0,0 @@ -package file - -import ( - "fmt" - "io" - "io/ioutil" - nurl "net/url" - "os" - "path" - "path/filepath" - - "github.com/mattes/migrate/source" -) - -func init() { - source.Register("file", &File{}) -} - -type File struct { - url string - path string - migrations *source.Migrations -} - -func (f *File) Open(url string) (source.Driver, error) { - u, err := nurl.Parse(url) - if err != nil { - return nil, err - } - - // concat host and path to restore full path - // host might be `.` - p := u.Host + u.Path - - if len(p) == 0 { - // default to current directory if no path - wd, err := os.Getwd() - if err != nil { - return nil, err - } - p = wd - - } else if p[0:1] == "." 
|| p[0:1] != "/" { - // make path absolute if relative - abs, err := filepath.Abs(p) - if err != nil { - return nil, err - } - p = abs - } - - // scan directory - files, err := ioutil.ReadDir(p) - if err != nil { - return nil, err - } - - nf := &File{ - url: url, - path: p, - migrations: source.NewMigrations(), - } - - for _, fi := range files { - if !fi.IsDir() { - m, err := source.DefaultParse(fi.Name()) - if err != nil { - continue // ignore files that we can't parse - } - if !nf.migrations.Append(m) { - return nil, fmt.Errorf("unable to parse file %v", fi.Name()) - } - } - } - return nf, nil -} - -func (f *File) Close() error { - // nothing do to here - return nil -} - -func (f *File) First() (version uint, err error) { - if v, ok := f.migrations.First(); !ok { - return 0, &os.PathError{"first", f.path, os.ErrNotExist} - } else { - return v, nil - } -} - -func (f *File) Prev(version uint) (prevVersion uint, err error) { - if v, ok := f.migrations.Prev(version); !ok { - return 0, &os.PathError{fmt.Sprintf("prev for version %v", version), f.path, os.ErrNotExist} - } else { - return v, nil - } -} - -func (f *File) Next(version uint) (nextVersion uint, err error) { - if v, ok := f.migrations.Next(version); !ok { - return 0, &os.PathError{fmt.Sprintf("next for version %v", version), f.path, os.ErrNotExist} - } else { - return v, nil - } -} - -func (f *File) ReadUp(version uint) (r io.ReadCloser, identifier string, err error) { - if m, ok := f.migrations.Up(version); ok { - r, err := os.Open(path.Join(f.path, m.Raw)) - if err != nil { - return nil, "", err - } - return r, m.Identifier, nil - } - return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), f.path, os.ErrNotExist} -} - -func (f *File) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) { - if m, ok := f.migrations.Down(version); ok { - r, err := os.Open(path.Join(f.path, m.Raw)) - if err != nil { - return nil, "", err - } - return r, m.Identifier, nil - } - return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), f.path, os.ErrNotExist} -} diff --git a/vendor/github.com/rdallman/migrate/source/file/file_test.go b/vendor/github.com/rdallman/migrate/source/file/file_test.go deleted file mode 100644 index 310131c6f..000000000 --- a/vendor/github.com/rdallman/migrate/source/file/file_test.go +++ /dev/null @@ -1,207 +0,0 @@ -package file - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "path/filepath" - "testing" - - st "github.com/mattes/migrate/source/testing" -) - -func Test(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) - - // write files that meet driver test requirements - mustWriteFile(t, tmpDir, "1_foobar.up.sql", "1 up") - mustWriteFile(t, tmpDir, "1_foobar.down.sql", "1 down") - - mustWriteFile(t, tmpDir, "3_foobar.up.sql", "3 up") - - mustWriteFile(t, tmpDir, "4_foobar.up.sql", "4 up") - mustWriteFile(t, tmpDir, "4_foobar.down.sql", "4 down") - - mustWriteFile(t, tmpDir, "5_foobar.down.sql", "5 down") - - mustWriteFile(t, tmpDir, "7_foobar.up.sql", "7 up") - mustWriteFile(t, tmpDir, "7_foobar.down.sql", "7 down") - - f := &File{} - d, err := f.Open("file://" + tmpDir) - if err != nil { - t.Fatal(err) - } - - st.Test(t, d) -} - -func TestOpen(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "TestOpen") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) - - mustWriteFile(t, tmpDir, "1_foobar.up.sql", "") - mustWriteFile(t, tmpDir, "1_foobar.down.sql", "") - - if !filepath.IsAbs(tmpDir) 
{ - t.Fatal("expected tmpDir to be absolute path") - } - - f := &File{} - _, err = f.Open("file://" + tmpDir) // absolute path - if err != nil { - t.Fatal(err) - } -} - -func TestOpenWithRelativePath(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "TestOpen") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) - - wd, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - defer os.Chdir(wd) // rescue working dir after we are done - - if err := os.Chdir(tmpDir); err != nil { - t.Fatal(err) - } - - if err := os.Mkdir(filepath.Join(tmpDir, "foo"), os.ModePerm); err != nil { - t.Fatal(err) - } - - mustWriteFile(t, filepath.Join(tmpDir, "foo"), "1_foobar.up.sql", "") - - f := &File{} - - // dir: foo - d, err := f.Open("file://foo") - if err != nil { - t.Fatal(err) - } - _, err = d.First() - if err != nil { - t.Fatalf("expected first file in working dir %v for foo", tmpDir) - } - - // dir: ./foo - d, err = f.Open("file://./foo") - if err != nil { - t.Fatal(err) - } - _, err = d.First() - if err != nil { - t.Fatalf("expected first file in working dir %v for ./foo", tmpDir) - } -} - -func TestOpenDefaultsToCurrentDirectory(t *testing.T) { - wd, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - - f := &File{} - d, err := f.Open("file://") - if err != nil { - t.Fatal(err) - } - - if d.(*File).path != wd { - t.Fatal("expected driver to default to current directory") - } -} - -func TestOpenWithDuplicateVersion(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "TestOpenWithDuplicateVersion") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) - - mustWriteFile(t, tmpDir, "1_foo.up.sql", "") // 1 up - mustWriteFile(t, tmpDir, "1_bar.up.sql", "") // 1 up - - f := &File{} - _, err = f.Open("file://" + tmpDir) - if err == nil { - t.Fatal("expected err") - } -} - -func TestClose(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "TestOpen") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) - - f := &File{} - d, err := f.Open("file://" + tmpDir) - if err != nil { - t.Fatal(err) - } - - if d.Close() != nil { - t.Fatal("expected nil") - } -} - -func mustWriteFile(t testing.TB, dir, file string, body string) { - if err := ioutil.WriteFile(path.Join(dir, file), []byte(body), 06444); err != nil { - t.Fatal(err) - } -} - -func mustCreateBenchmarkDir(t *testing.B) (dir string) { - tmpDir, err := ioutil.TempDir("", "Benchmark") - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 1000; i++ { - mustWriteFile(t, tmpDir, fmt.Sprintf("%v_foobar.up.sql", i), "") - mustWriteFile(t, tmpDir, fmt.Sprintf("%v_foobar.down.sql", i), "") - } - - return tmpDir -} - -func BenchmarkOpen(b *testing.B) { - dir := mustCreateBenchmarkDir(b) - defer os.RemoveAll(dir) - b.ResetTimer() - for n := 0; n < b.N; n++ { - f := &File{} - f.Open("file://" + dir) - } - b.StopTimer() -} - -func BenchmarkNext(b *testing.B) { - dir := mustCreateBenchmarkDir(b) - defer os.RemoveAll(dir) - f := &File{} - d, _ := f.Open("file://" + dir) - b.ResetTimer() - v, err := d.First() - for n := 0; n < b.N; n++ { - for !os.IsNotExist(err) { - v, err = d.Next(v) - } - } - b.StopTimer() -} diff --git a/vendor/github.com/rdallman/migrate/source/github/.gitignore b/vendor/github.com/rdallman/migrate/source/github/.gitignore deleted file mode 100644 index 3006ad5eb..000000000 --- a/vendor/github.com/rdallman/migrate/source/github/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.github_test_secrets diff --git a/vendor/github.com/rdallman/migrate/source/github/README.md 
b/vendor/github.com/rdallman/migrate/source/github/README.md deleted file mode 100644 index 257f575c4..000000000 --- a/vendor/github.com/rdallman/migrate/source/github/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# github - -`github://user:personal-access-token@owner/repo/path` - -| URL Query | WithInstance Config | Description | -|------------|---------------------|-------------| -| user | | The username of the user connecting | -| personal-access-token | | An access token from Github (https://github.com/settings/tokens) | -| owner | | the repo owner | -| repo | | the name of the repository | -| path | | path in repo to migrations | diff --git a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1085649617_create_users_table.down.sql b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1085649617_create_users_table.down.sql deleted file mode 100644 index c99ddcdc8..000000000 --- a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1085649617_create_users_table.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE IF EXISTS users; diff --git a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1085649617_create_users_table.up.sql b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1085649617_create_users_table.up.sql deleted file mode 100644 index 92897dcab..000000000 --- a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1085649617_create_users_table.up.sql +++ /dev/null @@ -1,5 +0,0 @@ -CREATE TABLE users ( - user_id integer unique, - name varchar(40), - email varchar(40) -); diff --git a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1185749658_add_city_to_users.down.sql b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1185749658_add_city_to_users.down.sql deleted file mode 100644 index 940c60712..000000000 --- a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1185749658_add_city_to_users.down.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE users DROP COLUMN IF EXISTS city; diff --git a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1185749658_add_city_to_users.up.sql b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1185749658_add_city_to_users.up.sql deleted file mode 100644 index 67823edc9..000000000 --- a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1185749658_add_city_to_users.up.sql +++ /dev/null @@ -1,3 +0,0 @@ -ALTER TABLE users ADD COLUMN city varchar(100); - - diff --git a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.down.sql b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.down.sql deleted file mode 100644 index 3e87dd229..000000000 --- a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP INDEX IF EXISTS users_email_index; diff --git a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.up.sql b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.up.sql deleted file mode 100644 index fbeb4ab4e..000000000 --- a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.up.sql +++ /dev/null @@ -1,3 +0,0 @@ -CREATE UNIQUE INDEX CONCURRENTLY users_email_index ON users (email); - --- Lorem 
ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1385949617_create_books_table.down.sql b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1385949617_create_books_table.down.sql deleted file mode 100644 index 1a0b1a214..000000000 --- a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1385949617_create_books_table.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE IF EXISTS books; diff --git a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1385949617_create_books_table.up.sql b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1385949617_create_books_table.up.sql deleted file mode 100644 index f1503b518..000000000 --- a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1385949617_create_books_table.up.sql +++ /dev/null @@ -1,5 +0,0 @@ -CREATE TABLE books ( - user_id integer, - name varchar(40), - author varchar(40) -); diff --git a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1485949617_create_movies_table.down.sql b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1485949617_create_movies_table.down.sql deleted file mode 100644 index 3a5187689..000000000 --- a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1485949617_create_movies_table.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE IF EXISTS movies; diff --git a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1485949617_create_movies_table.up.sql b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1485949617_create_movies_table.up.sql deleted file mode 100644 index f0ef5943b..000000000 --- a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1485949617_create_movies_table.up.sql +++ /dev/null @@ -1,5 +0,0 @@ -CREATE TABLE movies ( - user_id integer, - name varchar(40), - director varchar(40) -); diff --git a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1585849751_just_a_comment.up.sql b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1585849751_just_a_comment.up.sql deleted file mode 100644 index 9b6b57a61..000000000 --- a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1585849751_just_a_comment.up.sql +++ /dev/null @@ -1 +0,0 @@ --- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1685849751_another_comment.up.sql b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1685849751_another_comment.up.sql deleted file mode 100644 index 9b6b57a61..000000000 --- a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1685849751_another_comment.up.sql +++ /dev/null @@ -1 +0,0 @@ --- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. 
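All of the example migration files being deleted here follow the naming convention the removed source package parses, `{version}_{identifier}.{up|down}.sql`, which is what `source.DefaultParse` (called by the s3, file, and github drivers above) expects. A small sketch of that mapping, using one of the file names from the examples:

```go
package main

import (
	"fmt"
	"log"

	"github.com/mattes/migrate/source"
)

func main() {
	// DefaultParse understands {version}_{identifier}.{up|down}.sql;
	// non-matching names fail to parse and are skipped by the drivers.
	m, err := source.DefaultParse("1085649617_create_users_table.up.sql")
	if err != nil {
		log.Fatal(err)
	}

	// Expected: 1085649617 create_users_table up
	fmt.Println(m.Version, m.Identifier, m.Direction)
}
```
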
diff --git a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1785849751_another_comment.up.sql b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1785849751_another_comment.up.sql deleted file mode 100644 index 9b6b57a61..000000000 --- a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1785849751_another_comment.up.sql +++ /dev/null @@ -1 +0,0 @@ --- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1885849751_another_comment.up.sql b/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1885849751_another_comment.up.sql deleted file mode 100644 index 9b6b57a61..000000000 --- a/vendor/github.com/rdallman/migrate/source/github/examples/migrations/1885849751_another_comment.up.sql +++ /dev/null @@ -1 +0,0 @@ --- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/github.com/rdallman/migrate/source/github/github.go b/vendor/github.com/rdallman/migrate/source/github/github.go deleted file mode 100644 index d534ed37b..000000000 --- a/vendor/github.com/rdallman/migrate/source/github/github.go +++ /dev/null @@ -1,180 +0,0 @@ -package github - -import ( - "bytes" - "context" - "fmt" - "io" - "io/ioutil" - nurl "net/url" - "os" - "path" - "strings" - - "github.com/google/go-github/github" - "github.com/mattes/migrate/source" -) - -func init() { - source.Register("github", &Github{}) -} - -var ( - ErrNoUserInfo = fmt.Errorf("no username:token provided") - ErrNoAccessToken = fmt.Errorf("no access token") - ErrInvalidRepo = fmt.Errorf("invalid repo") - ErrInvalidGithubClient = fmt.Errorf("expected *github.Client") - ErrNoDir = fmt.Errorf("no directory") -) - -type Github struct { - client *github.Client - url string - - pathOwner string - pathRepo string - path string - migrations *source.Migrations -} - -type Config struct { -} - -func (g *Github) Open(url string) (source.Driver, error) { - u, err := nurl.Parse(url) - if err != nil { - return nil, err - } - - if u.User == nil { - return nil, ErrNoUserInfo - } - - password, ok := u.User.Password() - if !ok { - return nil, ErrNoUserInfo - } - - tr := &github.BasicAuthTransport{ - Username: u.User.Username(), - Password: password, - } - - gn := &Github{ - client: github.NewClient(tr.Client()), - url: url, - migrations: source.NewMigrations(), - } - - // set owner, repo and path in repo - gn.pathOwner = u.Host - pe := strings.Split(strings.Trim(u.Path, "/"), "/") - if len(pe) < 1 { - return nil, ErrInvalidRepo - } - gn.pathRepo = pe[0] - if len(pe) > 1 { - gn.path = strings.Join(pe[1:], "/") - } - - if err := gn.readDirectory(); err != nil { - return nil, err - } - - return gn, nil -} - -func WithInstance(client *github.Client, config *Config) (source.Driver, error) { - gn := &Github{ - client: client, - migrations: source.NewMigrations(), - } - if err := gn.readDirectory(); err != nil { - return nil, err - } - return gn, nil -} - -func (g *Github) readDirectory() error { - fileContent, dirContents, _, err := g.client.Repositories.GetContents(context.Background(), g.pathOwner, g.pathRepo, g.path, &github.RepositoryContentGetOptions{}) - if err != nil { - return err 
- } - if fileContent != nil { - return ErrNoDir - } - - for _, fi := range dirContents { - m, err := source.DefaultParse(*fi.Name) - if err != nil { - continue // ignore files that we can't parse - } - if !g.migrations.Append(m) { - return fmt.Errorf("unable to parse file %v", *fi.Name) - } - } - - return nil -} - -func (g *Github) Close() error { - return nil -} - -func (g *Github) First() (version uint, er error) { - if v, ok := g.migrations.First(); !ok { - return 0, &os.PathError{"first", g.path, os.ErrNotExist} - } else { - return v, nil - } -} - -func (g *Github) Prev(version uint) (prevVersion uint, err error) { - if v, ok := g.migrations.Prev(version); !ok { - return 0, &os.PathError{fmt.Sprintf("prev for version %v", version), g.path, os.ErrNotExist} - } else { - return v, nil - } -} - -func (g *Github) Next(version uint) (nextVersion uint, err error) { - if v, ok := g.migrations.Next(version); !ok { - return 0, &os.PathError{fmt.Sprintf("next for version %v", version), g.path, os.ErrNotExist} - } else { - return v, nil - } -} - -func (g *Github) ReadUp(version uint) (r io.ReadCloser, identifier string, err error) { - if m, ok := g.migrations.Up(version); ok { - file, _, _, err := g.client.Repositories.GetContents(context.Background(), g.pathOwner, g.pathRepo, path.Join(g.path, m.Raw), &github.RepositoryContentGetOptions{}) - if err != nil { - return nil, "", err - } - if file != nil { - r, err := file.GetContent() - if err != nil { - return nil, "", err - } - return ioutil.NopCloser(bytes.NewReader([]byte(r))), m.Identifier, nil - } - } - return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), g.path, os.ErrNotExist} -} - -func (g *Github) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) { - if m, ok := g.migrations.Down(version); ok { - file, _, _, err := g.client.Repositories.GetContents(context.Background(), g.pathOwner, g.pathRepo, path.Join(g.path, m.Raw), &github.RepositoryContentGetOptions{}) - if err != nil { - return nil, "", err - } - if file != nil { - r, err := file.GetContent() - if err != nil { - return nil, "", err - } - return ioutil.NopCloser(bytes.NewReader([]byte(r))), m.Identifier, nil - } - } - return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), g.path, os.ErrNotExist} -} diff --git a/vendor/github.com/rdallman/migrate/source/github/github_test.go b/vendor/github.com/rdallman/migrate/source/github/github_test.go deleted file mode 100644 index 83e86618e..000000000 --- a/vendor/github.com/rdallman/migrate/source/github/github_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package github - -import ( - "bytes" - "io/ioutil" - "testing" - - st "github.com/mattes/migrate/source/testing" -) - -var GithubTestSecret = "" // username:token - -func init() { - secrets, err := ioutil.ReadFile(".github_test_secrets") - if err == nil { - GithubTestSecret = string(bytes.TrimSpace(secrets)[:]) - } -} - -func Test(t *testing.T) { - if len(GithubTestSecret) == 0 { - t.Skip("test requires .github_test_secrets") - } - - g := &Github{} - d, err := g.Open("github://" + GithubTestSecret + "@mattes/migrate_test_tmp/test") - if err != nil { - t.Fatal(err) - } - - st.Test(t, d) -} diff --git a/vendor/github.com/rdallman/migrate/source/go-bindata/README.md b/vendor/github.com/rdallman/migrate/source/go-bindata/README.md deleted file mode 100644 index cd9dd4b78..000000000 --- a/vendor/github.com/rdallman/migrate/source/go-bindata/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# go-bindata - -## Usage - - - -### Read bindata with 
NewWithSourceInstance - -```shell -go get -u github.com/jteeuwen/go-bindata/... -cd examples/migrations && go-bindata -pkg migrations . -``` - -```go -import ( - "github.com/mattes/migrate" - "github.com/mattes/migrate/source/go-bindata" - "github.com/mattes/migrate/source/go-bindata/examples/migrations" -) - -func main() { - // wrap assets into Resource - s := bindata.Resource(migrations.AssetNames(), - func(name string) ([]byte, error) { - return migrations.Asset(name) - }) - - d, err := bindata.WithInstance(s) - m, err := migrate.NewWithSourceInstance("go-bindata", d, "database://foobar") - m.Up() // run your migrations and handle the errors above of course -} -``` - -### Read bindata with URL (todo) - -This will restore the assets in a tmp directory and then -proxy to source/file. go-bindata must be in your `$PATH`. - -``` -migrate -source go-bindata://examples/migrations/bindata.go -``` - - diff --git a/vendor/github.com/rdallman/migrate/source/go-bindata/examples/migrations/bindata.go b/vendor/github.com/rdallman/migrate/source/go-bindata/examples/migrations/bindata.go deleted file mode 100644 index 282d5ef54..000000000 --- a/vendor/github.com/rdallman/migrate/source/go-bindata/examples/migrations/bindata.go +++ /dev/null @@ -1,304 +0,0 @@ -// Code generated by go-bindata. -// sources: -// 1085649617_create_users_table.down.sql -// 1085649617_create_users_table.up.sql -// 1185749658_add_city_to_users.down.sql -// 1185749658_add_city_to_users.up.sql -// DO NOT EDIT! - -package testdata - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" -) - -func bindataRead(data []byte, name string) ([]byte, error) { - gz, err := gzip.NewReader(bytes.NewBuffer(data)) - if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) - } - - var buf bytes.Buffer - _, err = io.Copy(&buf, gz) - clErr := gz.Close() - - if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) - } - if clErr != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -type asset struct { - bytes []byte - info os.FileInfo -} - -type bindataFileInfo struct { - name string - size int64 - mode os.FileMode - modTime time.Time -} - -func (fi bindataFileInfo) Name() string { - return fi.name -} -func (fi bindataFileInfo) Size() int64 { - return fi.size -} -func (fi bindataFileInfo) Mode() os.FileMode { - return fi.mode -} -func (fi bindataFileInfo) ModTime() time.Time { - return fi.modTime -} -func (fi bindataFileInfo) IsDir() bool { - return false -} -func (fi bindataFileInfo) Sys() interface{} { - return nil -} - -var __1085649617_create_users_tableDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x72\x09\xf2\x0f\x50\x08\x71\x74\xf2\x71\x55\xf0\x74\x53\x70\x8d\xf0\x0c\x0e\x09\x56\x28\x2d\x4e\x2d\x2a\xb6\xe6\x02\x04\x00\x00\xff\xff\x2c\x02\x3d\xa7\x1c\x00\x00\x00") - -func _1085649617_create_users_tableDownSqlBytes() ([]byte, error) { - return bindataRead( - __1085649617_create_users_tableDownSql, - "1085649617_create_users_table.down.sql", - ) -} - -func _1085649617_create_users_tableDownSql() (*asset, error) { - bytes, err := _1085649617_create_users_tableDownSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "1085649617_create_users_table.down.sql", size: 28, mode: os.FileMode(420), modTime: time.Unix(1485750305, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __1085649617_create_users_tableUpSql = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x72\x0e\x72\x75\x0c\x71\x55\x08\x71\x74\xf2\x71\x55\x28\x2d\x4e\x2d\x2a\x56\xd0\xe0\x52\x00\xb3\xe2\x33\x53\x14\x32\xf3\x4a\x52\xd3\x53\x8b\x14\x4a\xf3\x32\x0b\x4b\x53\x75\xb8\x14\x14\xf2\x12\x73\x53\x15\x14\x14\x14\xca\x12\x8b\x92\x33\x12\x8b\x34\x4c\x0c\x34\x41\xc2\xa9\xb9\x89\x99\x39\xa8\xc2\x5c\x9a\xd6\x5c\x80\x00\x00\x00\xff\xff\xa3\x57\xbc\x0b\x5f\x00\x00\x00") - -func _1085649617_create_users_tableUpSqlBytes() ([]byte, error) { - return bindataRead( - __1085649617_create_users_tableUpSql, - "1085649617_create_users_table.up.sql", - ) -} - -func _1085649617_create_users_tableUpSql() (*asset, error) { - bytes, err := _1085649617_create_users_tableUpSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "1085649617_create_users_table.up.sql", size: 95, mode: os.FileMode(420), modTime: time.Unix(1485803085, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __1185749658_add_city_to_usersDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\x2d\x4e\x2d\x2a\x56\x70\x09\xf2\x0f\x50\x70\xf6\xf7\x09\xf5\xf5\x53\xf0\x74\x53\x70\x8d\xf0\x0c\x0e\x09\x56\x48\xce\x2c\xa9\xb4\xe6\x02\x04\x00\x00\xff\xff\xb7\x52\x88\xd7\x2e\x00\x00\x00") - -func _1185749658_add_city_to_usersDownSqlBytes() ([]byte, error) { - return bindataRead( - __1185749658_add_city_to_usersDownSql, - "1185749658_add_city_to_users.down.sql", - ) -} - -func _1185749658_add_city_to_usersDownSql() (*asset, error) { - bytes, err := _1185749658_add_city_to_usersDownSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "1185749658_add_city_to_users.down.sql", size: 46, mode: os.FileMode(420), modTime: time.Unix(1485750443, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __1185749658_add_city_to_usersUpSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\x2d\x4e\x2d\x2a\x56\x70\x74\x71\x51\x70\xf6\xf7\x09\xf5\xf5\x53\x48\xce\x2c\xa9\x54\x28\x4b\x2c\x4a\xce\x48\x2c\xd2\x30\x34\x30\xd0\xb4\xe6\xe2\xe2\x02\x04\x00\x00\xff\xff\xa8\x0f\x49\xc6\x32\x00\x00\x00") - -func _1185749658_add_city_to_usersUpSqlBytes() ([]byte, error) { - return bindataRead( - __1185749658_add_city_to_usersUpSql, - "1185749658_add_city_to_users.up.sql", - ) -} - -func _1185749658_add_city_to_usersUpSql() (*asset, error) { - bytes, err := _1185749658_add_city_to_usersUpSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "1185749658_add_city_to_users.up.sql", size: 50, mode: os.FileMode(420), modTime: time.Unix(1485843733, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -// Asset loads and returns the asset for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func Asset(name string) ([]byte, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) - } - return a.bytes, nil - } - return nil, fmt.Errorf("Asset %s not found", name) -} - -// MustAsset is like Asset but panics when Asset would return an error. -// It simplifies safe initialization of global variables. 
-func MustAsset(name string) []byte { - a, err := Asset(name) - if err != nil { - panic("asset: Asset(" + name + "): " + err.Error()) - } - - return a -} - -// AssetInfo loads and returns the asset info for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func AssetInfo(name string) (os.FileInfo, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) - } - return a.info, nil - } - return nil, fmt.Errorf("AssetInfo %s not found", name) -} - -// AssetNames returns the names of the assets. -func AssetNames() []string { - names := make([]string, 0, len(_bindata)) - for name := range _bindata { - names = append(names, name) - } - return names -} - -// _bindata is a table, holding each asset generator, mapped to its name. -var _bindata = map[string]func() (*asset, error){ - "1085649617_create_users_table.down.sql": _1085649617_create_users_tableDownSql, - "1085649617_create_users_table.up.sql": _1085649617_create_users_tableUpSql, - "1185749658_add_city_to_users.down.sql": _1185749658_add_city_to_usersDownSql, - "1185749658_add_city_to_users.up.sql": _1185749658_add_city_to_usersUpSql, -} - -// AssetDir returns the file names below a certain -// directory embedded in the file by go-bindata. -// For example if you run go-bindata on data/... and data contains the -// following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png -// then AssetDir("data") would return []string{"foo.txt", "img"} -// AssetDir("data/img") would return []string{"a.png", "b.png"} -// AssetDir("foo.txt") and AssetDir("notexist") would return an error -// AssetDir("") will return []string{"data"}. 
-func AssetDir(name string) ([]string, error) { - node := _bintree - if len(name) != 0 { - cannonicalName := strings.Replace(name, "\\", "/", -1) - pathList := strings.Split(cannonicalName, "/") - for _, p := range pathList { - node = node.Children[p] - if node == nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - } - } - if node.Func != nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - rv := make([]string, 0, len(node.Children)) - for childName := range node.Children { - rv = append(rv, childName) - } - return rv, nil -} - -type bintree struct { - Func func() (*asset, error) - Children map[string]*bintree -} -var _bintree = &bintree{nil, map[string]*bintree{ - "1085649617_create_users_table.down.sql": &bintree{_1085649617_create_users_tableDownSql, map[string]*bintree{}}, - "1085649617_create_users_table.up.sql": &bintree{_1085649617_create_users_tableUpSql, map[string]*bintree{}}, - "1185749658_add_city_to_users.down.sql": &bintree{_1185749658_add_city_to_usersDownSql, map[string]*bintree{}}, - "1185749658_add_city_to_users.up.sql": &bintree{_1185749658_add_city_to_usersUpSql, map[string]*bintree{}}, -}} - -// RestoreAsset restores an asset under the given directory -func RestoreAsset(dir, name string) error { - data, err := Asset(name) - if err != nil { - return err - } - info, err := AssetInfo(name) - if err != nil { - return err - } - err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) - if err != nil { - return err - } - err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) - if err != nil { - return err - } - err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) - if err != nil { - return err - } - return nil -} - -// RestoreAssets restores an asset under the given directory recursively -func RestoreAssets(dir, name string) error { - children, err := AssetDir(name) - // File - if err != nil { - return RestoreAsset(dir, name) - } - // Dir - for _, child := range children { - err = RestoreAssets(dir, filepath.Join(name, child)) - if err != nil { - return err - } - } - return nil -} - -func _filePath(dir, name string) string { - cannonicalName := strings.Replace(name, "\\", "/", -1) - return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) 
-} - diff --git a/vendor/github.com/rdallman/migrate/source/go-bindata/go-bindata.go b/vendor/github.com/rdallman/migrate/source/go-bindata/go-bindata.go deleted file mode 100644 index 7426db71b..000000000 --- a/vendor/github.com/rdallman/migrate/source/go-bindata/go-bindata.go +++ /dev/null @@ -1,119 +0,0 @@ -package bindata - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - - "github.com/mattes/migrate/source" -) - -type AssetFunc func(name string) ([]byte, error) - -func Resource(names []string, afn AssetFunc) *AssetSource { - return &AssetSource{ - Names: names, - AssetFunc: afn, - } -} - -type AssetSource struct { - Names []string - AssetFunc AssetFunc -} - -func init() { - source.Register("go-bindata", &Bindata{}) -} - -type Bindata struct { - path string - assetSource *AssetSource - migrations *source.Migrations -} - -func (b *Bindata) Open(url string) (source.Driver, error) { - return nil, fmt.Errorf("not yet implemented") -} - -var ( - ErrNoAssetSource = fmt.Errorf("expects *AssetSource") -) - -func WithInstance(instance interface{}) (source.Driver, error) { - if _, ok := instance.(*AssetSource); !ok { - return nil, ErrNoAssetSource - } - as := instance.(*AssetSource) - - bn := &Bindata{ - path: "", - assetSource: as, - migrations: source.NewMigrations(), - } - - for _, fi := range as.Names { - m, err := source.DefaultParse(fi) - if err != nil { - continue // ignore files that we can't parse - } - - if !bn.migrations.Append(m) { - return nil, fmt.Errorf("unable to parse file %v", fi) - } - } - - return bn, nil -} - -func (b *Bindata) Close() error { - return nil -} - -func (b *Bindata) First() (version uint, err error) { - if v, ok := b.migrations.First(); !ok { - return 0, &os.PathError{"first", b.path, os.ErrNotExist} - } else { - return v, nil - } -} - -func (b *Bindata) Prev(version uint) (prevVersion uint, err error) { - if v, ok := b.migrations.Prev(version); !ok { - return 0, &os.PathError{fmt.Sprintf("prev for version %v", version), b.path, os.ErrNotExist} - } else { - return v, nil - } -} - -func (b *Bindata) Next(version uint) (nextVersion uint, err error) { - if v, ok := b.migrations.Next(version); !ok { - return 0, &os.PathError{fmt.Sprintf("next for version %v", version), b.path, os.ErrNotExist} - } else { - return v, nil - } -} - -func (b *Bindata) ReadUp(version uint) (r io.ReadCloser, identifier string, err error) { - if m, ok := b.migrations.Up(version); ok { - body, err := b.assetSource.AssetFunc(m.Raw) - if err != nil { - return nil, "", err - } - return ioutil.NopCloser(bytes.NewReader(body)), m.Identifier, nil - } - return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), b.path, os.ErrNotExist} -} - -func (b *Bindata) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) { - if m, ok := b.migrations.Down(version); ok { - body, err := b.assetSource.AssetFunc(m.Raw) - if err != nil { - return nil, "", err - } - return ioutil.NopCloser(bytes.NewReader(body)), m.Identifier, nil - } - return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), b.path, os.ErrNotExist} -} diff --git a/vendor/github.com/rdallman/migrate/source/go-bindata/go-bindata_test.go b/vendor/github.com/rdallman/migrate/source/go-bindata/go-bindata_test.go deleted file mode 100644 index 746a7b91f..000000000 --- a/vendor/github.com/rdallman/migrate/source/go-bindata/go-bindata_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package bindata - -import ( - "testing" - - "github.com/mattes/migrate/source/go-bindata/testdata" - st 
"github.com/mattes/migrate/source/testing" -) - -func Test(t *testing.T) { - // wrap assets into Resource first - s := Resource(testdata.AssetNames(), - func(name string) ([]byte, error) { - return testdata.Asset(name) - }) - - d, err := WithInstance(s) - if err != nil { - t.Fatal(err) - } - st.Test(t, d) -} - -func TestWithInstance(t *testing.T) { - // wrap assets into Resource - s := Resource(testdata.AssetNames(), - func(name string) ([]byte, error) { - return testdata.Asset(name) - }) - - _, err := WithInstance(s) - if err != nil { - t.Fatal(err) - } -} - -func TestOpen(t *testing.T) { - b := &Bindata{} - _, err := b.Open("") - if err == nil { - t.Fatal("expected err, because it's not implemented yet") - } -} diff --git a/vendor/github.com/rdallman/migrate/source/go-bindata/testdata/bindata.go b/vendor/github.com/rdallman/migrate/source/go-bindata/testdata/bindata.go deleted file mode 100644 index 304f3d87c..000000000 --- a/vendor/github.com/rdallman/migrate/source/go-bindata/testdata/bindata.go +++ /dev/null @@ -1,396 +0,0 @@ -// Code generated by go-bindata. -// sources: -// 1_test.down.sql -// 1_test.up.sql -// 3_test.up.sql -// 4_test.down.sql -// 4_test.up.sql -// 5_test.down.sql -// 7_test.down.sql -// 7_test.up.sql -// DO NOT EDIT! - -package testdata - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" -) - -func bindataRead(data []byte, name string) ([]byte, error) { - gz, err := gzip.NewReader(bytes.NewBuffer(data)) - if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) - } - - var buf bytes.Buffer - _, err = io.Copy(&buf, gz) - clErr := gz.Close() - - if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) - } - if clErr != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -type asset struct { - bytes []byte - info os.FileInfo -} - -type bindataFileInfo struct { - name string - size int64 - mode os.FileMode - modTime time.Time -} - -func (fi bindataFileInfo) Name() string { - return fi.name -} -func (fi bindataFileInfo) Size() int64 { - return fi.size -} -func (fi bindataFileInfo) Mode() os.FileMode { - return fi.mode -} -func (fi bindataFileInfo) ModTime() time.Time { - return fi.modTime -} -func (fi bindataFileInfo) IsDir() bool { - return false -} -func (fi bindataFileInfo) Sys() interface{} { - return nil -} - -var __1_testDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") - -func _1_testDownSqlBytes() ([]byte, error) { - return bindataRead( - __1_testDownSql, - "1_test.down.sql", - ) -} - -func _1_testDownSql() (*asset, error) { - bytes, err := _1_testDownSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "1_test.down.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440324, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __1_testUpSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") - -func _1_testUpSqlBytes() ([]byte, error) { - return bindataRead( - __1_testUpSql, - "1_test.up.sql", - ) -} - -func _1_testUpSql() (*asset, error) { - bytes, err := _1_testUpSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "1_test.up.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440319, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __3_testUpSql = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") - -func _3_testUpSqlBytes() ([]byte, error) { - return bindataRead( - __3_testUpSql, - "3_test.up.sql", - ) -} - -func _3_testUpSql() (*asset, error) { - bytes, err := _3_testUpSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "3_test.up.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440331, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __4_testDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") - -func _4_testDownSqlBytes() ([]byte, error) { - return bindataRead( - __4_testDownSql, - "4_test.down.sql", - ) -} - -func _4_testDownSql() (*asset, error) { - bytes, err := _4_testDownSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "4_test.down.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440337, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __4_testUpSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") - -func _4_testUpSqlBytes() ([]byte, error) { - return bindataRead( - __4_testUpSql, - "4_test.up.sql", - ) -} - -func _4_testUpSql() (*asset, error) { - bytes, err := _4_testUpSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "4_test.up.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440335, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __5_testDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") - -func _5_testDownSqlBytes() ([]byte, error) { - return bindataRead( - __5_testDownSql, - "5_test.down.sql", - ) -} - -func _5_testDownSql() (*asset, error) { - bytes, err := _5_testDownSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "5_test.down.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440340, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __7_testDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") - -func _7_testDownSqlBytes() ([]byte, error) { - return bindataRead( - __7_testDownSql, - "7_test.down.sql", - ) -} - -func _7_testDownSql() (*asset, error) { - bytes, err := _7_testDownSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "7_test.down.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440343, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var __7_testUpSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") - -func _7_testUpSqlBytes() ([]byte, error) { - return bindataRead( - __7_testUpSql, - "7_test.up.sql", - ) -} - -func _7_testUpSql() (*asset, error) { - bytes, err := _7_testUpSqlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "7_test.up.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440347, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -// Asset loads and returns the asset for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. 
-func Asset(name string) ([]byte, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) - } - return a.bytes, nil - } - return nil, fmt.Errorf("Asset %s not found", name) -} - -// MustAsset is like Asset but panics when Asset would return an error. -// It simplifies safe initialization of global variables. -func MustAsset(name string) []byte { - a, err := Asset(name) - if err != nil { - panic("asset: Asset(" + name + "): " + err.Error()) - } - - return a -} - -// AssetInfo loads and returns the asset info for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func AssetInfo(name string) (os.FileInfo, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) - } - return a.info, nil - } - return nil, fmt.Errorf("AssetInfo %s not found", name) -} - -// AssetNames returns the names of the assets. -func AssetNames() []string { - names := make([]string, 0, len(_bindata)) - for name := range _bindata { - names = append(names, name) - } - return names -} - -// _bindata is a table, holding each asset generator, mapped to its name. -var _bindata = map[string]func() (*asset, error){ - "1_test.down.sql": _1_testDownSql, - "1_test.up.sql": _1_testUpSql, - "3_test.up.sql": _3_testUpSql, - "4_test.down.sql": _4_testDownSql, - "4_test.up.sql": _4_testUpSql, - "5_test.down.sql": _5_testDownSql, - "7_test.down.sql": _7_testDownSql, - "7_test.up.sql": _7_testUpSql, -} - -// AssetDir returns the file names below a certain -// directory embedded in the file by go-bindata. -// For example if you run go-bindata on data/... and data contains the -// following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png -// then AssetDir("data") would return []string{"foo.txt", "img"} -// AssetDir("data/img") would return []string{"a.png", "b.png"} -// AssetDir("foo.txt") and AssetDir("notexist") would return an error -// AssetDir("") will return []string{"data"}. 
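Before the implementation below, a short sketch of how AssetDir combines with the other generated accessors to walk an embedded tree. This is hypothetical usage, assuming the vendored testdata package is importable at the path its own driver test uses; in this package every migration sits at the root, so AssetDir("") lists the files directly:

```go
package main

import (
	"fmt"

	"github.com/mattes/migrate/source/go-bindata/testdata"
)

func main() {
	// The test migrations are flat, so the root listing is the file set.
	names, err := testdata.AssetDir("")
	if err != nil {
		panic(err)
	}
	for _, name := range names {
		// MustAsset is safe here: every name came from the bindata table.
		body := testdata.MustAsset(name)
		fmt.Printf("%s: %d bytes\n", name, len(body))
	}
}
```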
-func AssetDir(name string) ([]string, error) { - node := _bintree - if len(name) != 0 { - cannonicalName := strings.Replace(name, "\\", "/", -1) - pathList := strings.Split(cannonicalName, "/") - for _, p := range pathList { - node = node.Children[p] - if node == nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - } - } - if node.Func != nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - rv := make([]string, 0, len(node.Children)) - for childName := range node.Children { - rv = append(rv, childName) - } - return rv, nil -} - -type bintree struct { - Func func() (*asset, error) - Children map[string]*bintree -} -var _bintree = &bintree{nil, map[string]*bintree{ - "1_test.down.sql": &bintree{_1_testDownSql, map[string]*bintree{}}, - "1_test.up.sql": &bintree{_1_testUpSql, map[string]*bintree{}}, - "3_test.up.sql": &bintree{_3_testUpSql, map[string]*bintree{}}, - "4_test.down.sql": &bintree{_4_testDownSql, map[string]*bintree{}}, - "4_test.up.sql": &bintree{_4_testUpSql, map[string]*bintree{}}, - "5_test.down.sql": &bintree{_5_testDownSql, map[string]*bintree{}}, - "7_test.down.sql": &bintree{_7_testDownSql, map[string]*bintree{}}, - "7_test.up.sql": &bintree{_7_testUpSql, map[string]*bintree{}}, -}} - -// RestoreAsset restores an asset under the given directory -func RestoreAsset(dir, name string) error { - data, err := Asset(name) - if err != nil { - return err - } - info, err := AssetInfo(name) - if err != nil { - return err - } - err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) - if err != nil { - return err - } - err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) - if err != nil { - return err - } - err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) - if err != nil { - return err - } - return nil -} - -// RestoreAssets restores an asset under the given directory recursively -func RestoreAssets(dir, name string) error { - children, err := AssetDir(name) - // File - if err != nil { - return RestoreAsset(dir, name) - } - // Dir - for _, child := range children { - err = RestoreAssets(dir, filepath.Join(name, child)) - if err != nil { - return err - } - } - return nil -} - -func _filePath(dir, name string) string { - cannonicalName := strings.Replace(name, "\\", "/", -1) - return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) 
-} - diff --git a/vendor/github.com/rdallman/migrate/source/google-cloud-storage/README.md b/vendor/github.com/rdallman/migrate/source/google-cloud-storage/README.md deleted file mode 100644 index e61cb2311..000000000 --- a/vendor/github.com/rdallman/migrate/source/google-cloud-storage/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# google-cloud-storage - -`gcs:///` diff --git a/vendor/github.com/rdallman/migrate/source/google-cloud-storage/storage.go b/vendor/github.com/rdallman/migrate/source/google-cloud-storage/storage.go deleted file mode 100644 index c1a18bc2f..000000000 --- a/vendor/github.com/rdallman/migrate/source/google-cloud-storage/storage.go +++ /dev/null @@ -1,119 +0,0 @@ -package googlecloudstorage - -import ( - "fmt" - "io" - "net/url" - "os" - "path" - "strings" - - "cloud.google.com/go/storage" - "github.com/mattes/migrate/source" - "golang.org/x/net/context" - "google.golang.org/api/iterator" -) - -func init() { - source.Register("gcs", &gcs{}) -} - -type gcs struct { - bucket *storage.BucketHandle - prefix string - migrations *source.Migrations -} - -func (g *gcs) Open(folder string) (source.Driver, error) { - u, err := url.Parse(folder) - if err != nil { - return nil, err - } - client, err := storage.NewClient(context.Background()) - if err != nil { - return nil, err - } - driver := gcs{ - bucket: client.Bucket(u.Host), - prefix: strings.Trim(u.Path, "/") + "/", - migrations: source.NewMigrations(), - } - err = driver.loadMigrations() - if err != nil { - return nil, err - } - return &driver, nil -} - -func (g *gcs) loadMigrations() error { - iter := g.bucket.Objects(context.Background(), &storage.Query{ - Prefix: g.prefix, - Delimiter: "/", - }) - object, err := iter.Next() - for ; err == nil; object, err = iter.Next() { - _, fileName := path.Split(object.Name) - m, parseErr := source.DefaultParse(fileName) - if parseErr != nil { - continue - } - if !g.migrations.Append(m) { - return fmt.Errorf("unable to parse file %v", object.Name) - } - } - if err != iterator.Done { - return err - } - return nil -} - -func (g *gcs) Close() error { - return nil -} - -func (g *gcs) First() (uint, error) { - v, ok := g.migrations.First() - if !ok { - return 0, os.ErrNotExist - } - return v, nil -} - -func (g *gcs) Prev(version uint) (uint, error) { - v, ok := g.migrations.Prev(version) - if !ok { - return 0, os.ErrNotExist - } - return v, nil -} - -func (g *gcs) Next(version uint) (uint, error) { - v, ok := g.migrations.Next(version) - if !ok { - return 0, os.ErrNotExist - } - return v, nil -} - -func (g *gcs) ReadUp(version uint) (io.ReadCloser, string, error) { - if m, ok := g.migrations.Up(version); ok { - return g.open(m) - } - return nil, "", os.ErrNotExist -} - -func (g *gcs) ReadDown(version uint) (io.ReadCloser, string, error) { - if m, ok := g.migrations.Down(version); ok { - return g.open(m) - } - return nil, "", os.ErrNotExist -} - -func (g *gcs) open(m *source.Migration) (io.ReadCloser, string, error) { - objectPath := path.Join(g.prefix, m.Raw) - reader, err := g.bucket.Object(objectPath).NewReader(context.Background()) - if err != nil { - return nil, "", err - } - return reader, m.Identifier, nil -} diff --git a/vendor/github.com/rdallman/migrate/source/google-cloud-storage/storage_test.go b/vendor/github.com/rdallman/migrate/source/google-cloud-storage/storage_test.go deleted file mode 100644 index 2af4947cc..000000000 --- a/vendor/github.com/rdallman/migrate/source/google-cloud-storage/storage_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package googlecloudstorage - -import ( - 
"testing" - - "github.com/fsouza/fake-gcs-server/fakestorage" - "github.com/mattes/migrate/source" - st "github.com/mattes/migrate/source/testing" -) - -func Test(t *testing.T) { - server := fakestorage.NewServer([]fakestorage.Object{ - {BucketName: "some-bucket", Name: "staging/migrations/1_foobar.up.sql", Content: []byte("1 up")}, - {BucketName: "some-bucket", Name: "staging/migrations/1_foobar.down.sql", Content: []byte("1 down")}, - {BucketName: "some-bucket", Name: "prod/migrations/1_foobar.up.sql", Content: []byte("1 up")}, - {BucketName: "some-bucket", Name: "prod/migrations/1_foobar.down.sql", Content: []byte("1 down")}, - {BucketName: "some-bucket", Name: "prod/migrations/3_foobar.up.sql", Content: []byte("3 up")}, - {BucketName: "some-bucket", Name: "prod/migrations/4_foobar.up.sql", Content: []byte("4 up")}, - {BucketName: "some-bucket", Name: "prod/migrations/4_foobar.down.sql", Content: []byte("4 down")}, - {BucketName: "some-bucket", Name: "prod/migrations/5_foobar.down.sql", Content: []byte("5 down")}, - {BucketName: "some-bucket", Name: "prod/migrations/7_foobar.up.sql", Content: []byte("7 up")}, - {BucketName: "some-bucket", Name: "prod/migrations/7_foobar.down.sql", Content: []byte("7 down")}, - {BucketName: "some-bucket", Name: "prod/migrations/not-a-migration.txt"}, - {BucketName: "some-bucket", Name: "prod/migrations/0-random-stuff/whatever.txt"}, - }) - defer server.Stop() - driver := gcs{ - bucket: server.Client().Bucket("some-bucket"), - prefix: "prod/migrations/", - migrations: source.NewMigrations(), - } - err := driver.loadMigrations() - if err != nil { - t.Fatal(err) - } - st.Test(t, &driver) -} diff --git a/vendor/github.com/rdallman/migrate/source/migration.go b/vendor/github.com/rdallman/migrate/source/migration.go deleted file mode 100644 index 97a4ee226..000000000 --- a/vendor/github.com/rdallman/migrate/source/migration.go +++ /dev/null @@ -1,143 +0,0 @@ -package source - -import ( - "sort" -) - -// Direction is either up or down. -type Direction string - -const ( - Down Direction = "down" - Up = "up" -) - -// Migration is a helper struct for source drivers that need to -// build the full directory tree in memory. -// Migration is fully independent from migrate.Migration. -type Migration struct { - // Version is the version of this migration. - Version uint - - // Identifier can be any string that helps identifying - // this migration in the source. - Identifier string - - // Direction is either Up or Down. - Direction Direction - - // Raw holds the raw location path to this migration in source. - // ReadUp and ReadDown will use this. - Raw string -} - -// Migrations wraps Migration and has an internal index -// to keep track of Migration order. 
-type Migrations struct { - index uintSlice - migrations map[uint]map[Direction]*Migration -} - -func NewMigrations() *Migrations { - return &Migrations{ - index: make(uintSlice, 0), - migrations: make(map[uint]map[Direction]*Migration), - } -} - -func (i *Migrations) Append(m *Migration) (ok bool) { - if m == nil { - return false - } - - if i.migrations[m.Version] == nil { - i.migrations[m.Version] = make(map[Direction]*Migration) - } - - // reject duplicate versions - if _, dup := i.migrations[m.Version][m.Direction]; dup { - return false - } - - i.migrations[m.Version][m.Direction] = m - i.buildIndex() - - return true -} - -func (i *Migrations) buildIndex() { - i.index = make(uintSlice, 0) - for version, _ := range i.migrations { - i.index = append(i.index, version) - } - sort.Sort(i.index) -} - -func (i *Migrations) First() (version uint, ok bool) { - if len(i.index) == 0 { - return 0, false - } - return i.index[0], true -} - -func (i *Migrations) Prev(version uint) (prevVersion uint, ok bool) { - pos := i.findPos(version) - if pos >= 1 && len(i.index) > pos-1 { - return i.index[pos-1], true - } - return 0, false -} - -func (i *Migrations) Next(version uint) (nextVersion uint, ok bool) { - pos := i.findPos(version) - if pos >= 0 && len(i.index) > pos+1 { - return i.index[pos+1], true - } - return 0, false -} - -func (i *Migrations) Up(version uint) (m *Migration, ok bool) { - if _, ok := i.migrations[version]; ok { - if mx, ok := i.migrations[version][Up]; ok { - return mx, true - } - } - return nil, false -} - -func (i *Migrations) Down(version uint) (m *Migration, ok bool) { - if _, ok := i.migrations[version]; ok { - if mx, ok := i.migrations[version][Down]; ok { - return mx, true - } - } - return nil, false -} - -func (i *Migrations) findPos(version uint) int { - if len(i.index) > 0 { - ix := i.index.Search(version) - if ix < len(i.index) && i.index[ix] == version { - return ix - } - } - return -1 -} - -type uintSlice []uint - -func (s uintSlice) Len() int { - return len(s) -} - -func (s uintSlice) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s uintSlice) Less(i, j int) bool { - return s[i] < s[j] -} - -func (s uintSlice) Search(x uint) int { - return sort.Search(len(s), func(i int) bool { return s[i] >= x }) -} diff --git a/vendor/github.com/rdallman/migrate/source/migration_test.go b/vendor/github.com/rdallman/migrate/source/migration_test.go deleted file mode 100644 index 857cd26af..000000000 --- a/vendor/github.com/rdallman/migrate/source/migration_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package source - -import ( - "testing" -) - -func TestNewMigrations(t *testing.T) { - // TODO -} - -func TestAppend(t *testing.T) { - // TODO -} - -func TestBuildIndex(t *testing.T) { - // TODO -} - -func TestFirst(t *testing.T) { - // TODO -} - -func TestPrev(t *testing.T) { - // TODO -} - -func TestUp(t *testing.T) { - // TODO -} - -func TestDown(t *testing.T) { - // TODO -} - -func TestFindPos(t *testing.T) { - m := Migrations{index: uintSlice{1, 2, 3}} - if p := m.findPos(0); p != -1 { - t.Errorf("expected -1, got %v", p) - } - if p := m.findPos(1); p != 0 { - t.Errorf("expected 0, got %v", p) - } - if p := m.findPos(3); p != 2 { - t.Errorf("expected 2, got %v", p) - } -} diff --git a/vendor/github.com/rdallman/migrate/source/parse.go b/vendor/github.com/rdallman/migrate/source/parse.go deleted file mode 100644 index 2f888fe75..000000000 --- a/vendor/github.com/rdallman/migrate/source/parse.go +++ /dev/null @@ -1,39 +0,0 @@ -package source - -import ( - "fmt" - "regexp" - 
"strconv" -) - -var ( - ErrParse = fmt.Errorf("no match") -) - -var ( - DefaultParse = Parse - DefaultRegex = Regex -) - -// Regex matches the following pattern: -// 123_name.up.ext -// 123_name.down.ext -var Regex = regexp.MustCompile(`^([0-9]+)_(.*)\.(` + string(Down) + `|` + string(Up) + `)\.(.*)$`) - -// Parse returns Migration for matching Regex pattern. -func Parse(raw string) (*Migration, error) { - m := Regex.FindStringSubmatch(raw) - if len(m) == 5 { - versionUint64, err := strconv.ParseUint(m[1], 10, 64) - if err != nil { - return nil, err - } - return &Migration{ - Version: uint(versionUint64), - Identifier: m[2], - Direction: Direction(m[3]), - Raw: raw, - }, nil - } - return nil, ErrParse -} diff --git a/vendor/github.com/rdallman/migrate/source/parse_test.go b/vendor/github.com/rdallman/migrate/source/parse_test.go deleted file mode 100644 index d06356cc8..000000000 --- a/vendor/github.com/rdallman/migrate/source/parse_test.go +++ /dev/null @@ -1,106 +0,0 @@ -package source - -import ( - "testing" -) - -func TestParse(t *testing.T) { - tt := []struct { - name string - expectErr error - expectMigration *Migration - }{ - { - name: "1_foobar.up.sql", - expectErr: nil, - expectMigration: &Migration{ - Version: 1, - Identifier: "foobar", - Direction: Up, - Raw: "1_foobar.up.sql", - }, - }, - { - name: "1_foobar.down.sql", - expectErr: nil, - expectMigration: &Migration{ - Version: 1, - Identifier: "foobar", - Direction: Down, - Raw: "1_foobar.down.sql", - }, - }, - { - name: "1_f-o_ob+ar.up.sql", - expectErr: nil, - expectMigration: &Migration{ - Version: 1, - Identifier: "f-o_ob+ar", - Direction: Up, - Raw: "1_f-o_ob+ar.up.sql", - }, - }, - { - name: "1485385885_foobar.up.sql", - expectErr: nil, - expectMigration: &Migration{ - Version: 1485385885, - Identifier: "foobar", - Direction: Up, - Raw: "1485385885_foobar.up.sql", - }, - }, - { - name: "20170412214116_date_foobar.up.sql", - expectErr: nil, - expectMigration: &Migration{ - Version: 20170412214116, - Identifier: "date_foobar", - Direction: Up, - Raw: "20170412214116_date_foobar.up.sql", - }, - }, - { - name: "-1_foobar.up.sql", - expectErr: ErrParse, - expectMigration: nil, - }, - { - name: "foobar.up.sql", - expectErr: ErrParse, - expectMigration: nil, - }, - { - name: "1.up.sql", - expectErr: ErrParse, - expectMigration: nil, - }, - { - name: "1_foobar.sql", - expectErr: ErrParse, - expectMigration: nil, - }, - { - name: "1_foobar.up", - expectErr: ErrParse, - expectMigration: nil, - }, - { - name: "1_foobar.down", - expectErr: ErrParse, - expectMigration: nil, - }, - } - - for i, v := range tt { - f, err := Parse(v.name) - - if err != v.expectErr { - t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i) - } - - if v.expectMigration != nil && *f != *v.expectMigration { - t.Errorf("expected %+v, got %+v, in %v", *v.expectMigration, *f, i) - } - } -} diff --git a/vendor/github.com/rdallman/migrate/source/stub/stub.go b/vendor/github.com/rdallman/migrate/source/stub/stub.go deleted file mode 100644 index 0f4153c54..000000000 --- a/vendor/github.com/rdallman/migrate/source/stub/stub.go +++ /dev/null @@ -1,85 +0,0 @@ -package stub - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - - "github.com/mattes/migrate/source" -) - -func init() { - source.Register("stub", &Stub{}) -} - -type Config struct{} - -// d, _ := source.Open("stub://") -// d.(*stub.Stub).Migrations = - -type Stub struct { - Url string - Instance interface{} - Migrations *source.Migrations - Config *Config -} - -func (s *Stub) Open(url string) 
(source.Driver, error) { - return &Stub{ - Url: url, - Migrations: source.NewMigrations(), - Config: &Config{}, - }, nil -} - -func WithInstance(instance interface{}, config *Config) (source.Driver, error) { - return &Stub{ - Instance: instance, - Migrations: source.NewMigrations(), - Config: config, - }, nil -} - -func (s *Stub) Close() error { - return nil -} - -func (s *Stub) First() (version uint, err error) { - if v, ok := s.Migrations.First(); !ok { - return 0, &os.PathError{"first", s.Url, os.ErrNotExist} // TODO: s.Url can be empty when called with WithInstance - } else { - return v, nil - } -} - -func (s *Stub) Prev(version uint) (prevVersion uint, err error) { - if v, ok := s.Migrations.Prev(version); !ok { - return 0, &os.PathError{fmt.Sprintf("prev for version %v", version), s.Url, os.ErrNotExist} - } else { - return v, nil - } -} - -func (s *Stub) Next(version uint) (nextVersion uint, err error) { - if v, ok := s.Migrations.Next(version); !ok { - return 0, &os.PathError{fmt.Sprintf("next for version %v", version), s.Url, os.ErrNotExist} - } else { - return v, nil - } -} - -func (s *Stub) ReadUp(version uint) (r io.ReadCloser, identifier string, err error) { - if m, ok := s.Migrations.Up(version); ok { - return ioutil.NopCloser(bytes.NewBufferString(m.Identifier)), fmt.Sprintf("%v.up.stub", version), nil - } - return nil, "", &os.PathError{fmt.Sprintf("read up version %v", version), s.Url, os.ErrNotExist} -} - -func (s *Stub) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) { - if m, ok := s.Migrations.Down(version); ok { - return ioutil.NopCloser(bytes.NewBufferString(m.Identifier)), fmt.Sprintf("%v.down.stub", version), nil - } - return nil, "", &os.PathError{fmt.Sprintf("read down version %v", version), s.Url, os.ErrNotExist} -} diff --git a/vendor/github.com/rdallman/migrate/source/stub/stub_test.go b/vendor/github.com/rdallman/migrate/source/stub/stub_test.go deleted file mode 100644 index 05ce819d7..000000000 --- a/vendor/github.com/rdallman/migrate/source/stub/stub_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package stub - -import ( - "testing" - - "github.com/mattes/migrate/source" - st "github.com/mattes/migrate/source/testing" -) - -func Test(t *testing.T) { - s := &Stub{} - d, err := s.Open("") - if err != nil { - t.Fatal(err) - } - - m := source.NewMigrations() - m.Append(&source.Migration{Version: 1, Direction: source.Up}) - m.Append(&source.Migration{Version: 1, Direction: source.Down}) - m.Append(&source.Migration{Version: 3, Direction: source.Up}) - m.Append(&source.Migration{Version: 4, Direction: source.Up}) - m.Append(&source.Migration{Version: 4, Direction: source.Down}) - m.Append(&source.Migration{Version: 5, Direction: source.Down}) - m.Append(&source.Migration{Version: 7, Direction: source.Up}) - m.Append(&source.Migration{Version: 7, Direction: source.Down}) - - d.(*Stub).Migrations = m - - st.Test(t, d) -} diff --git a/vendor/github.com/rdallman/migrate/source/testing/testing.go b/vendor/github.com/rdallman/migrate/source/testing/testing.go deleted file mode 100644 index 3cc003c59..000000000 --- a/vendor/github.com/rdallman/migrate/source/testing/testing.go +++ /dev/null @@ -1,169 +0,0 @@ -// Package testing has the source tests. -// All source drivers must pass the Test function. -// This lives in its own package so it stays a test dependency. -package testing - -import ( - "os" - "testing" - - "github.com/mattes/migrate/source" -) - -// Test runs tests against source implementations.
-// It assumes that the driver tests have access to the following migrations: -// -// u = up migration, d = down migration, n = version -// | 1 | - | 3 | 4 | 5 | - | 7 | -// | u d | - | u | u d | d | - | u d | -// -// See source/stub/stub_test.go or source/file/file_test.go for an example. -func Test(t *testing.T, d source.Driver) { - TestFirst(t, d) - TestPrev(t, d) - TestNext(t, d) - TestReadUp(t, d) - TestReadDown(t, d) -} - -func TestFirst(t *testing.T, d source.Driver) { - version, err := d.First() - if err != nil { - t.Fatalf("First: expected err to be nil, got %v", err) - } - if version != 1 { - t.Errorf("First: expected 1, got %v", version) - } -} - -func TestPrev(t *testing.T, d source.Driver) { - tt := []struct { - version uint - expectErr error - expectPrevVersion uint - }{ - {version: 0, expectErr: os.ErrNotExist}, - {version: 1, expectErr: os.ErrNotExist}, - {version: 2, expectErr: os.ErrNotExist}, - {version: 3, expectErr: nil, expectPrevVersion: 1}, - {version: 4, expectErr: nil, expectPrevVersion: 3}, - {version: 5, expectErr: nil, expectPrevVersion: 4}, - {version: 6, expectErr: os.ErrNotExist}, - {version: 7, expectErr: nil, expectPrevVersion: 5}, - {version: 8, expectErr: os.ErrNotExist}, - {version: 9, expectErr: os.ErrNotExist}, - } - - for i, v := range tt { - pv, err := d.Prev(v.version) - if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) && v.expectErr != err { - t.Errorf("Prev: expected %v, got %v, in %v", v.expectErr, err, i) - } - if err == nil && v.expectPrevVersion != pv { - t.Errorf("Prev: expected %v, got %v, in %v", v.expectPrevVersion, pv, i) - } - } -} - -func TestNext(t *testing.T, d source.Driver) { - tt := []struct { - version uint - expectErr error - expectNextVersion uint - }{ - {version: 0, expectErr: os.ErrNotExist}, - {version: 1, expectErr: nil, expectNextVersion: 3}, - {version: 2, expectErr: os.ErrNotExist}, - {version: 3, expectErr: nil, expectNextVersion: 4}, - {version: 4, expectErr: nil, expectNextVersion: 5}, - {version: 5, expectErr: nil, expectNextVersion: 7}, - {version: 6, expectErr: os.ErrNotExist}, - {version: 7, expectErr: os.ErrNotExist}, - {version: 8, expectErr: os.ErrNotExist}, - {version: 9, expectErr: os.ErrNotExist}, - } - - for i, v := range tt { - nv, err := d.Next(v.version) - if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) && v.expectErr != err { - t.Errorf("Next: expected %v, got %v, in %v", v.expectErr, err, i) - } - if err == nil && v.expectNextVersion != nv { - t.Errorf("Next: expected %v, got %v, in %v", v.expectNextVersion, nv, i) - } - } -} - -func TestReadUp(t *testing.T, d source.Driver) { - tt := []struct { - version uint - expectErr error - expectUp bool - }{ - {version: 0, expectErr: os.ErrNotExist}, - {version: 1, expectErr: nil, expectUp: true}, - {version: 2, expectErr: os.ErrNotExist}, - {version: 3, expectErr: nil, expectUp: true}, - {version: 4, expectErr: nil, expectUp: true}, - {version: 5, expectErr: os.ErrNotExist}, - {version: 6, expectErr: os.ErrNotExist}, - {version: 7, expectErr: nil, expectUp: true}, - {version: 8, expectErr: os.ErrNotExist}, - } - - for i, v := range tt { - up, identifier, err := d.ReadUp(v.version) - if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || - (v.expectErr != os.ErrNotExist && err != v.expectErr) { - t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i) - - } else if err == nil { - if len(identifier) == 0 { - t.Errorf("expected identifier not to be empty, in %v", i) - } - - if v.expectUp == true && up == nil { - t.Errorf("expected up 
not to be nil, in %v", i) - } else if v.expectUp == false && up != nil { - t.Errorf("expected up to be nil, got %v, in %v", up, i) - } - } - } -} - -func TestReadDown(t *testing.T, d source.Driver) { - tt := []struct { - version uint - expectErr error - expectDown bool - }{ - {version: 0, expectErr: os.ErrNotExist}, - {version: 1, expectErr: nil, expectDown: true}, - {version: 2, expectErr: os.ErrNotExist}, - {version: 3, expectErr: os.ErrNotExist}, - {version: 4, expectErr: nil, expectDown: true}, - {version: 5, expectErr: nil, expectDown: true}, - {version: 6, expectErr: os.ErrNotExist}, - {version: 7, expectErr: nil, expectDown: true}, - {version: 8, expectErr: os.ErrNotExist}, - } - - for i, v := range tt { - down, identifier, err := d.ReadDown(v.version) - if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || - (v.expectErr != os.ErrNotExist && err != v.expectErr) { - t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i) - - } else if err == nil { - if len(identifier) == 0 { - t.Errorf("expected identifier not to be empty, in %v", i) - } - - if v.expectDown == true && down == nil { - t.Errorf("expected down not to be nil, in %v", i) - } else if v.expectDown == false && down != nil { - t.Errorf("expected down to be nil, got %v, in %v", down, i) - } - } - } -} diff --git a/vendor/github.com/rdallman/migrate/testing/docker.go b/vendor/github.com/rdallman/migrate/testing/docker.go deleted file mode 100644 index f7a7c4152..000000000 --- a/vendor/github.com/rdallman/migrate/testing/docker.go +++ /dev/null @@ -1,254 +0,0 @@ -// Package testing is used in driver tests. -package testing - -import ( - "bufio" - "context" - "encoding/json" - "fmt" - "io" - "math/rand" - "strconv" - "strings" - "testing" - "time" - dockertypes "github.com/docker/docker/api/types" - dockercontainer "github.com/docker/docker/api/types/container" - dockernetwork "github.com/docker/docker/api/types/network" - dockerclient "github.com/docker/docker/client" -) - -func NewDockerContainer(t testing.TB, image string, env []string, cmd []string) (*DockerContainer, error) { - c, err := dockerclient.NewEnvClient() - if err != nil { - return nil, err - } - - if cmd == nil { - cmd = make([]string, 0) - } - - contr := &DockerContainer{ - t: t, - client: c, - ImageName: image, - ENV: env, - Cmd: cmd, - } - - if err := contr.PullImage(); err != nil { - return nil, err - } - - if err := contr.Start(); err != nil { - return nil, err - } - - return contr, nil -} - -// DockerContainer implements Instance interface -type DockerContainer struct { - t testing.TB - client *dockerclient.Client - ImageName string - ENV []string - Cmd []string - ContainerId string - ContainerName string - ContainerJSON dockertypes.ContainerJSON - containerInspected bool - keepForDebugging bool -} - -func (d *DockerContainer) PullImage() error { - d.t.Logf("Docker: Pull image %v", d.ImageName) - r, err := d.client.ImagePull(context.Background(), d.ImageName, dockertypes.ImagePullOptions{}) - if err != nil { - return err - } - defer r.Close() - - // read output and log relevant lines - bf := bufio.NewScanner(r) - for bf.Scan() { - var resp dockerImagePullOutput - if err := json.Unmarshal(bf.Bytes(), &resp); err != nil { - return err - } - if strings.HasPrefix(resp.Status, "Status: ") { - d.t.Logf("Docker: %v", resp.Status) - } - } - return bf.Err() -} - -func (d *DockerContainer) Start() error { - containerName := fmt.Sprintf("migrate_test_%v", pseudoRandStr(10)) - - // create container first - resp, err := 
d.client.ContainerCreate(context.Background(), - &dockercontainer.Config{ - Image: d.ImageName, - Labels: map[string]string{"migrate_test": "true"}, - Env: d.ENV, - Cmd: d.Cmd, - }, - &dockercontainer.HostConfig{ - PublishAllPorts: true, - }, - &dockernetwork.NetworkingConfig{}, - containerName) - if err != nil { - return err - } - - d.ContainerId = resp.ID - d.ContainerName = containerName - - // then start it - if err := d.client.ContainerStart(context.Background(), resp.ID, dockertypes.ContainerStartOptions{}); err != nil { - return err - } - - d.t.Logf("Docker: Started container %v (%v) for image %v listening at %v:%v", resp.ID[0:12], containerName, d.ImageName, d.Host(), d.Port()) - for _, v := range resp.Warnings { - d.t.Logf("Docker: Warning: %v", v) - } - return nil -} - -func (d *DockerContainer) KeepForDebugging() { - d.keepForDebugging = true -} - -func (d *DockerContainer) Remove() error { - if d.keepForDebugging { - return nil - } - - if len(d.ContainerId) == 0 { - return fmt.Errorf("missing containerId") - } - if err := d.client.ContainerRemove(context.Background(), d.ContainerId, - dockertypes.ContainerRemoveOptions{ - Force: true, - }); err != nil { - d.t.Log(err) - return err - } - d.t.Logf("Docker: Removed %v", d.ContainerName) - return nil -} - -func (d *DockerContainer) Inspect() error { - if len(d.ContainerId) == 0 { - return fmt.Errorf("missing containerId") - } - resp, err := d.client.ContainerInspect(context.Background(), d.ContainerId) - if err != nil { - return err - } - - d.ContainerJSON = resp - d.containerInspected = true - return nil -} - -func (d *DockerContainer) Logs() (io.ReadCloser, error) { - if len(d.ContainerId) == 0 { - return nil, fmt.Errorf("missing containerId") - } - - return d.client.ContainerLogs(context.Background(), d.ContainerId, dockertypes.ContainerLogsOptions{ - ShowStdout: true, - ShowStderr: true, - }) -} - -func (d *DockerContainer) portMapping(selectFirst bool, cPort int) (containerPort uint, hostIP string, hostPort uint, err error) { - if !d.containerInspected { - if err := d.Inspect(); err != nil { - d.t.Fatal(err) - } - } - - for port, bindings := range d.ContainerJSON.NetworkSettings.Ports { - if !selectFirst && port.Int() != cPort { - // Skip ahead until we find the port we want - continue - } - for _, binding := range bindings { - - hostPortUint, err := strconv.ParseUint(binding.HostPort, 10, 64) - if err != nil { - return 0, "", 0, err - } - - return uint(port.Int()), binding.HostIP, uint(hostPortUint), nil - } - } - - if selectFirst { - return 0, "", 0, fmt.Errorf("no port binding") - } else { - return 0, "", 0, fmt.Errorf("specified port not bound") - } -} - -func (d *DockerContainer) Host() string { - _, hostIP, _, err := d.portMapping(true, -1) - if err != nil { - d.t.Fatal(err) - } - - if hostIP == "0.0.0.0" { - return "127.0.0.1" - } else { - return hostIP - } -} - -func (d *DockerContainer) Port() uint { - _, _, port, err := d.portMapping(true, -1) - if err != nil { - d.t.Fatal(err) - } - return port -} - -func (d *DockerContainer) PortFor(cPort int) uint { - _, _, port, err := d.portMapping(false, cPort) - if err != nil { - d.t.Fatal(err) - } - return port -} - -func (d *DockerContainer) NetworkSettings() dockertypes.NetworkSettings { - netSettings := d.ContainerJSON.NetworkSettings - return *netSettings -} - -type dockerImagePullOutput struct { - Status string `json:"status"` - ProgressDetails struct { - Current int `json:"current"` - Total int `json:"total"` - } `json:"progressDetail"` - Id string `json:"id"` - 
Progress string `json:"progress"` -} - -func init() { - rand.Seed(time.Now().UnixNano()) -} - -func pseudoRandStr(n int) string { - var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz0123456789") - b := make([]rune, n) - for i := range b { - b[i] = letterRunes[rand.Intn(len(letterRunes))] - } - return string(b) -} diff --git a/vendor/github.com/rdallman/migrate/testing/testing.go b/vendor/github.com/rdallman/migrate/testing/testing.go deleted file mode 100644 index 64e0a6465..000000000 --- a/vendor/github.com/rdallman/migrate/testing/testing.go +++ /dev/null @@ -1,96 +0,0 @@ -package testing - -import ( - "io/ioutil" - "os" - "strconv" - "testing" - "time" - - dockertypes "github.com/docker/docker/api/types" -) - -type IsReadyFunc func(Instance) bool - -type TestFunc func(*testing.T, Instance) - -type Version struct { - Image string - ENV []string - Cmd []string -} - -func ParallelTest(t *testing.T, versions []Version, readyFn IsReadyFunc, testFn TestFunc) { - delay, err := strconv.Atoi(os.Getenv("MIGRATE_TEST_CONTAINER_BOOT_DELAY")) - if err != nil { - delay = 0 - } - - for i, version := range versions { - version := version // capture range variable, see https://goo.gl/60w3p2 - - // Only test against one version in short mode - // TODO: order is random, maybe always pick first version instead? - if i > 0 && testing.Short() { - t.Logf("Skipping %v in short mode", version) - - } else { - t.Run(version.Image, func(t *testing.T) { - t.Parallel() - - // create new container - container, err := NewDockerContainer(t, version.Image, version.ENV, version.Cmd) - if err != nil { - t.Fatalf("%v\n%s", err, containerLogs(t, container)) - } - - // make sure to remove container once done - defer container.Remove() - - // wait until database is ready - tick := time.Tick(1000 * time.Millisecond) - timeout := time.After(time.Duration(delay + 60) * time.Second) - outer: - for { - select { - case <-tick: - if readyFn(container) { - break outer - } - - case <-timeout: - t.Fatalf("Docker: Container not ready, timeout for %v.\n%s", version, containerLogs(t, container)) - } - } - - time.Sleep(time.Duration(int64(delay)) * time.Second) - - // we can now run the tests - testFn(t, container) - }) - } - } -} - -func containerLogs(t *testing.T, c *DockerContainer) []byte { - r, err := c.Logs() - if err != nil { - t.Error("%v", err) - return nil - } - defer r.Close() - b, err := ioutil.ReadAll(r) - if err != nil { - t.Error("%v", err) - return nil - } - return b -} - -type Instance interface { - Host() string - Port() uint - PortFor(int) uint - NetworkSettings() dockertypes.NetworkSettings - KeepForDebugging() -} diff --git a/vendor/github.com/rdallman/migrate/testing/testing_test.go b/vendor/github.com/rdallman/migrate/testing/testing_test.go deleted file mode 100644 index 8217decfa..000000000 --- a/vendor/github.com/rdallman/migrate/testing/testing_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package testing - -import ( - "testing" -) - -func ExampleParallelTest(t *testing.T) { - var isReady = func(i Instance) bool { - // Return true if Instance is ready to run tests. - // Don't block here though. - return true - } - - // t is *testing.T coming from parent Test(t *testing.T) - ParallelTest(t, []Version{{Image: "docker_image:9.6"}}, isReady, - func(t *testing.T, i Instance) { - // Run your test/s ... 
- t.Fatal("...") - }) -} diff --git a/vendor/github.com/rdallman/migrate/util.go b/vendor/github.com/rdallman/migrate/util.go deleted file mode 100644 index 67048ea5c..000000000 --- a/vendor/github.com/rdallman/migrate/util.go +++ /dev/null @@ -1,105 +0,0 @@ -package migrate - -import ( - "bufio" - "fmt" - "io" - nurl "net/url" - "strings" - "time" -) - -// MultiError holds multiple errors. -type MultiError struct { - Errs []error -} - -// NewMultiError returns an error type holding multiple errors. -func NewMultiError(errs ...error) MultiError { - compactErrs := make([]error, 0) - for _, e := range errs { - if e != nil { - compactErrs = append(compactErrs, e) - } - } - return MultiError{compactErrs} -} - -// Error implements error. Mulitple errors are concatenated with 'and's. -func (m MultiError) Error() string { - var strs = make([]string, 0) - for _, e := range m.Errs { - if len(e.Error()) > 0 { - strs = append(strs, e.Error()) - } - } - return strings.Join(strs, " and ") -} - -// suint safely converts int to uint -// see https://goo.gl/wEcqof -// see https://goo.gl/pai7Dr -func suint(n int) uint { - if n < 0 { - panic(fmt.Sprintf("suint(%v) expects input >= 0", n)) - } - return uint(n) -} - -// newSlowReader turns an io.ReadCloser into a slow io.ReadCloser. -// Use this to simulate a slow internet connection. -func newSlowReader(r io.ReadCloser) io.ReadCloser { - return &slowReader{ - rx: r, - reader: bufio.NewReader(r), - } -} - -type slowReader struct { - rx io.ReadCloser - reader *bufio.Reader -} - -func (b *slowReader) Read(p []byte) (n int, err error) { - time.Sleep(10 * time.Millisecond) - c, err := b.reader.ReadByte() - if err != nil { - return 0, err - } else { - copy(p, []byte{c}) - return 1, nil - } -} - -func (b *slowReader) Close() error { - return b.rx.Close() -} - -var errNoScheme = fmt.Errorf("no scheme") - -// schemeFromUrl returns the scheme from a URL string -func schemeFromUrl(url string) (string, error) { - u, err := nurl.Parse(url) - if err != nil { - return "", err - } - - if len(u.Scheme) == 0 { - return "", errNoScheme - } - - return u.Scheme, nil -} - -// FilterCustomQuery filters all query values starting with `x-` -func FilterCustomQuery(u *nurl.URL) *nurl.URL { - ux := *u - vx := make(nurl.Values) - for k, v := range ux.Query() { - if len(k) <= 1 || (len(k) > 1 && k[0:2] != "x-") { - vx[k] = v - } - } - ux.RawQuery = vx.Encode() - return &ux -} diff --git a/vendor/github.com/rdallman/migrate/util_test.go b/vendor/github.com/rdallman/migrate/util_test.go deleted file mode 100644 index 1ad234473..000000000 --- a/vendor/github.com/rdallman/migrate/util_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package migrate - -import ( - nurl "net/url" - "testing" -) - -func TestSuintPanicsWithNegativeInput(t *testing.T) { - defer func() { - if r := recover(); r == nil { - t.Fatal("expected suint to panic for -1") - } - }() - suint(-1) -} - -func TestSuint(t *testing.T) { - if u := suint(0); u != 0 { - t.Fatalf("expected 0, got %v", u) - } -} - -func TestFilterCustomQuery(t *testing.T) { - n, err := nurl.Parse("foo://host?a=b&x-custom=foo&c=d") - if err != nil { - t.Fatal(err) - } - nx := FilterCustomQuery(n).Query() - if nx.Get("x-custom") != "" { - t.Fatalf("didn't expect x-custom") - } -} diff --git a/vendor/go.opencensus.io/.gitignore b/vendor/go.opencensus.io/.gitignore index 4c0c12a63..f162595df 100644 --- a/vendor/go.opencensus.io/.gitignore +++ b/vendor/go.opencensus.io/.gitignore @@ -1,2 +1,5 @@ /.idea/ +# go.opencensus.io/exporter/aws +/exporter/aws/ + diff --git 
a/vendor/go.opencensus.io/README.md b/vendor/go.opencensus.io/README.md
index 00ca76daa..575673ff7 100644
--- a/vendor/go.opencensus.io/README.md
+++ b/vendor/go.opencensus.io/README.md
@@ -16,7 +16,7 @@ rapidly, vendoring is recommended.
 ## Installation
 
 ```
-$ go get -u go.opencensus.io/...
+$ go get -u go.opencensus.io
 ```
 
 ## Prerequisites
@@ -32,6 +32,7 @@ Currently, OpenCensus supports:
 * [OpenZipkin][exporter-zipkin] for traces
 * Stackdriver [Monitoring][exporter-stackdriver] and [Trace][exporter-stackdriver]
 * [Jaeger][exporter-jaeger] for traces
+* [AWS X-Ray][exporter-xray] for traces
 
 ## Tags
@@ -107,9 +108,10 @@ if err != nil {
 
 ## Stats
 
-### Creating, retrieving and deleting a measure
+### Measures
 
-Create and load measures with units:
+Measures are used for recording data points with associated units.
+Creating a Measure:
 
 [embedmd]:# (stats.go measure)
 ```go
@@ -119,105 +121,56 @@ if err != nil {
 }
 ```
 
-Retrieve measure by name:
+### Recording Measurements
 
-[embedmd]:# (stats.go findMeasure)
-```go
-m := stats.FindMeasure("my.org/video_size")
-if m == nil {
-	log.Fatalln("measure not found")
-}
-```
-
-### Creating an aggregation
-
-Currently 4 types of aggregations are supported. The CountAggregation is used to count
-the number of times a sample was recorded. The DistributionAggregation is used to
-provide a histogram of the values of the samples. The SumAggregation is used to
-sum up all sample values. The MeanAggregation is used to calculate the mean of
-sample values.
-
-[embedmd]:# (stats.go aggs)
-```go
-distAgg := view.DistributionAggregation([]float64{0, 1 << 32, 2 << 32, 3 << 32})
-countAgg := view.CountAggregation{}
-sumAgg := view.SumAggregation{}
-meanAgg := view.MeanAggregation{}
-```
-
-### Creating, registering and unregistering a view
-
-Create and register a view:
-
-[embedmd]:# (stats.go view)
-```go
-v, err := view.New(
-	"my.org/video_size_distribution",
-	"distribution of processed video size over time",
-	nil,
-	videoSize,
-	distAgg,
-)
-if err != nil {
-	log.Fatalf("cannot create view: %v", err)
-}
-if err := view.Register(v); err != nil {
-	log.Fatal(err)
-}
-```
-
-Find view by name:
-
-[embedmd]:# (stats.go findView)
-```go
-v = view.Find("my.org/video_size_distribution")
-if v == nil {
-	log.Fatalln("view not found")
-}
-```
-
-Unregister view:
-
-[embedmd]:# (stats.go unregisterView)
-```go
-if err = view.Unregister(v); err != nil {
-	log.Fatal(err)
-}
-```
-
-Configure the default interval between reports of collected data.
-This is a system wide interval and impacts all views. The default
-interval duration is 10 seconds. Trying to set an interval with
-a duration less than a certain minimum (maybe 1s) should have no effect.
-
-[embedmd]:# (stats.go reportingPeriod)
-```go
-view.SetReportingPeriod(5 * time.Second)
-```
-
-### Recording measurements
-
-Recording usage can only be performed against already registered measure
-and their registered views. Measurements are implicitly tagged with the
-tags in the context:
+Measurements are data points associated with Measures.
+Recording implicitly tags the set of Measurements with the tags from the
+provided context:
 
 [embedmd]:# (stats.go record)
 ```go
 stats.Record(ctx, videoSize.M(102478))
 ```
 
-### Retrieving collected data for a view
+### Views
 
-Users need to subscribe to a view in order to retrieve collected data.
+Views are how Measures are aggregated. You can think of them as queries over the
+set of recorded data points (Measurements).
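A minimal sketch of tag-based grouping, assuming the APIs exactly as used in the examples later in this diff (`tag.NewKey`, `stats.Int64`, `view.Subscribe`, `tag.New`, `stats.Record`); all names here are illustrative:

```go
// Illustrative names throughout; the API usage mirrors the examples in this diff.
frontendKey, err := tag.NewKey("my.org/keys/frontend")
if err != nil {
	log.Fatal(err)
}
videoCount, err := stats.Int64("my.org/measures/video_count", "number of processed videos", "1")
if err != nil {
	log.Fatal(err)
}

// A view that groups the count of recorded measurements by the frontend tag.
if err := view.Subscribe(&view.View{
	Name:        "my.org/views/video_count_by_frontend",
	Description: "number of videos processed, grouped by frontend",
	TagKeys:     []tag.Key{frontendKey}, // one aggregation bucket per frontend value
	Measure:     videoCount,
	Aggregation: &view.CountAggregation{},
}); err != nil {
	log.Fatal(err)
}

// Measurements recorded with this context count toward the "mobile-ios9.3.5" bucket.
ctx, err := tag.New(context.Background(), tag.Insert(frontendKey, "mobile-ios9.3.5"))
if err != nil {
	log.Fatal(err)
}
stats.Record(ctx, videoCount.M(1))
```

Each distinct frontend value recorded this way should then surface as its own row in the exported `view.Data`.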
-[embedmd]:# (stats.go subscribe)
+Views have two parts: the tags to group by and the aggregation type used.
+
+Currently four types of aggregations are supported:
+* CountAggregation is used to count the number of times a sample was recorded.
+* DistributionAggregation is used to provide a histogram of the values of the samples.
+* SumAggregation is used to sum up all sample values.
+* MeanAggregation is used to calculate the mean of sample values.
+
+[embedmd]:# (stats.go aggs)
 ```go
-if err := v.Subscribe(); err != nil {
-	log.Fatal(err)
+distAgg := view.DistributionAggregation{0, 1 << 32, 2 << 32, 3 << 32}
+countAgg := view.CountAggregation{}
+sumAgg := view.SumAggregation{}
+meanAgg := view.MeanAggregation{}
+```
+
+Here we create a view with the DistributionAggregation over our Measure.
+All Measurements will be aggregated together irrespective of their tags,
+i.e. no grouping by tag.
+
+[embedmd]:# (stats.go view)
+```go
+if err = view.Subscribe(&view.View{
+	Name:        "my.org/video_size_distribution",
+	Description: "distribution of processed video size over time",
+	Measure:     videoSize,
+	Aggregation: view.DistributionAggregation([]float64{0, 1 << 32, 2 << 32, 3 << 32}),
+}); err != nil {
+	log.Fatalf("Failed to subscribe to view: %v", err)
 }
 ```
 
-Subscribed views' data will be exported via the registered exporters.
+Subscribe begins collecting data for the view. Subscribed views' data will be
+exported via the registered exporters.
 
 [embedmd]:# (stats.go registerExporter)
 ```go
@@ -239,6 +192,16 @@ func (e *exporter) ExportView(vd *view.Data) {
 ```
 
+Configure the default interval between reports of collected data.
+This is a system wide interval and impacts all views. The default
+interval duration is 10 seconds.
+
+[embedmd]:# (stats.go reportingPeriod)
+```go
+view.SetReportingPeriod(5 * time.Second)
+```
+
+
 ## Traces
 
 ### Starting and ending a span
@@ -294,3 +257,4 @@ A screenshot of the CPU profile from the program above:
 [exporter-stackdriver]: https://godoc.org/go.opencensus.io/exporter/stackdriver
 [exporter-zipkin]: https://godoc.org/go.opencensus.io/exporter/zipkin
 [exporter-jaeger]: https://godoc.org/go.opencensus.io/exporter/jaeger
+[exporter-xray]: https://godoc.org/go.opencensus.io/exporter/xray
diff --git a/vendor/go.opencensus.io/examples/exporter/exporter.go b/vendor/go.opencensus.io/examples/exporter/exporter.go
index eb1ee4f9d..daa09eb94 100644
--- a/vendor/go.opencensus.io/examples/exporter/exporter.go
+++ b/vendor/go.opencensus.io/examples/exporter/exporter.go
@@ -21,16 +21,16 @@ import (
 	"go.opencensus.io/trace"
 )
 
-// Exporter is a stats and trace exporter that logs
+// PrintExporter is a stats and trace exporter that logs
 // the exported data to the console.
-type Exporter struct{}
+type PrintExporter struct{}
 
 // ExportView logs the view data.
-func (e *Exporter) ExportView(vd *view.Data) {
+func (e *PrintExporter) ExportView(vd *view.Data) {
 	log.Println(vd)
 }
 
 // ExportSpan logs the trace span.
-func (e *Exporter) ExportSpan(vd *trace.SpanData) {
+func (e *PrintExporter) ExportSpan(vd *trace.SpanData) {
 	log.Println(vd)
 }
diff --git a/vendor/go.opencensus.io/examples/grpc/README.md b/vendor/go.opencensus.io/examples/grpc/README.md
new file mode 100644
index 000000000..7d3a8d0cb
--- /dev/null
+++ b/vendor/go.opencensus.io/examples/grpc/README.md
@@ -0,0 +1,31 @@
+# Example gRPC server and client with OpenCensus
+
+This example uses:
+
+* gRPC to create an RPC server and client.
+* The OpenCensus gRPC plugin to instrument the RPC server and client.
+* Debugging exporters to print stats and traces to stdout.
+
+```
+$ go get go.opencensus.io/examples/grpc
+```
+
+First, run the server:
+
+```
+$ go run $(go env GOPATH)/src/go.opencensus.io/examples/grpc/helloworld_server/main.go
+```
+
+Then, run the client:
+
+```
+$ go run $(go env GOPATH)/src/go.opencensus.io/examples/grpc/helloworld_client/main.go
+```
+
+You will see traces and stats exported to stdout. You can use one of the
+[exporters](https://godoc.org/go.opencensus.io/exporter)
+to upload collected data to the backend of your choice.
+
+You can also see the z-pages provided by the server:
+* Traces: http://localhost:8081/tracez
+* RPCs: http://localhost:8081/rpcz
diff --git a/vendor/go.opencensus.io/examples/grpc/helloworld_client/main.go b/vendor/go.opencensus.io/examples/grpc/helloworld_client/main.go
index fe84f5bc5..e28de9db5 100644
--- a/vendor/go.opencensus.io/examples/grpc/helloworld_client/main.go
+++ b/vendor/go.opencensus.io/examples/grpc/helloworld_client/main.go
@@ -16,7 +16,6 @@ package main
 
 import (
 	"log"
-	"net/http"
 	"os"
 	"time"
 
@@ -24,7 +23,6 @@ import (
 	pb "go.opencensus.io/examples/grpc/proto"
 	"go.opencensus.io/plugin/ocgrpc"
 	"go.opencensus.io/stats/view"
-	"go.opencensus.io/zpages"
 	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 )
@@ -35,11 +33,9 @@ const (
 )
 
 func main() {
-	go func() { log.Fatal(http.ListenAndServe(":8080", zpages.Handler)) }()
-
 	// Register stats and trace exporters to export
 	// the collected data.
-	view.RegisterExporter(&exporter.Exporter{})
+	view.RegisterExporter(&exporter.PrintExporter{})
 
 	// Subscribe to collect client request count.
 	if err := ocgrpc.ClientErrorCountView.Subscribe(); err != nil {
@@ -48,7 +44,7 @@ func main() {
 
 	// Set up a connection to the server with the OpenCensus
 	// stats handler to enable stats and tracing.
-	conn, err := grpc.Dial(address, grpc.WithStatsHandler(ocgrpc.NewClientStatsHandler()), grpc.WithInsecure())
+	conn, err := grpc.Dial(address, grpc.WithStatsHandler(&ocgrpc.ClientHandler{}), grpc.WithInsecure())
 	if err != nil {
 		log.Fatalf("did not connect: %v", err)
 	}
diff --git a/vendor/go.opencensus.io/examples/grpc/helloworld_server/main.go b/vendor/go.opencensus.io/examples/grpc/helloworld_server/main.go
index 99609ac4f..2745d26bf 100644
--- a/vendor/go.opencensus.io/examples/grpc/helloworld_server/main.go
+++ b/vendor/go.opencensus.io/examples/grpc/helloworld_server/main.go
@@ -45,7 +45,7 @@ func main() {
 	go func() { log.Fatal(http.ListenAndServe(":8081", zpages.Handler)) }()
 	// Register stats and trace exporters to export
 	// the collected data.
-	view.RegisterExporter(&exporter.Exporter{})
+	view.RegisterExporter(&exporter.PrintExporter{})
 
 	// Subscribe to collect server request count.
 	if err := ocgrpc.ServerRequestCountView.Subscribe(); err != nil {
@@ -59,7 +59,7 @@ func main() {
 	// Set up a new server with the OpenCensus
 	// stats handler to enable stats and tracing.
-	s := grpc.NewServer(grpc.StatsHandler(ocgrpc.NewServerStatsHandler()))
+	s := grpc.NewServer(grpc.StatsHandler(&ocgrpc.ServerHandler{}))
 	pb.RegisterGreeterServer(s, &server{})
 
 	// Register reflection service on gRPC server.
reflection.Register(s) diff --git a/vendor/go.opencensus.io/examples/helloworld/main.go b/vendor/go.opencensus.io/examples/helloworld/main.go new file mode 100644 index 000000000..ede25cc26 --- /dev/null +++ b/vendor/go.opencensus.io/examples/helloworld/main.go @@ -0,0 +1,102 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Command helloworld is an example program that collects data for +// video size. +package main + +import ( + "context" + "fmt" + "log" + "math/rand" + "time" + + "go.opencensus.io/examples/exporter" + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + "go.opencensus.io/trace" +) + +var ( + // frontendKey allows us to breakdown the recorded data + // by the frontend used when uploading the video. + frontendKey tag.Key + + // videoSize will measure the size of processed videos. + videoSize *stats.Int64Measure +) + +func main() { + ctx := context.Background() + + // Register an exporter to be able to retrieve + // the data from the subscribed views. + e := &exporter.PrintExporter{} + view.RegisterExporter(e) + trace.RegisterExporter(e) + + var err error + frontendKey, err = tag.NewKey("my.org/keys/frontend") + if err != nil { + log.Fatal(err) + } + videoSize, err = stats.Int64("my.org/measure/video_size", "size of processed videos", "MBy") + if err != nil { + log.Fatalf("Video size measure not created: %v", err) + } + + // Create view to see the processed video size + // distribution broken down by frontend. + // Subscribe will allow view data to be exported. + err = view.Subscribe(&view.View{ + Name: "my.org/views/video_size", + Description: "processed video size over time", + TagKeys: []tag.Key{frontendKey}, + Measure: videoSize, + Aggregation: view.DistributionAggregation{0, 1 << 16, 1 << 32}, + }) + if err != nil { + log.Fatalf("Cannot subscribe to the view: %v", err) + } + + // Process the video. + process(ctx) + + // Wait for a duration longer than reporting duration to ensure the stats + // library reports the collected data. + fmt.Println("Wait longer than the reporting duration...") + time.Sleep(2 * time.Second) +} + +// process processes the video and instruments the processing +// by creating a span and collecting metrics about the operation. +func process(ctx context.Context) { + ctx, err := tag.New(ctx, + tag.Insert(frontendKey, "mobile-ios9.3.5"), + ) + if err != nil { + log.Fatal(err) + } + ctx, span := trace.StartSpan(ctx, "my.org/ProcessVideo") + defer span.End() + // Process video. + // Record the processed video size. + + // Sleep for [1,10] milliseconds to fake work. 
+ time.Sleep(time.Duration(rand.Intn(10)+1) * time.Millisecond) + + stats.Record(ctx, videoSize.M(25648)) +} diff --git a/vendor/go.opencensus.io/examples/http/README.md b/vendor/go.opencensus.io/examples/http/README.md new file mode 100644 index 000000000..6bc87306b --- /dev/null +++ b/vendor/go.opencensus.io/examples/http/README.md @@ -0,0 +1,31 @@ +# Example net/http server and client with OpenCensus + +This example uses: + +* net/http to create a server and client. +* The OpenCensus net/http plugin to instrument the server and client. +* Debugging exporters to print stats and traces to stdout. + +``` +$ go get go.opencensus.io/examples/http +``` + +First, run the server: + +``` +$ go run $(go env GOPATH)/src/go.opencensus.io/examples/http/helloworld_server/main.go +``` + +Then, run the client: + +``` +$ go run $(go env GOPATH)/src/go.opencensus.io/examples/http/helloworld_client/main.go +``` + +You will see traces and stats exported on the stdout. You can use one of the +[exporters](https://godoc.org/go.opencensus.io/exporter) +to upload collected data to the backend of your choice. + +You can also see the z-pages provided from the server: +* Traces: http://localhost:8081/tracez +* RPCs: http://localhost:8081/rpcz diff --git a/vendor/go.opencensus.io/examples/http/helloworld_client/main.go b/vendor/go.opencensus.io/examples/http/helloworld_client/main.go index a30019cdf..9a2a67e90 100644 --- a/vendor/go.opencensus.io/examples/http/helloworld_client/main.go +++ b/vendor/go.opencensus.io/examples/http/helloworld_client/main.go @@ -24,16 +24,13 @@ import ( "go.opencensus.io/examples/exporter" "go.opencensus.io/stats/view" - "go.opencensus.io/zpages" ) const server = "http://localhost:50030" func main() { - go func() { log.Fatal(http.ListenAndServe(":9979", zpages.Handler)) }() - // Register stats and trace exporters to export the collected data. - exporter := &exporter.Exporter{} + exporter := &exporter.PrintExporter{} view.RegisterExporter(exporter) trace.RegisterExporter(exporter) diff --git a/vendor/go.opencensus.io/examples/http/helloworld_server/main.go b/vendor/go.opencensus.io/examples/http/helloworld_server/main.go index b5f3cd843..dbeb438ca 100644 --- a/vendor/go.opencensus.io/examples/http/helloworld_server/main.go +++ b/vendor/go.opencensus.io/examples/http/helloworld_server/main.go @@ -29,10 +29,10 @@ import ( ) func main() { - go func() { log.Fatal(http.ListenAndServe(":9979", zpages.Handler)) }() + go func() { log.Fatal(http.ListenAndServe(":8081", zpages.Handler)) }() // Register stats and trace exporters to export the collected data. - exporter := &exporter.Exporter{} + exporter := &exporter.PrintExporter{} view.RegisterExporter(exporter) trace.RegisterExporter(exporter) diff --git a/vendor/go.opencensus.io/examples/stats/helloworld/main.go b/vendor/go.opencensus.io/examples/stats/helloworld/main.go deleted file mode 100644 index 4e48fc563..000000000 --- a/vendor/go.opencensus.io/examples/stats/helloworld/main.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Command helloworld is an example program that collects data for -// video size. -package main - -import ( - "context" - "fmt" - "log" - "time" - - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" -) - -func main() { - ctx := context.Background() - - // Register an exporter to be able to retrieve - // the data from the subscribed views. - view.RegisterExporter(&exporter{}) - - // Create measures. The program will record measures for the size of - // processed videos and the nubmer of videos marked as spam. - videoSize, err := stats.Int64("my.org/measure/video_size", "size of processed videos", "MBy") - if err != nil { - log.Fatalf("Video size measure not created: %v", err) - } - - // Create view to see the processed video size - // distribution over 10 seconds. - v, err := view.New( - "my.org/views/video_size", - "processed video size over time", - nil, - videoSize, - view.DistributionAggregation([]float64{0, 1 << 16, 1 << 32}), - ) - if err != nil { - log.Fatalf("Cannot create view: %v", err) - } - - // Set reporting period to report data at every second. - view.SetReportingPeriod(1 * time.Second) - - // Subscribe will allow view data to be exported. - // Once no longer need, you can unsubscribe from the view. - if err := v.Subscribe(); err != nil { - log.Fatalf("Cannot subscribe to the view: %v", err) - } - - // Record data points. - stats.Record(ctx, videoSize.M(25648), videoSize.M(48000), videoSize.M(128000)) - - // Wait for a duration longer than reporting duration to ensure the stats - // library reports the collected data. - fmt.Println("Wait longer than the reporting duration...") - time.Sleep(2 * time.Second) -} - -type exporter struct{} - -func (e *exporter) ExportView(vd *view.Data) { - log.Println(vd) -} diff --git a/vendor/go.opencensus.io/examples/stats/prometheus/main.go b/vendor/go.opencensus.io/examples/stats/prometheus/main.go index 11f616c9e..1a08d564c 100644 --- a/vendor/go.opencensus.io/examples/stats/prometheus/main.go +++ b/vendor/go.opencensus.io/examples/stats/prometheus/main.go @@ -43,47 +43,30 @@ func main() { if err != nil { log.Fatalf("Video count measure not created: %v", err) } - - // 1. Create view to see the number of processed videos cumulatively. - viewCount, err := view.New( - "video_count", - "number of videos processed over time", - nil, - videoCount, - view.CountAggregation{}, - ) - if err != nil { - log.Fatalf("Cannot create view: %v", err) - } - - // Subscribe will allow view data to be exported. - // Once no longer needed, you can unsubscribe from the view. - if err := viewCount.Subscribe(); err != nil { - log.Fatalf("Cannot subscribe to the view: %v", err) - } - - // Create measures. The program will record measures for the size of - // processed videos and the number of videos marked as spam. - videoSize, err := stats.Int64("my.org/measures/video_size_cum", "size of processed video", "MBy") + videoSize, err := stats.Int64("my.org/measures/video_size", "size of processed video", "MBy") if err != nil { log.Fatalf("Video size measure not created: %v", err) } - // 2. Create view to see the amount of video processed - viewSize, err := view.New( - "video_cum", - "processed video size over time", - nil, - videoSize, - view.DistributionAggregation([]float64{0, 1 << 16, 1 << 32}), - ) - if err != nil { - log.Fatalf("Cannot create view: %v", err) - } - + // Create view to see the number of processed videos cumulatively. 
+ // Create view to see the amount of video processed // Subscribe will allow view data to be exported. // Once no longer needed, you can unsubscribe from the view. - if err := viewSize.Subscribe(); err != nil { + err = view.Subscribe( + &view.View{ + Name: "video_count", + Description: "number of videos processed over time", + Measure: videoCount, + Aggregation: &view.CountAggregation{}, + }, + &view.View{ + Name: "video_size", + Description: "processed video size over time", + Measure: videoSize, + Aggregation: view.DistributionAggregation{0, 1 << 16, 1 << 32}, + }, + ) + if err != nil { log.Fatalf("Cannot subscribe to the view: %v", err) } @@ -93,8 +76,7 @@ func main() { // Record some data points... go func() { for { - stats.Record(ctx, videoCount.M(1)) - stats.Record(ctx, videoSize.M(rand.Int63())) + stats.Record(ctx, videoCount.M(1), videoSize.M(rand.Int63())) <-time.After(time.Millisecond * time.Duration(1+rand.Intn(400))) } }() diff --git a/vendor/go.opencensus.io/examples/stats/stackdriver/main.go b/vendor/go.opencensus.io/examples/stats/stackdriver/main.go index 9019683b5..a82974c62 100644 --- a/vendor/go.opencensus.io/examples/stats/stackdriver/main.go +++ b/vendor/go.opencensus.io/examples/stats/stackdriver/main.go @@ -56,24 +56,19 @@ func main() { log.Fatalf("Video size measure not created: %v", err) } - // Create view to see the processed video size cumulatively. - v, err := view.New( - "my.org/views/video_size_cum", - "processed video size over time", - nil, - videoSize, - view.DistributionAggregation([]float64{0, 1 << 16, 1 << 32}), - ) - if err != nil { - log.Fatalf("Cannot create view: %v", err) - } - // Set reporting period to report data at every second. view.SetReportingPeriod(1 * time.Second) + // Create view to see the processed video size cumulatively. // Subscribe will allow view data to be exported. // Once no longer need, you can unsubscribe from the view. - if err := v.Subscribe(); err != nil { + err = view.Subscribe(&view.View{ + Name: "my.org/views/video_size_cum", + Description: "processed video size over time", + Measure: videoSize, + Aggregation: view.DistributionAggregation{0, 1 << 16, 1 << 32}, + }) + if err != nil { log.Fatalf("Cannot subscribe to the view: %v", err) } diff --git a/vendor/go.opencensus.io/examples/trace/helloworld/main.go b/vendor/go.opencensus.io/examples/trace/helloworld/main.go deleted file mode 100644 index e8dcaed0c..000000000 --- a/vendor/go.opencensus.io/examples/trace/helloworld/main.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Command helloworld is an example program that creates spans. -package main - -import ( - "context" - "log" - "time" - - "go.opencensus.io/trace" -) - -func main() { - ctx := context.Background() - - // Register an exporter to be able to retrieve - // the collected spans. 
- trace.RegisterExporter(&exporter{}) - - trace.SetDefaultSampler(trace.AlwaysSample()) - - ctx, span := trace.StartSpan(ctx, "/foo") - bar(ctx) - span.End() - - time.Sleep(1 * time.Second) // Wait enough for the exporter to report. -} - -func bar(ctx context.Context) { - ctx, span := trace.StartSpan(ctx, "/bar") - defer span.End() - - // Do bar... -} - -type exporter struct{} - -func (e *exporter) ExportSpan(sd *trace.SpanData) { - log.Println(sd) -} diff --git a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/agent/GoUnusedProtection__.go b/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/agent/GoUnusedProtection__.go deleted file mode 100644 index 1cb4f1aef..000000000 --- a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/agent/GoUnusedProtection__.go +++ /dev/null @@ -1,6 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package agent - -var GoUnusedProtection__ int diff --git a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/agent/agent-consts.go b/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/agent/agent-consts.go deleted file mode 100644 index 592a6679b..000000000 --- a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/agent/agent-consts.go +++ /dev/null @@ -1,28 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package agent - -import ( - "bytes" - "context" - "fmt" - "reflect" - - "git.apache.org/thrift.git/lib/go/thrift" - "go.opencensus.io/exporter/jaeger/internal/gen-go/jaeger" - "go.opencensus.io/exporter/jaeger/internal/gen-go/zipkincore" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = reflect.DeepEqual -var _ = bytes.Equal - -var _ = jaeger.GoUnusedProtection__ -var _ = zipkincore.GoUnusedProtection__ - -func init() { -} diff --git a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/agent/agent-remote/agent-remote.go b/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/agent/agent-remote/agent-remote.go deleted file mode 100755 index 0768fdc12..000000000 --- a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/agent/agent-remote/agent-remote.go +++ /dev/null @@ -1,187 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package main - -import ( - "context" - "flag" - "fmt" - "math" - "net" - "net/url" - "os" - "strconv" - "strings" - - "git.apache.org/thrift.git/lib/go/thrift" - - "go.opencensus.io/exporter/jaeger/internal/gen-go/agent" - "go.opencensus.io/exporter/jaeger/internal/gen-go/jaeger" - "go.opencensus.io/exporter/jaeger/internal/gen-go/zipkincore" -) - -var _ = jaeger.GoUnusedProtection__ -var _ = zipkincore.GoUnusedProtection__ - -func Usage() { - fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:") - flag.PrintDefaults() - fmt.Fprintln(os.Stderr, "\nFunctions:") - fmt.Fprintln(os.Stderr, " void emitZipkinBatch( spans)") - fmt.Fprintln(os.Stderr, " void emitBatch(Batch batch)") - fmt.Fprintln(os.Stderr) - os.Exit(0) -} - -func main() { - flag.Usage = Usage - var host string - var port int - var protocol string - var urlString string - var framed bool - var useHttp bool - var parsedUrl *url.URL - var trans thrift.TTransport - _ = strconv.Atoi - _ = math.Abs - flag.Usage = Usage - flag.StringVar(&host, "h", 
"localhost", "Specify host and port") - flag.IntVar(&port, "p", 9090, "Specify port") - flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)") - flag.StringVar(&urlString, "u", "", "Specify the url") - flag.BoolVar(&framed, "framed", false, "Use framed transport") - flag.BoolVar(&useHttp, "http", false, "Use http") - flag.Parse() - - if len(urlString) > 0 { - var err error - parsedUrl, err = url.Parse(urlString) - if err != nil { - fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) - flag.Usage() - } - host = parsedUrl.Host - useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http" - } else if useHttp { - _, err := url.Parse(fmt.Sprint("http://", host, ":", port)) - if err != nil { - fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) - flag.Usage() - } - } - - cmd := flag.Arg(0) - var err error - if useHttp { - trans, err = thrift.NewTHttpClient(parsedUrl.String()) - } else { - portStr := fmt.Sprint(port) - if strings.Contains(host, ":") { - host, portStr, err = net.SplitHostPort(host) - if err != nil { - fmt.Fprintln(os.Stderr, "error with host:", err) - os.Exit(1) - } - } - trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr)) - if err != nil { - fmt.Fprintln(os.Stderr, "error resolving address:", err) - os.Exit(1) - } - if framed { - trans = thrift.NewTFramedTransport(trans) - } - } - if err != nil { - fmt.Fprintln(os.Stderr, "Error creating transport", err) - os.Exit(1) - } - defer trans.Close() - var protocolFactory thrift.TProtocolFactory - switch protocol { - case "compact": - protocolFactory = thrift.NewTCompactProtocolFactory() - break - case "simplejson": - protocolFactory = thrift.NewTSimpleJSONProtocolFactory() - break - case "json": - protocolFactory = thrift.NewTJSONProtocolFactory() - break - case "binary", "": - protocolFactory = thrift.NewTBinaryProtocolFactoryDefault() - break - default: - fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol) - Usage() - os.Exit(1) - } - iprot := protocolFactory.GetProtocol(trans) - oprot := protocolFactory.GetProtocol(trans) - client := agent.NewAgentClient(thrift.NewTStandardClient(iprot, oprot)) - if err := trans.Open(); err != nil { - fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err) - os.Exit(1) - } - - switch cmd { - case "emitZipkinBatch": - if flag.NArg()-1 != 1 { - fmt.Fprintln(os.Stderr, "EmitZipkinBatch requires 1 args") - flag.Usage() - } - arg5 := flag.Arg(1) - mbTrans6 := thrift.NewTMemoryBufferLen(len(arg5)) - defer mbTrans6.Close() - _, err7 := mbTrans6.WriteString(arg5) - if err7 != nil { - Usage() - return - } - factory8 := thrift.NewTSimpleJSONProtocolFactory() - jsProt9 := factory8.GetProtocol(mbTrans6) - containerStruct0 := agent.NewAgentEmitZipkinBatchArgs() - err10 := containerStruct0.ReadField1(jsProt9) - if err10 != nil { - Usage() - return - } - argvalue0 := containerStruct0.Spans - value0 := argvalue0 - fmt.Print(client.EmitZipkinBatch(context.Background(), value0)) - fmt.Print("\n") - break - case "emitBatch": - if flag.NArg()-1 != 1 { - fmt.Fprintln(os.Stderr, "EmitBatch requires 1 args") - flag.Usage() - } - arg11 := flag.Arg(1) - mbTrans12 := thrift.NewTMemoryBufferLen(len(arg11)) - defer mbTrans12.Close() - _, err13 := mbTrans12.WriteString(arg11) - if err13 != nil { - Usage() - return - } - factory14 := thrift.NewTSimpleJSONProtocolFactory() - jsProt15 := factory14.GetProtocol(mbTrans12) - argvalue0 := jaeger.NewBatch() - err16 := argvalue0.Read(jsProt15) - if err16 != nil { - Usage() - return - } - 
value0 := argvalue0 - fmt.Print(client.EmitBatch(context.Background(), value0)) - fmt.Print("\n") - break - case "": - Usage() - break - default: - fmt.Fprintln(os.Stderr, "Invalid function ", cmd) - } -} diff --git a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/agent/agent.go b/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/agent/agent.go deleted file mode 100644 index bcb21aa54..000000000 --- a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/agent/agent.go +++ /dev/null @@ -1,391 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package agent - -import ( - "bytes" - "context" - "fmt" - "reflect" - - "go.opencensus.io/exporter/jaeger/internal/gen-go/jaeger" - "go.opencensus.io/exporter/jaeger/internal/gen-go/zipkincore" - - "git.apache.org/thrift.git/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = reflect.DeepEqual -var _ = bytes.Equal - -var _ = jaeger.GoUnusedProtection__ -var _ = zipkincore.GoUnusedProtection__ - -type Agent interface { - // Parameters: - // - Spans - EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (err error) - // Parameters: - // - Batch - EmitBatch(ctx context.Context, batch *jaeger.Batch) (err error) -} - -type AgentClient struct { - c thrift.TClient -} - -// Deprecated: Use NewAgent instead -func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient { - return &AgentClient{ - c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), - } -} - -// Deprecated: Use NewAgent instead -func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient { - return &AgentClient{ - c: thrift.NewTStandardClient(iprot, oprot), - } -} - -func NewAgentClient(c thrift.TClient) *AgentClient { - return &AgentClient{ - c: c, - } -} - -// Parameters: -// - Spans -func (p *AgentClient) EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (err error) { - var _args0 AgentEmitZipkinBatchArgs - _args0.Spans = spans - if err := p.c.Call(ctx, "emitZipkinBatch", &_args0, nil); err != nil { - return err - } - return nil -} - -// Parameters: -// - Batch -func (p *AgentClient) EmitBatch(ctx context.Context, batch *jaeger.Batch) (err error) { - var _args1 AgentEmitBatchArgs - _args1.Batch = batch - if err := p.c.Call(ctx, "emitBatch", &_args1, nil); err != nil { - return err - } - return nil -} - -type AgentProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler Agent -} - -func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewAgentProcessor(handler Agent) *AgentProcessor { - - self2 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self2.processorMap["emitZipkinBatch"] = &agentProcessorEmitZipkinBatch{handler: handler} - self2.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler} - return self2 -} - -func (p *AgentProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err 
thrift.TException) { - name, _, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return false, err - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(ctx, seqId, iprot, oprot) - } - iprot.Skip(thrift.STRUCT) - iprot.ReadMessageEnd() - x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x3.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, x3 - -} - -type agentProcessorEmitZipkinBatch struct { - handler Agent -} - -func (p *agentProcessorEmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AgentEmitZipkinBatchArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - return false, err - } - - iprot.ReadMessageEnd() - var err2 error - if err2 = p.handler.EmitZipkinBatch(ctx, args.Spans); err2 != nil { - return true, err2 - } - return true, nil -} - -type agentProcessorEmitBatch struct { - handler Agent -} - -func (p *agentProcessorEmitBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AgentEmitBatchArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - return false, err - } - - iprot.ReadMessageEnd() - var err2 error - if err2 = p.handler.EmitBatch(ctx, args.Batch); err2 != nil { - return true, err2 - } - return true, nil -} - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - Spans -type AgentEmitZipkinBatchArgs struct { - Spans []*zipkincore.Span `thrift:"spans,1" db:"spans" json:"spans"` -} - -func NewAgentEmitZipkinBatchArgs() *AgentEmitZipkinBatchArgs { - return &AgentEmitZipkinBatchArgs{} -} - -func (p *AgentEmitZipkinBatchArgs) GetSpans() []*zipkincore.Span { - return p.Spans -} -func (p *AgentEmitZipkinBatchArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.LIST { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *AgentEmitZipkinBatchArgs) ReadField1(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*zipkincore.Span, 0, size) - p.Spans = tSlice - for i := 0; i < size; i++ { - _elem4 := &zipkincore.Span{} - if err := _elem4.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) - } - p.Spans = append(p.Spans, _elem4) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *AgentEmitZipkinBatchArgs) Write(oprot thrift.TProtocol) error { - if err := 
oprot.WriteStructBegin("emitZipkinBatch_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *AgentEmitZipkinBatchArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("spans", thrift.LIST, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Spans { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) - } - return err -} - -func (p *AgentEmitZipkinBatchArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AgentEmitZipkinBatchArgs(%+v)", *p) -} - -// Attributes: -// - Batch -type AgentEmitBatchArgs struct { - Batch *jaeger.Batch `thrift:"batch,1" db:"batch" json:"batch"` -} - -func NewAgentEmitBatchArgs() *AgentEmitBatchArgs { - return &AgentEmitBatchArgs{} -} - -var AgentEmitBatchArgs_Batch_DEFAULT *jaeger.Batch - -func (p *AgentEmitBatchArgs) GetBatch() *jaeger.Batch { - if !p.IsSetBatch() { - return AgentEmitBatchArgs_Batch_DEFAULT - } - return p.Batch -} -func (p *AgentEmitBatchArgs) IsSetBatch() bool { - return p.Batch != nil -} - -func (p *AgentEmitBatchArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *AgentEmitBatchArgs) ReadField1(iprot thrift.TProtocol) error { - p.Batch = &jaeger.Batch{} - if err := p.Batch.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err) - } - return nil -} - -func (p *AgentEmitBatchArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("emitBatch_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } 
- if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *AgentEmitBatchArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("batch", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err) - } - if err := p.Batch.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err) - } - return err -} - -func (p *AgentEmitBatchArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p) -} diff --git a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/aggregation_validator/GoUnusedProtection__.go b/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/aggregation_validator/GoUnusedProtection__.go deleted file mode 100644 index fba7830f9..000000000 --- a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/aggregation_validator/GoUnusedProtection__.go +++ /dev/null @@ -1,6 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package aggregation_validator - -var GoUnusedProtection__ int diff --git a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/aggregation_validator/aggregation_validator-consts.go b/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/aggregation_validator/aggregation_validator-consts.go deleted file mode 100644 index f73e21730..000000000 --- a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/aggregation_validator/aggregation_validator-consts.go +++ /dev/null @@ -1,23 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package aggregation_validator - -import ( - "bytes" - "context" - "fmt" - "reflect" - - "git.apache.org/thrift.git/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = reflect.DeepEqual -var _ = bytes.Equal - -func init() { -} diff --git a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/aggregation_validator/aggregation_validator-remote/aggregation_validator-remote.go b/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/aggregation_validator/aggregation_validator-remote/aggregation_validator-remote.go deleted file mode 100755 index 114f96c2d..000000000 --- a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/aggregation_validator/aggregation_validator-remote/aggregation_validator-remote.go +++ /dev/null @@ -1,139 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package main - -import ( - "context" - "flag" - "fmt" - "math" - "net" - "net/url" - "os" - "strconv" - "strings" - - "git.apache.org/thrift.git/lib/go/thrift" - "go.opencensus.io/exporter/jaeger/internal/gen-go/aggregation_validator" -) - -func Usage() { - fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:") - flag.PrintDefaults() - fmt.Fprintln(os.Stderr, "\nFunctions:") - fmt.Fprintln(os.Stderr, " ValidateTraceResponse validateTrace(string traceId)") - fmt.Fprintln(os.Stderr) - os.Exit(0) -} - -func main() { - flag.Usage = Usage - var host string - var port int - var protocol string - var urlString string - var framed bool - var useHttp bool - var parsedUrl *url.URL - var trans thrift.TTransport - _ = strconv.Atoi - _ = math.Abs - flag.Usage = Usage - flag.StringVar(&host, "h", "localhost", "Specify host and port") - flag.IntVar(&port, "p", 9090, "Specify port") - flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)") - flag.StringVar(&urlString, "u", "", "Specify the url") - flag.BoolVar(&framed, "framed", false, "Use framed transport") - flag.BoolVar(&useHttp, "http", false, "Use http") - flag.Parse() - - if len(urlString) > 0 { - var err error - parsedUrl, err = url.Parse(urlString) - if err != nil { - fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) - flag.Usage() - } - host = parsedUrl.Host - useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http" - } else if useHttp { - _, err := url.Parse(fmt.Sprint("http://", host, ":", port)) - if err != nil { - fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) - flag.Usage() - } - } - - cmd := flag.Arg(0) - var err error - if useHttp { - trans, err = thrift.NewTHttpClient(parsedUrl.String()) - } else { - portStr := fmt.Sprint(port) - if strings.Contains(host, ":") { - host, portStr, err = net.SplitHostPort(host) - if err != nil { - fmt.Fprintln(os.Stderr, "error with host:", err) - os.Exit(1) - } - } - trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr)) - if err != nil { - fmt.Fprintln(os.Stderr, "error resolving address:", err) - os.Exit(1) - } - if framed { - trans = thrift.NewTFramedTransport(trans) - } - } - if err != nil { - fmt.Fprintln(os.Stderr, "Error creating transport", err) - os.Exit(1) - } - defer trans.Close() - var protocolFactory thrift.TProtocolFactory - switch protocol { - case "compact": - protocolFactory = thrift.NewTCompactProtocolFactory() - break - case "simplejson": - protocolFactory = thrift.NewTSimpleJSONProtocolFactory() - break - case "json": - protocolFactory = thrift.NewTJSONProtocolFactory() - break - case "binary", "": - protocolFactory = thrift.NewTBinaryProtocolFactoryDefault() - break - default: - 
fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol) - Usage() - os.Exit(1) - } - iprot := protocolFactory.GetProtocol(trans) - oprot := protocolFactory.GetProtocol(trans) - client := aggregation_validator.NewAggregationValidatorClient(thrift.NewTStandardClient(iprot, oprot)) - if err := trans.Open(); err != nil { - fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err) - os.Exit(1) - } - - switch cmd { - case "validateTrace": - if flag.NArg()-1 != 1 { - fmt.Fprintln(os.Stderr, "ValidateTrace requires 1 args") - flag.Usage() - } - argvalue0 := flag.Arg(1) - value0 := argvalue0 - fmt.Print(client.ValidateTrace(context.Background(), value0)) - fmt.Print("\n") - break - case "": - Usage() - break - default: - fmt.Fprintln(os.Stderr, "Invalid function ", cmd) - } -} diff --git a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/aggregation_validator/aggregation_validator.go b/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/aggregation_validator/aggregation_validator.go deleted file mode 100644 index 66c581e57..000000000 --- a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/aggregation_validator/aggregation_validator.go +++ /dev/null @@ -1,521 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package aggregation_validator - -import ( - "bytes" - "context" - "fmt" - "reflect" - - "git.apache.org/thrift.git/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = reflect.DeepEqual -var _ = bytes.Equal - -// Attributes: -// - Ok -// - TraceCount -type ValidateTraceResponse struct { - Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"` - TraceCount int64 `thrift:"traceCount,2,required" db:"traceCount" json:"traceCount"` -} - -func NewValidateTraceResponse() *ValidateTraceResponse { - return &ValidateTraceResponse{} -} - -func (p *ValidateTraceResponse) GetOk() bool { - return p.Ok -} - -func (p *ValidateTraceResponse) GetTraceCount() int64 { - return p.TraceCount -} -func (p *ValidateTraceResponse) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetOk bool = false - var issetTraceCount bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetOk = true - case 2: - if fieldTypeId == thrift.I64 { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetTraceCount = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetOk { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set")) - } - if !issetTraceCount { - return 
thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceCount is not set")) - } - return nil -} - -func (p *ValidateTraceResponse) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Ok = v - } - return nil -} - -func (p *ValidateTraceResponse) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.TraceCount = v - } - return nil -} - -func (p *ValidateTraceResponse) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("ValidateTraceResponse"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *ValidateTraceResponse) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("ok", thrift.BOOL, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) - } - if err := oprot.WriteBool(bool(p.Ok)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) - } - return err -} - -func (p *ValidateTraceResponse) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("traceCount", thrift.I64, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceCount: ", p), err) - } - if err := oprot.WriteI64(int64(p.TraceCount)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.traceCount (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceCount: ", p), err) - } - return err -} - -func (p *ValidateTraceResponse) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ValidateTraceResponse(%+v)", *p) -} - -type AggregationValidator interface { - // Parameters: - // - TraceId - ValidateTrace(ctx context.Context, traceId string) (r *ValidateTraceResponse, err error) -} - -type AggregationValidatorClient struct { - c thrift.TClient -} - -// Deprecated: Use NewAggregationValidator instead -func NewAggregationValidatorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AggregationValidatorClient { - return &AggregationValidatorClient{ - c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), - } -} - -// Deprecated: Use NewAggregationValidator instead -func NewAggregationValidatorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AggregationValidatorClient { - return &AggregationValidatorClient{ - c: thrift.NewTStandardClient(iprot, oprot), - } -} - -func NewAggregationValidatorClient(c thrift.TClient) *AggregationValidatorClient { - return &AggregationValidatorClient{ - c: c, - } -} - -// Parameters: -// - TraceId -func (p *AggregationValidatorClient) ValidateTrace(ctx context.Context, traceId string) (r 
*ValidateTraceResponse, err error) { - var _args0 AggregationValidatorValidateTraceArgs - _args0.TraceId = traceId - var _result1 AggregationValidatorValidateTraceResult - if err = p.c.Call(ctx, "validateTrace", &_args0, &_result1); err != nil { - return - } - return _result1.GetSuccess(), nil -} - -type AggregationValidatorProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler AggregationValidator -} - -func (p *AggregationValidatorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *AggregationValidatorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *AggregationValidatorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewAggregationValidatorProcessor(handler AggregationValidator) *AggregationValidatorProcessor { - - self2 := &AggregationValidatorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self2.processorMap["validateTrace"] = &aggregationValidatorProcessorValidateTrace{handler: handler} - return self2 -} - -func (p *AggregationValidatorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return false, err - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(ctx, seqId, iprot, oprot) - } - iprot.Skip(thrift.STRUCT) - iprot.ReadMessageEnd() - x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x3.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, x3 - -} - -type aggregationValidatorProcessorValidateTrace struct { - handler AggregationValidator -} - -func (p *aggregationValidatorProcessorValidateTrace) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AggregationValidatorValidateTraceArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("validateTrace", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := AggregationValidatorValidateTraceResult{} - var retval *ValidateTraceResponse - var err2 error - if retval, err2 = p.handler.ValidateTrace(ctx, args.TraceId); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing validateTrace: "+err2.Error()) - oprot.WriteMessageBegin("validateTrace", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("validateTrace", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - TraceId -type AggregationValidatorValidateTraceArgs struct { - TraceId string `thrift:"traceId,1,required" 
db:"traceId" json:"traceId"` -} - -func NewAggregationValidatorValidateTraceArgs() *AggregationValidatorValidateTraceArgs { - return &AggregationValidatorValidateTraceArgs{} -} - -func (p *AggregationValidatorValidateTraceArgs) GetTraceId() string { - return p.TraceId -} -func (p *AggregationValidatorValidateTraceArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetTraceId bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetTraceId = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetTraceId { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceId is not set")) - } - return nil -} - -func (p *AggregationValidatorValidateTraceArgs) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.TraceId = v - } - return nil -} - -func (p *AggregationValidatorValidateTraceArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("validateTrace_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *AggregationValidatorValidateTraceArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("traceId", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:traceId: ", p), err) - } - if err := oprot.WriteString(string(p.TraceId)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.traceId (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:traceId: ", p), err) - } - return err -} - -func (p *AggregationValidatorValidateTraceArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AggregationValidatorValidateTraceArgs(%+v)", *p) -} - -// Attributes: -// - Success -type AggregationValidatorValidateTraceResult struct { - Success *ValidateTraceResponse `thrift:"success,0" db:"success" json:"success,omitempty"` -} - -func NewAggregationValidatorValidateTraceResult() *AggregationValidatorValidateTraceResult { - return &AggregationValidatorValidateTraceResult{} -} - -var AggregationValidatorValidateTraceResult_Success_DEFAULT *ValidateTraceResponse - -func (p *AggregationValidatorValidateTraceResult) GetSuccess() *ValidateTraceResponse { - if !p.IsSetSuccess() { - return 
AggregationValidatorValidateTraceResult_Success_DEFAULT - } - return p.Success -} -func (p *AggregationValidatorValidateTraceResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *AggregationValidatorValidateTraceResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField0(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *AggregationValidatorValidateTraceResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = &ValidateTraceResponse{} - if err := p.Success.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) - } - return nil -} - -func (p *AggregationValidatorValidateTraceResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("validateTrace_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField0(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *AggregationValidatorValidateTraceResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := p.Success.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *AggregationValidatorValidateTraceResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AggregationValidatorValidateTraceResult(%+v)", *p) -} diff --git a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/baggage/GoUnusedProtection__.go b/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/baggage/GoUnusedProtection__.go deleted file mode 100644 index a48540622..000000000 --- a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/baggage/GoUnusedProtection__.go +++ /dev/null @@ -1,6 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package baggage - -var GoUnusedProtection__ int diff --git a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/baggage/baggage-consts.go b/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/baggage/baggage-consts.go deleted file mode 100644 index 42ee62ddb..000000000 --- 
a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/baggage/baggage-consts.go +++ /dev/null @@ -1,23 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package baggage - -import ( - "bytes" - "context" - "fmt" - "reflect" - - "git.apache.org/thrift.git/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = reflect.DeepEqual -var _ = bytes.Equal - -func init() { -} diff --git a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/baggage/baggage.go b/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/baggage/baggage.go deleted file mode 100644 index e6a2f0435..000000000 --- a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/baggage/baggage.go +++ /dev/null @@ -1,540 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package baggage - -import ( - "bytes" - "context" - "fmt" - "reflect" - - "git.apache.org/thrift.git/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = reflect.DeepEqual -var _ = bytes.Equal - -// Attributes: -// - BaggageKey -// - MaxValueLength -type BaggageRestriction struct { - BaggageKey string `thrift:"baggageKey,1,required" db:"baggageKey" json:"baggageKey"` - MaxValueLength int32 `thrift:"maxValueLength,2,required" db:"maxValueLength" json:"maxValueLength"` -} - -func NewBaggageRestriction() *BaggageRestriction { - return &BaggageRestriction{} -} - -func (p *BaggageRestriction) GetBaggageKey() string { - return p.BaggageKey -} - -func (p *BaggageRestriction) GetMaxValueLength() int32 { - return p.MaxValueLength -} -func (p *BaggageRestriction) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetBaggageKey bool = false - var issetMaxValueLength bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetBaggageKey = true - case 2: - if fieldTypeId == thrift.I32 { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetMaxValueLength = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetBaggageKey { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field BaggageKey is not set")) - } - if !issetMaxValueLength { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxValueLength is not set")) - } - return nil -} - -func (p *BaggageRestriction) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return 
thrift.PrependError("error reading field 1: ", err) - } else { - p.BaggageKey = v - } - return nil -} - -func (p *BaggageRestriction) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.MaxValueLength = v - } - return nil -} - -func (p *BaggageRestriction) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("BaggageRestriction"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *BaggageRestriction) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("baggageKey", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:baggageKey: ", p), err) - } - if err := oprot.WriteString(string(p.BaggageKey)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.baggageKey (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:baggageKey: ", p), err) - } - return err -} - -func (p *BaggageRestriction) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("maxValueLength", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:maxValueLength: ", p), err) - } - if err := oprot.WriteI32(int32(p.MaxValueLength)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.maxValueLength (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:maxValueLength: ", p), err) - } - return err -} - -func (p *BaggageRestriction) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BaggageRestriction(%+v)", *p) -} - -type BaggageRestrictionManager interface { - // getBaggageRestrictions retrieves the baggage restrictions for a specific service. - // Usually, baggageRestrictions apply to all services however there may be situations - // where a baggageKey might only be allowed to be set by a specific service. 
- // - // Parameters: - // - ServiceName - GetBaggageRestrictions(ctx context.Context, serviceName string) (r []*BaggageRestriction, err error) -} - -type BaggageRestrictionManagerClient struct { - c thrift.TClient -} - -// Deprecated: Use NewBaggageRestrictionManager instead -func NewBaggageRestrictionManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *BaggageRestrictionManagerClient { - return &BaggageRestrictionManagerClient{ - c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), - } -} - -// Deprecated: Use NewBaggageRestrictionManager instead -func NewBaggageRestrictionManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *BaggageRestrictionManagerClient { - return &BaggageRestrictionManagerClient{ - c: thrift.NewTStandardClient(iprot, oprot), - } -} - -func NewBaggageRestrictionManagerClient(c thrift.TClient) *BaggageRestrictionManagerClient { - return &BaggageRestrictionManagerClient{ - c: c, - } -} - -// getBaggageRestrictions retrieves the baggage restrictions for a specific service. -// Usually, baggageRestrictions apply to all services however there may be situations -// where a baggageKey might only be allowed to be set by a specific service. -// -// Parameters: -// - ServiceName -func (p *BaggageRestrictionManagerClient) GetBaggageRestrictions(ctx context.Context, serviceName string) (r []*BaggageRestriction, err error) { - var _args0 BaggageRestrictionManagerGetBaggageRestrictionsArgs - _args0.ServiceName = serviceName - var _result1 BaggageRestrictionManagerGetBaggageRestrictionsResult - if err = p.c.Call(ctx, "getBaggageRestrictions", &_args0, &_result1); err != nil { - return - } - return _result1.GetSuccess(), nil -} - -type BaggageRestrictionManagerProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler BaggageRestrictionManager -} - -func (p *BaggageRestrictionManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *BaggageRestrictionManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *BaggageRestrictionManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewBaggageRestrictionManagerProcessor(handler BaggageRestrictionManager) *BaggageRestrictionManagerProcessor { - - self2 := &BaggageRestrictionManagerProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self2.processorMap["getBaggageRestrictions"] = &baggageRestrictionManagerProcessorGetBaggageRestrictions{handler: handler} - return self2 -} - -func (p *BaggageRestrictionManagerProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return false, err - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(ctx, seqId, iprot, oprot) - } - iprot.Skip(thrift.STRUCT) - iprot.ReadMessageEnd() - x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x3.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, x3 - -} - -type baggageRestrictionManagerProcessorGetBaggageRestrictions struct { - handler BaggageRestrictionManager -} - -func (p 
*baggageRestrictionManagerProcessorGetBaggageRestrictions) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := BaggageRestrictionManagerGetBaggageRestrictionsArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("getBaggageRestrictions", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := BaggageRestrictionManagerGetBaggageRestrictionsResult{} - var retval []*BaggageRestriction - var err2 error - if retval, err2 = p.handler.GetBaggageRestrictions(ctx, args.ServiceName); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getBaggageRestrictions: "+err2.Error()) - oprot.WriteMessageBegin("getBaggageRestrictions", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("getBaggageRestrictions", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - ServiceName -type BaggageRestrictionManagerGetBaggageRestrictionsArgs struct { - ServiceName string `thrift:"serviceName,1" db:"serviceName" json:"serviceName"` -} - -func NewBaggageRestrictionManagerGetBaggageRestrictionsArgs() *BaggageRestrictionManagerGetBaggageRestrictionsArgs { - return &BaggageRestrictionManagerGetBaggageRestrictionsArgs{} -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) GetServiceName() string { - return p.ServiceName -} -func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.ServiceName = v - } - return nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("getBaggageRestrictions_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := 
p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) - } - if err := oprot.WriteString(string(p.ServiceName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) - } - return err -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsArgs(%+v)", *p) -} - -// Attributes: -// - Success -type BaggageRestrictionManagerGetBaggageRestrictionsResult struct { - Success []*BaggageRestriction `thrift:"success,0" db:"success" json:"success,omitempty"` -} - -func NewBaggageRestrictionManagerGetBaggageRestrictionsResult() *BaggageRestrictionManagerGetBaggageRestrictionsResult { - return &BaggageRestrictionManagerGetBaggageRestrictionsResult{} -} - -var BaggageRestrictionManagerGetBaggageRestrictionsResult_Success_DEFAULT []*BaggageRestriction - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) GetSuccess() []*BaggageRestriction { - return p.Success -} -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if fieldTypeId == thrift.LIST { - if err := p.ReadField0(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) ReadField0(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*BaggageRestriction, 0, size) - p.Success = tSlice - for i := 0; i < size; i++ { - _elem4 := &BaggageRestriction{} - if err := _elem4.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) - } - p.Success = append(p.Success, _elem4) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p 
*BaggageRestrictionManagerGetBaggageRestrictionsResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("getBaggageRestrictions_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField0(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Success { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsResult(%+v)", *p) -} diff --git a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/baggage/baggage_restriction_manager-remote/baggage_restriction_manager-remote.go b/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/baggage/baggage_restriction_manager-remote/baggage_restriction_manager-remote.go deleted file mode 100755 index 6ae764a2d..000000000 --- a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/baggage/baggage_restriction_manager-remote/baggage_restriction_manager-remote.go +++ /dev/null @@ -1,139 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package main - -import ( - "context" - "flag" - "fmt" - "math" - "net" - "net/url" - "os" - "strconv" - "strings" - - "git.apache.org/thrift.git/lib/go/thrift" - "go.opencensus.io/exporter/jaeger/internal/gen-go/baggage" -) - -func Usage() { - fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:") - flag.PrintDefaults() - fmt.Fprintln(os.Stderr, "\nFunctions:") - fmt.Fprintln(os.Stderr, " getBaggageRestrictions(string serviceName)") - fmt.Fprintln(os.Stderr) - os.Exit(0) -} - -func main() { - flag.Usage = Usage - var host string - var port int - var protocol string - var urlString string - var framed bool - var useHttp bool - var parsedUrl *url.URL - var trans thrift.TTransport - _ = strconv.Atoi - _ = math.Abs - flag.Usage = Usage - flag.StringVar(&host, "h", "localhost", "Specify host and port") - flag.IntVar(&port, "p", 9090, "Specify port") - flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)") - flag.StringVar(&urlString, "u", "", "Specify the url") - flag.BoolVar(&framed, "framed", false, "Use framed transport") - flag.BoolVar(&useHttp, "http", false, "Use http") - flag.Parse() - - if 
len(urlString) > 0 { - var err error - parsedUrl, err = url.Parse(urlString) - if err != nil { - fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) - flag.Usage() - } - host = parsedUrl.Host - useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http" - } else if useHttp { - _, err := url.Parse(fmt.Sprint("http://", host, ":", port)) - if err != nil { - fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) - flag.Usage() - } - } - - cmd := flag.Arg(0) - var err error - if useHttp { - trans, err = thrift.NewTHttpClient(parsedUrl.String()) - } else { - portStr := fmt.Sprint(port) - if strings.Contains(host, ":") { - host, portStr, err = net.SplitHostPort(host) - if err != nil { - fmt.Fprintln(os.Stderr, "error with host:", err) - os.Exit(1) - } - } - trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr)) - if err != nil { - fmt.Fprintln(os.Stderr, "error resolving address:", err) - os.Exit(1) - } - if framed { - trans = thrift.NewTFramedTransport(trans) - } - } - if err != nil { - fmt.Fprintln(os.Stderr, "Error creating transport", err) - os.Exit(1) - } - defer trans.Close() - var protocolFactory thrift.TProtocolFactory - switch protocol { - case "compact": - protocolFactory = thrift.NewTCompactProtocolFactory() - break - case "simplejson": - protocolFactory = thrift.NewTSimpleJSONProtocolFactory() - break - case "json": - protocolFactory = thrift.NewTJSONProtocolFactory() - break - case "binary", "": - protocolFactory = thrift.NewTBinaryProtocolFactoryDefault() - break - default: - fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol) - Usage() - os.Exit(1) - } - iprot := protocolFactory.GetProtocol(trans) - oprot := protocolFactory.GetProtocol(trans) - client := baggage.NewBaggageRestrictionManagerClient(thrift.NewTStandardClient(iprot, oprot)) - if err := trans.Open(); err != nil { - fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err) - os.Exit(1) - } - - switch cmd { - case "getBaggageRestrictions": - if flag.NArg()-1 != 1 { - fmt.Fprintln(os.Stderr, "GetBaggageRestrictions requires 1 args") - flag.Usage() - } - argvalue0 := flag.Arg(1) - value0 := argvalue0 - fmt.Print(client.GetBaggageRestrictions(context.Background(), value0)) - fmt.Print("\n") - break - case "": - Usage() - break - default: - fmt.Fprintln(os.Stderr, "Invalid function ", cmd) - } -} diff --git a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/dependency/GoUnusedProtection__.go b/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/dependency/GoUnusedProtection__.go deleted file mode 100644 index 894ec23e6..000000000 --- a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/dependency/GoUnusedProtection__.go +++ /dev/null @@ -1,6 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package dependency - -var GoUnusedProtection__ int diff --git a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/dependency/dependency-consts.go b/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/dependency/dependency-consts.go deleted file mode 100644 index 3297a29c2..000000000 --- a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/dependency/dependency-consts.go +++ /dev/null @@ -1,23 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package dependency - -import ( - "bytes" - "context" - "fmt" - "reflect" - - "git.apache.org/thrift.git/lib/go/thrift" -) - -// (needed to ensure safety because of naive 
import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = reflect.DeepEqual -var _ = bytes.Equal - -func init() { -} diff --git a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/dependency/dependency-remote/dependency-remote.go b/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/dependency/dependency-remote/dependency-remote.go deleted file mode 100755 index 2146158e8..000000000 --- a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/dependency/dependency-remote/dependency-remote.go +++ /dev/null @@ -1,165 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package main - -import ( - "context" - "flag" - "fmt" - "math" - "net" - "net/url" - "os" - "strconv" - "strings" - - "git.apache.org/thrift.git/lib/go/thrift" - "go.opencensus.io/exporter/jaeger/internal/gen-go/dependency" -) - -func Usage() { - fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:") - flag.PrintDefaults() - fmt.Fprintln(os.Stderr, "\nFunctions:") - fmt.Fprintln(os.Stderr, " Dependencies getDependenciesForTrace(string traceId)") - fmt.Fprintln(os.Stderr, " void saveDependencies(Dependencies dependencies)") - fmt.Fprintln(os.Stderr) - os.Exit(0) -} - -func main() { - flag.Usage = Usage - var host string - var port int - var protocol string - var urlString string - var framed bool - var useHttp bool - var parsedUrl *url.URL - var trans thrift.TTransport - _ = strconv.Atoi - _ = math.Abs - flag.Usage = Usage - flag.StringVar(&host, "h", "localhost", "Specify host and port") - flag.IntVar(&port, "p", 9090, "Specify port") - flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)") - flag.StringVar(&urlString, "u", "", "Specify the url") - flag.BoolVar(&framed, "framed", false, "Use framed transport") - flag.BoolVar(&useHttp, "http", false, "Use http") - flag.Parse() - - if len(urlString) > 0 { - var err error - parsedUrl, err = url.Parse(urlString) - if err != nil { - fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) - flag.Usage() - } - host = parsedUrl.Host - useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http" - } else if useHttp { - _, err := url.Parse(fmt.Sprint("http://", host, ":", port)) - if err != nil { - fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) - flag.Usage() - } - } - - cmd := flag.Arg(0) - var err error - if useHttp { - trans, err = thrift.NewTHttpClient(parsedUrl.String()) - } else { - portStr := fmt.Sprint(port) - if strings.Contains(host, ":") { - host, portStr, err = net.SplitHostPort(host) - if err != nil { - fmt.Fprintln(os.Stderr, "error with host:", err) - os.Exit(1) - } - } - trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr)) - if err != nil { - fmt.Fprintln(os.Stderr, "error resolving address:", err) - os.Exit(1) - } - if framed { - trans = thrift.NewTFramedTransport(trans) - } - } - if err != nil { - fmt.Fprintln(os.Stderr, "Error creating transport", err) - os.Exit(1) - } - defer trans.Close() - var protocolFactory thrift.TProtocolFactory - switch protocol { - case "compact": - protocolFactory = thrift.NewTCompactProtocolFactory() - break - case "simplejson": - protocolFactory = thrift.NewTSimpleJSONProtocolFactory() - break - case "json": - protocolFactory = thrift.NewTJSONProtocolFactory() - break - case "binary", "": - protocolFactory = thrift.NewTBinaryProtocolFactoryDefault() - break - default: - 
fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol) - Usage() - os.Exit(1) - } - iprot := protocolFactory.GetProtocol(trans) - oprot := protocolFactory.GetProtocol(trans) - client := dependency.NewDependencyClient(thrift.NewTStandardClient(iprot, oprot)) - if err := trans.Open(); err != nil { - fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err) - os.Exit(1) - } - - switch cmd { - case "getDependenciesForTrace": - if flag.NArg()-1 != 1 { - fmt.Fprintln(os.Stderr, "GetDependenciesForTrace requires 1 args") - flag.Usage() - } - argvalue0 := flag.Arg(1) - value0 := argvalue0 - fmt.Print(client.GetDependenciesForTrace(context.Background(), value0)) - fmt.Print("\n") - break - case "saveDependencies": - if flag.NArg()-1 != 1 { - fmt.Fprintln(os.Stderr, "SaveDependencies requires 1 args") - flag.Usage() - } - arg7 := flag.Arg(1) - mbTrans8 := thrift.NewTMemoryBufferLen(len(arg7)) - defer mbTrans8.Close() - _, err9 := mbTrans8.WriteString(arg7) - if err9 != nil { - Usage() - return - } - factory10 := thrift.NewTSimpleJSONProtocolFactory() - jsProt11 := factory10.GetProtocol(mbTrans8) - argvalue0 := dependency.NewDependencies() - err12 := argvalue0.Read(jsProt11) - if err12 != nil { - Usage() - return - } - value0 := argvalue0 - fmt.Print(client.SaveDependencies(context.Background(), value0)) - fmt.Print("\n") - break - case "": - Usage() - break - default: - fmt.Fprintln(os.Stderr, "Invalid function ", cmd) - } -} diff --git a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/dependency/dependency.go b/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/dependency/dependency.go deleted file mode 100644 index 43ba09c89..000000000 --- a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/dependency/dependency.go +++ /dev/null @@ -1,833 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package dependency - -import ( - "bytes" - "context" - "fmt" - "reflect" - - "git.apache.org/thrift.git/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = reflect.DeepEqual -var _ = bytes.Equal - -// Attributes: -// - Parent -// - Child -// - CallCount -type DependencyLink struct { - Parent string `thrift:"parent,1,required" db:"parent" json:"parent"` - Child string `thrift:"child,2,required" db:"child" json:"child"` - // unused field # 3 - CallCount int64 `thrift:"callCount,4,required" db:"callCount" json:"callCount"` -} - -func NewDependencyLink() *DependencyLink { - return &DependencyLink{} -} - -func (p *DependencyLink) GetParent() string { - return p.Parent -} - -func (p *DependencyLink) GetChild() string { - return p.Child -} - -func (p *DependencyLink) GetCallCount() int64 { - return p.CallCount -} -func (p *DependencyLink) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetParent bool = false - var issetChild bool = false - var issetCallCount bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetParent = true - case 2: - if fieldTypeId == thrift.STRING { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetChild = true - case 4: - if fieldTypeId == thrift.I64 { - if err := p.ReadField4(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetCallCount = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetParent { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Parent is not set")) - } - if !issetChild { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Child is not set")) - } - if !issetCallCount { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field CallCount is not set")) - } - return nil -} - -func (p *DependencyLink) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Parent = v - } - return nil -} - -func (p *DependencyLink) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.Child = v - } - return nil -} - -func (p *DependencyLink) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.CallCount = v - } - return nil -} - -func (p *DependencyLink) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("DependencyLink"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := 
p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *DependencyLink) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("parent", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:parent: ", p), err) - } - if err := oprot.WriteString(string(p.Parent)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.parent (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:parent: ", p), err) - } - return err -} - -func (p *DependencyLink) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("child", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:child: ", p), err) - } - if err := oprot.WriteString(string(p.Child)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.child (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:child: ", p), err) - } - return err -} - -func (p *DependencyLink) writeField4(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("callCount", thrift.I64, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:callCount: ", p), err) - } - if err := oprot.WriteI64(int64(p.CallCount)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.callCount (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:callCount: ", p), err) - } - return err -} - -func (p *DependencyLink) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("DependencyLink(%+v)", *p) -} - -// Attributes: -// - Links -type Dependencies struct { - Links []*DependencyLink `thrift:"links,1,required" db:"links" json:"links"` -} - -func NewDependencies() *Dependencies { - return &Dependencies{} -} - -func (p *Dependencies) GetLinks() []*DependencyLink { - return p.Links -} -func (p *Dependencies) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetLinks bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.LIST { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetLinks = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetLinks { - return 
thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Links is not set")) - } - return nil -} - -func (p *Dependencies) ReadField1(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*DependencyLink, 0, size) - p.Links = tSlice - for i := 0; i < size; i++ { - _elem0 := &DependencyLink{} - if err := _elem0.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) - } - p.Links = append(p.Links, _elem0) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Dependencies) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Dependencies"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Dependencies) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("links", thrift.LIST, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:links: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Links)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Links { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:links: ", p), err) - } - return err -} - -func (p *Dependencies) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Dependencies(%+v)", *p) -} - -type Dependency interface { - // Parameters: - // - TraceId - GetDependenciesForTrace(ctx context.Context, traceId string) (r *Dependencies, err error) - // Parameters: - // - Dependencies - SaveDependencies(ctx context.Context, dependencies *Dependencies) (err error) -} - -type DependencyClient struct { - c thrift.TClient -} - -// Deprecated: Use NewDependency instead -func NewDependencyClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *DependencyClient { - return &DependencyClient{ - c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), - } -} - -// Deprecated: Use NewDependency instead -func NewDependencyClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *DependencyClient { - return &DependencyClient{ - c: thrift.NewTStandardClient(iprot, oprot), - } -} - -func NewDependencyClient(c thrift.TClient) *DependencyClient { - return &DependencyClient{ - c: c, - } -} - -// Parameters: -// - TraceId -func (p *DependencyClient) GetDependenciesForTrace(ctx context.Context, traceId string) (r *Dependencies, err error) { - var _args1 DependencyGetDependenciesForTraceArgs - _args1.TraceId = traceId - var _result2 DependencyGetDependenciesForTraceResult - if err = p.c.Call(ctx, "getDependenciesForTrace", &_args1, &_result2); err != nil { - return - } - 
return _result2.GetSuccess(), nil -} - -// Parameters: -// - Dependencies -func (p *DependencyClient) SaveDependencies(ctx context.Context, dependencies *Dependencies) (err error) { - var _args3 DependencySaveDependenciesArgs - _args3.Dependencies = dependencies - if err := p.c.Call(ctx, "saveDependencies", &_args3, nil); err != nil { - return err - } - return nil -} - -type DependencyProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler Dependency -} - -func (p *DependencyProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *DependencyProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *DependencyProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewDependencyProcessor(handler Dependency) *DependencyProcessor { - - self4 := &DependencyProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self4.processorMap["getDependenciesForTrace"] = &dependencyProcessorGetDependenciesForTrace{handler: handler} - self4.processorMap["saveDependencies"] = &dependencyProcessorSaveDependencies{handler: handler} - return self4 -} - -func (p *DependencyProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return false, err - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(ctx, seqId, iprot, oprot) - } - iprot.Skip(thrift.STRUCT) - iprot.ReadMessageEnd() - x5 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x5.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, x5 - -} - -type dependencyProcessorGetDependenciesForTrace struct { - handler Dependency -} - -func (p *dependencyProcessorGetDependenciesForTrace) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := DependencyGetDependenciesForTraceArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("getDependenciesForTrace", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := DependencyGetDependenciesForTraceResult{} - var retval *Dependencies - var err2 error - if retval, err2 = p.handler.GetDependenciesForTrace(ctx, args.TraceId); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getDependenciesForTrace: "+err2.Error()) - oprot.WriteMessageBegin("getDependenciesForTrace", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("getDependenciesForTrace", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -type dependencyProcessorSaveDependencies struct { - 
handler Dependency -} - -func (p *dependencyProcessorSaveDependencies) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := DependencySaveDependenciesArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - return false, err - } - - iprot.ReadMessageEnd() - var err2 error - if err2 = p.handler.SaveDependencies(ctx, args.Dependencies); err2 != nil { - return true, err2 - } - return true, nil -} - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - TraceId -type DependencyGetDependenciesForTraceArgs struct { - TraceId string `thrift:"traceId,1,required" db:"traceId" json:"traceId"` -} - -func NewDependencyGetDependenciesForTraceArgs() *DependencyGetDependenciesForTraceArgs { - return &DependencyGetDependenciesForTraceArgs{} -} - -func (p *DependencyGetDependenciesForTraceArgs) GetTraceId() string { - return p.TraceId -} -func (p *DependencyGetDependenciesForTraceArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetTraceId bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetTraceId = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetTraceId { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceId is not set")) - } - return nil -} - -func (p *DependencyGetDependenciesForTraceArgs) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.TraceId = v - } - return nil -} - -func (p *DependencyGetDependenciesForTraceArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("getDependenciesForTrace_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *DependencyGetDependenciesForTraceArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("traceId", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:traceId: ", p), err) - } - if err := oprot.WriteString(string(p.TraceId)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.traceId (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:traceId: ", p), err) - } - return err -} - -func (p *DependencyGetDependenciesForTraceArgs) String() 
string { - if p == nil { - return "" - } - return fmt.Sprintf("DependencyGetDependenciesForTraceArgs(%+v)", *p) -} - -// Attributes: -// - Success -type DependencyGetDependenciesForTraceResult struct { - Success *Dependencies `thrift:"success,0" db:"success" json:"success,omitempty"` -} - -func NewDependencyGetDependenciesForTraceResult() *DependencyGetDependenciesForTraceResult { - return &DependencyGetDependenciesForTraceResult{} -} - -var DependencyGetDependenciesForTraceResult_Success_DEFAULT *Dependencies - -func (p *DependencyGetDependenciesForTraceResult) GetSuccess() *Dependencies { - if !p.IsSetSuccess() { - return DependencyGetDependenciesForTraceResult_Success_DEFAULT - } - return p.Success -} -func (p *DependencyGetDependenciesForTraceResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *DependencyGetDependenciesForTraceResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField0(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *DependencyGetDependenciesForTraceResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = &Dependencies{} - if err := p.Success.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) - } - return nil -} - -func (p *DependencyGetDependenciesForTraceResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("getDependenciesForTrace_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField0(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *DependencyGetDependenciesForTraceResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := p.Success.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *DependencyGetDependenciesForTraceResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("DependencyGetDependenciesForTraceResult(%+v)", *p) -} - -// Attributes: -// - Dependencies -type DependencySaveDependenciesArgs struct { - Dependencies *Dependencies `thrift:"dependencies,1" 
db:"dependencies" json:"dependencies"` -} - -func NewDependencySaveDependenciesArgs() *DependencySaveDependenciesArgs { - return &DependencySaveDependenciesArgs{} -} - -var DependencySaveDependenciesArgs_Dependencies_DEFAULT *Dependencies - -func (p *DependencySaveDependenciesArgs) GetDependencies() *Dependencies { - if !p.IsSetDependencies() { - return DependencySaveDependenciesArgs_Dependencies_DEFAULT - } - return p.Dependencies -} -func (p *DependencySaveDependenciesArgs) IsSetDependencies() bool { - return p.Dependencies != nil -} - -func (p *DependencySaveDependenciesArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *DependencySaveDependenciesArgs) ReadField1(iprot thrift.TProtocol) error { - p.Dependencies = &Dependencies{} - if err := p.Dependencies.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Dependencies), err) - } - return nil -} - -func (p *DependencySaveDependenciesArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("saveDependencies_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *DependencySaveDependenciesArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("dependencies", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:dependencies: ", p), err) - } - if err := p.Dependencies.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Dependencies), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:dependencies: ", p), err) - } - return err -} - -func (p *DependencySaveDependenciesArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("DependencySaveDependenciesArgs(%+v)", *p) -} diff --git a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/sampling/GoUnusedProtection__.go b/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/sampling/GoUnusedProtection__.go deleted file mode 100644 index 9be720327..000000000 --- a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/sampling/GoUnusedProtection__.go +++ /dev/null @@ -1,6 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU 
KNOW WHAT YOU ARE DOING - -package sampling - -var GoUnusedProtection__ int diff --git a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/sampling/sampling-consts.go b/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/sampling/sampling-consts.go deleted file mode 100644 index de29e6334..000000000 --- a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/sampling/sampling-consts.go +++ /dev/null @@ -1,23 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package sampling - -import ( - "bytes" - "context" - "fmt" - "reflect" - - "git.apache.org/thrift.git/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = reflect.DeepEqual -var _ = bytes.Equal - -func init() { -} diff --git a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/sampling/sampling.go b/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/sampling/sampling.go deleted file mode 100644 index 3dc224332..000000000 --- a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/sampling/sampling.go +++ /dev/null @@ -1,1320 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package sampling - -import ( - "bytes" - "context" - "database/sql/driver" - "errors" - "fmt" - "reflect" - - "git.apache.org/thrift.git/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = reflect.DeepEqual -var _ = bytes.Equal - -type SamplingStrategyType int64 - -const ( - SamplingStrategyType_PROBABILISTIC SamplingStrategyType = 0 - SamplingStrategyType_RATE_LIMITING SamplingStrategyType = 1 -) - -func (p SamplingStrategyType) String() string { - switch p { - case SamplingStrategyType_PROBABILISTIC: - return "PROBABILISTIC" - case SamplingStrategyType_RATE_LIMITING: - return "RATE_LIMITING" - } - return "" -} - -func SamplingStrategyTypeFromString(s string) (SamplingStrategyType, error) { - switch s { - case "PROBABILISTIC": - return SamplingStrategyType_PROBABILISTIC, nil - case "RATE_LIMITING": - return SamplingStrategyType_RATE_LIMITING, nil - } - return SamplingStrategyType(0), fmt.Errorf("not a valid SamplingStrategyType string") -} - -func SamplingStrategyTypePtr(v SamplingStrategyType) *SamplingStrategyType { return &v } - -func (p SamplingStrategyType) MarshalText() ([]byte, error) { - return []byte(p.String()), nil -} - -func (p *SamplingStrategyType) UnmarshalText(text []byte) error { - q, err := SamplingStrategyTypeFromString(string(text)) - if err != nil { - return err - } - *p = q - return nil -} - -func (p *SamplingStrategyType) Scan(value interface{}) error { - v, ok := value.(int64) - if !ok { - return errors.New("Scan value is not int64") - } - *p = SamplingStrategyType(v) - return nil -} - -func (p *SamplingStrategyType) Value() (driver.Value, error) { - if p == nil { - return nil, nil - } - return int64(*p), nil -} - -// Attributes: -// - SamplingRate -type ProbabilisticSamplingStrategy struct { - SamplingRate float64 `thrift:"samplingRate,1,required" db:"samplingRate" json:"samplingRate"` -} - -func NewProbabilisticSamplingStrategy() *ProbabilisticSamplingStrategy { - return &ProbabilisticSamplingStrategy{} -} - -func (p *ProbabilisticSamplingStrategy) GetSamplingRate() float64 { - return p.SamplingRate -} -func (p 
*ProbabilisticSamplingStrategy) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetSamplingRate bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.DOUBLE { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetSamplingRate = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetSamplingRate { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SamplingRate is not set")) - } - return nil -} - -func (p *ProbabilisticSamplingStrategy) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadDouble(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.SamplingRate = v - } - return nil -} - -func (p *ProbabilisticSamplingStrategy) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("ProbabilisticSamplingStrategy"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *ProbabilisticSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("samplingRate", thrift.DOUBLE, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:samplingRate: ", p), err) - } - if err := oprot.WriteDouble(float64(p.SamplingRate)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.samplingRate (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:samplingRate: ", p), err) - } - return err -} - -func (p *ProbabilisticSamplingStrategy) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ProbabilisticSamplingStrategy(%+v)", *p) -} - -// Attributes: -// - MaxTracesPerSecond -type RateLimitingSamplingStrategy struct { - MaxTracesPerSecond int16 `thrift:"maxTracesPerSecond,1,required" db:"maxTracesPerSecond" json:"maxTracesPerSecond"` -} - -func NewRateLimitingSamplingStrategy() *RateLimitingSamplingStrategy { - return &RateLimitingSamplingStrategy{} -} - -func (p *RateLimitingSamplingStrategy) GetMaxTracesPerSecond() int16 { - return p.MaxTracesPerSecond -} -func (p *RateLimitingSamplingStrategy) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetMaxTracesPerSecond bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d 
read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I16 { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetMaxTracesPerSecond = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetMaxTracesPerSecond { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxTracesPerSecond is not set")) - } - return nil -} - -func (p *RateLimitingSamplingStrategy) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI16(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.MaxTracesPerSecond = v - } - return nil -} - -func (p *RateLimitingSamplingStrategy) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("RateLimitingSamplingStrategy"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *RateLimitingSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("maxTracesPerSecond", thrift.I16, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:maxTracesPerSecond: ", p), err) - } - if err := oprot.WriteI16(int16(p.MaxTracesPerSecond)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.maxTracesPerSecond (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:maxTracesPerSecond: ", p), err) - } - return err -} - -func (p *RateLimitingSamplingStrategy) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("RateLimitingSamplingStrategy(%+v)", *p) -} - -// Attributes: -// - Operation -// - ProbabilisticSampling -type OperationSamplingStrategy struct { - Operation string `thrift:"operation,1,required" db:"operation" json:"operation"` - ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2,required" db:"probabilisticSampling" json:"probabilisticSampling"` -} - -func NewOperationSamplingStrategy() *OperationSamplingStrategy { - return &OperationSamplingStrategy{} -} - -func (p *OperationSamplingStrategy) GetOperation() string { - return p.Operation -} - -var OperationSamplingStrategy_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy - -func (p *OperationSamplingStrategy) GetProbabilisticSampling() *ProbabilisticSamplingStrategy { - if !p.IsSetProbabilisticSampling() { - return OperationSamplingStrategy_ProbabilisticSampling_DEFAULT - } - return p.ProbabilisticSampling -} -func (p *OperationSamplingStrategy) IsSetProbabilisticSampling() bool { - return p.ProbabilisticSampling != nil -} - -func (p *OperationSamplingStrategy) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetOperation bool = false - var issetProbabilisticSampling bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetOperation = true - case 2: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetProbabilisticSampling = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetOperation { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Operation is not set")) - } - if !issetProbabilisticSampling { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ProbabilisticSampling is not set")) - } - return nil -} - -func (p *OperationSamplingStrategy) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Operation = v - } - return nil -} - -func (p *OperationSamplingStrategy) ReadField2(iprot thrift.TProtocol) error { - p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{} - if err := p.ProbabilisticSampling.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err) - } - return nil -} - -func (p *OperationSamplingStrategy) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("OperationSamplingStrategy"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *OperationSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("operation", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:operation: ", p), err) - } - if err := oprot.WriteString(string(p.Operation)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.operation (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:operation: ", p), err) - } - return err -} - -func (p *OperationSamplingStrategy) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("probabilisticSampling", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err) - } - if err := 
p.ProbabilisticSampling.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err) - } - return err -} - -func (p *OperationSamplingStrategy) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("OperationSamplingStrategy(%+v)", *p) -} - -// Attributes: -// - DefaultSamplingProbability -// - DefaultLowerBoundTracesPerSecond -// - PerOperationStrategies -// - DefaultUpperBoundTracesPerSecond -type PerOperationSamplingStrategies struct { - DefaultSamplingProbability float64 `thrift:"defaultSamplingProbability,1,required" db:"defaultSamplingProbability" json:"defaultSamplingProbability"` - DefaultLowerBoundTracesPerSecond float64 `thrift:"defaultLowerBoundTracesPerSecond,2,required" db:"defaultLowerBoundTracesPerSecond" json:"defaultLowerBoundTracesPerSecond"` - PerOperationStrategies []*OperationSamplingStrategy `thrift:"perOperationStrategies,3,required" db:"perOperationStrategies" json:"perOperationStrategies"` - DefaultUpperBoundTracesPerSecond *float64 `thrift:"defaultUpperBoundTracesPerSecond,4" db:"defaultUpperBoundTracesPerSecond" json:"defaultUpperBoundTracesPerSecond,omitempty"` -} - -func NewPerOperationSamplingStrategies() *PerOperationSamplingStrategies { - return &PerOperationSamplingStrategies{} -} - -func (p *PerOperationSamplingStrategies) GetDefaultSamplingProbability() float64 { - return p.DefaultSamplingProbability -} - -func (p *PerOperationSamplingStrategies) GetDefaultLowerBoundTracesPerSecond() float64 { - return p.DefaultLowerBoundTracesPerSecond -} - -func (p *PerOperationSamplingStrategies) GetPerOperationStrategies() []*OperationSamplingStrategy { - return p.PerOperationStrategies -} - -var PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT float64 - -func (p *PerOperationSamplingStrategies) GetDefaultUpperBoundTracesPerSecond() float64 { - if !p.IsSetDefaultUpperBoundTracesPerSecond() { - return PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT - } - return *p.DefaultUpperBoundTracesPerSecond -} -func (p *PerOperationSamplingStrategies) IsSetDefaultUpperBoundTracesPerSecond() bool { - return p.DefaultUpperBoundTracesPerSecond != nil -} - -func (p *PerOperationSamplingStrategies) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetDefaultSamplingProbability bool = false - var issetDefaultLowerBoundTracesPerSecond bool = false - var issetPerOperationStrategies bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.DOUBLE { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetDefaultSamplingProbability = true - case 2: - if fieldTypeId == thrift.DOUBLE { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetDefaultLowerBoundTracesPerSecond = true - case 3: - if fieldTypeId == thrift.LIST { - if err := p.ReadField3(iprot); err != nil { - 
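// Every generated Read method is the same loop: ReadFieldBegin until a STOP
// marker, skip any field whose wire type disagrees with the schema, and flip
// an isset flag per required field so a missing one is reported after the
// loop. A schematic of just that bookkeeping (not the thrift API itself):
package main

import "fmt"

type fieldSpec struct {
	id       int16
	required bool
}

// checkRequired mirrors the isset checks emitted after each read loop.
func checkRequired(schema []fieldSpec, seen map[int16]bool) error {
	for _, f := range schema {
		if f.required && !seen[f.id] {
			return fmt.Errorf("required field %d is not set", f.id)
		}
	}
	return nil
}

func main() {
	// PerOperationSamplingStrategies: fields 1-3 required, field 4 optional.
	schema := []fieldSpec{{1, true}, {2, true}, {3, true}, {4, false}}
	seen := map[int16]bool{1: true, 2: true} // field 3 never arrived
	fmt.Println(checkRequired(schema, seen)) // required field 3 is not set
}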
return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetPerOperationStrategies = true - case 4: - if fieldTypeId == thrift.DOUBLE { - if err := p.ReadField4(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetDefaultSamplingProbability { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultSamplingProbability is not set")) - } - if !issetDefaultLowerBoundTracesPerSecond { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultLowerBoundTracesPerSecond is not set")) - } - if !issetPerOperationStrategies { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field PerOperationStrategies is not set")) - } - return nil -} - -func (p *PerOperationSamplingStrategies) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadDouble(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.DefaultSamplingProbability = v - } - return nil -} - -func (p *PerOperationSamplingStrategies) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadDouble(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.DefaultLowerBoundTracesPerSecond = v - } - return nil -} - -func (p *PerOperationSamplingStrategies) ReadField3(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*OperationSamplingStrategy, 0, size) - p.PerOperationStrategies = tSlice - for i := 0; i < size; i++ { - _elem0 := &OperationSamplingStrategy{} - if err := _elem0.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) - } - p.PerOperationStrategies = append(p.PerOperationStrategies, _elem0) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *PerOperationSamplingStrategies) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadDouble(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.DefaultUpperBoundTracesPerSecond = &v - } - return nil -} - -func (p *PerOperationSamplingStrategies) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("PerOperationSamplingStrategies"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *PerOperationSamplingStrategies) writeField1(oprot thrift.TProtocol) (err error) { - if err := 
oprot.WriteFieldBegin("defaultSamplingProbability", thrift.DOUBLE, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:defaultSamplingProbability: ", p), err) - } - if err := oprot.WriteDouble(float64(p.DefaultSamplingProbability)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.defaultSamplingProbability (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:defaultSamplingProbability: ", p), err) - } - return err -} - -func (p *PerOperationSamplingStrategies) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("defaultLowerBoundTracesPerSecond", thrift.DOUBLE, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:defaultLowerBoundTracesPerSecond: ", p), err) - } - if err := oprot.WriteDouble(float64(p.DefaultLowerBoundTracesPerSecond)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.defaultLowerBoundTracesPerSecond (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:defaultLowerBoundTracesPerSecond: ", p), err) - } - return err -} - -func (p *PerOperationSamplingStrategies) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("perOperationStrategies", thrift.LIST, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:perOperationStrategies: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.PerOperationStrategies)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.PerOperationStrategies { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:perOperationStrategies: ", p), err) - } - return err -} - -func (p *PerOperationSamplingStrategies) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetDefaultUpperBoundTracesPerSecond() { - if err := oprot.WriteFieldBegin("defaultUpperBoundTracesPerSecond", thrift.DOUBLE, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:defaultUpperBoundTracesPerSecond: ", p), err) - } - if err := oprot.WriteDouble(float64(*p.DefaultUpperBoundTracesPerSecond)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.defaultUpperBoundTracesPerSecond (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:defaultUpperBoundTracesPerSecond: ", p), err) - } - } - return err -} - -func (p *PerOperationSamplingStrategies) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("PerOperationSamplingStrategies(%+v)", *p) -} - -// Attributes: -// - StrategyType -// - ProbabilisticSampling -// - RateLimitingSampling -// - OperationSampling -type SamplingStrategyResponse struct { - StrategyType SamplingStrategyType `thrift:"strategyType,1,required" db:"strategyType" json:"strategyType"` - ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2" db:"probabilisticSampling" json:"probabilisticSampling,omitempty"` - 
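// SamplingStrategyResponse behaves like a tagged union: strategyType records
// which of the optional pointer fields the agent filled in, and the generated
// IsSet*/Get* accessors fall back to a package-level *_DEFAULT when the
// pointer is nil. The optional-field convention in isolation, as a sketch:
package main

import "fmt"

type probabilistic struct{ Rate float64 }

// nil doubles as the "unset" sentinel for optional struct fields.
var defaultProbabilistic *probabilistic

type response struct {
	Probabilistic *probabilistic // optional, hence a pointer
}

func (r *response) IsSetProbabilistic() bool { return r.Probabilistic != nil }

func (r *response) GetProbabilistic() *probabilistic {
	if !r.IsSetProbabilistic() {
		return defaultProbabilistic
	}
	return r.Probabilistic
}

func main() {
	r := &response{Probabilistic: &probabilistic{Rate: 0.01}}
	fmt.Println(r.IsSetProbabilistic(), r.GetProbabilistic().Rate) // true 0.01
}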
RateLimitingSampling *RateLimitingSamplingStrategy `thrift:"rateLimitingSampling,3" db:"rateLimitingSampling" json:"rateLimitingSampling,omitempty"` - OperationSampling *PerOperationSamplingStrategies `thrift:"operationSampling,4" db:"operationSampling" json:"operationSampling,omitempty"` -} - -func NewSamplingStrategyResponse() *SamplingStrategyResponse { - return &SamplingStrategyResponse{} -} - -func (p *SamplingStrategyResponse) GetStrategyType() SamplingStrategyType { - return p.StrategyType -} - -var SamplingStrategyResponse_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy - -func (p *SamplingStrategyResponse) GetProbabilisticSampling() *ProbabilisticSamplingStrategy { - if !p.IsSetProbabilisticSampling() { - return SamplingStrategyResponse_ProbabilisticSampling_DEFAULT - } - return p.ProbabilisticSampling -} - -var SamplingStrategyResponse_RateLimitingSampling_DEFAULT *RateLimitingSamplingStrategy - -func (p *SamplingStrategyResponse) GetRateLimitingSampling() *RateLimitingSamplingStrategy { - if !p.IsSetRateLimitingSampling() { - return SamplingStrategyResponse_RateLimitingSampling_DEFAULT - } - return p.RateLimitingSampling -} - -var SamplingStrategyResponse_OperationSampling_DEFAULT *PerOperationSamplingStrategies - -func (p *SamplingStrategyResponse) GetOperationSampling() *PerOperationSamplingStrategies { - if !p.IsSetOperationSampling() { - return SamplingStrategyResponse_OperationSampling_DEFAULT - } - return p.OperationSampling -} -func (p *SamplingStrategyResponse) IsSetProbabilisticSampling() bool { - return p.ProbabilisticSampling != nil -} - -func (p *SamplingStrategyResponse) IsSetRateLimitingSampling() bool { - return p.RateLimitingSampling != nil -} - -func (p *SamplingStrategyResponse) IsSetOperationSampling() bool { - return p.OperationSampling != nil -} - -func (p *SamplingStrategyResponse) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetStrategyType bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetStrategyType = true - case 2: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField4(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetStrategyType { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StrategyType is not set")) - } - return nil -} - -func (p 
*SamplingStrategyResponse) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - temp := SamplingStrategyType(v) - p.StrategyType = temp - } - return nil -} - -func (p *SamplingStrategyResponse) ReadField2(iprot thrift.TProtocol) error { - p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{} - if err := p.ProbabilisticSampling.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err) - } - return nil -} - -func (p *SamplingStrategyResponse) ReadField3(iprot thrift.TProtocol) error { - p.RateLimitingSampling = &RateLimitingSamplingStrategy{} - if err := p.RateLimitingSampling.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.RateLimitingSampling), err) - } - return nil -} - -func (p *SamplingStrategyResponse) ReadField4(iprot thrift.TProtocol) error { - p.OperationSampling = &PerOperationSamplingStrategies{} - if err := p.OperationSampling.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationSampling), err) - } - return nil -} - -func (p *SamplingStrategyResponse) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("SamplingStrategyResponse"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *SamplingStrategyResponse) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("strategyType", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:strategyType: ", p), err) - } - if err := oprot.WriteI32(int32(p.StrategyType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.strategyType (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:strategyType: ", p), err) - } - return err -} - -func (p *SamplingStrategyResponse) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetProbabilisticSampling() { - if err := oprot.WriteFieldBegin("probabilisticSampling", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err) - } - if err := p.ProbabilisticSampling.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err) - } - } - return err -} - -func (p *SamplingStrategyResponse) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetRateLimitingSampling() { - if err := oprot.WriteFieldBegin("rateLimitingSampling", thrift.STRUCT, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 
3:rateLimitingSampling: ", p), err) - } - if err := p.RateLimitingSampling.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.RateLimitingSampling), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:rateLimitingSampling: ", p), err) - } - } - return err -} - -func (p *SamplingStrategyResponse) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetOperationSampling() { - if err := oprot.WriteFieldBegin("operationSampling", thrift.STRUCT, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:operationSampling: ", p), err) - } - if err := p.OperationSampling.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationSampling), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:operationSampling: ", p), err) - } - } - return err -} - -func (p *SamplingStrategyResponse) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("SamplingStrategyResponse(%+v)", *p) -} - -type SamplingManager interface { - // Parameters: - // - ServiceName - GetSamplingStrategy(ctx context.Context, serviceName string) (r *SamplingStrategyResponse, err error) -} - -type SamplingManagerClient struct { - c thrift.TClient -} - -// Deprecated: Use NewSamplingManager instead -func NewSamplingManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *SamplingManagerClient { - return &SamplingManagerClient{ - c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), - } -} - -// Deprecated: Use NewSamplingManager instead -func NewSamplingManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *SamplingManagerClient { - return &SamplingManagerClient{ - c: thrift.NewTStandardClient(iprot, oprot), - } -} - -func NewSamplingManagerClient(c thrift.TClient) *SamplingManagerClient { - return &SamplingManagerClient{ - c: c, - } -} - -// Parameters: -// - ServiceName -func (p *SamplingManagerClient) GetSamplingStrategy(ctx context.Context, serviceName string) (r *SamplingStrategyResponse, err error) { - var _args1 SamplingManagerGetSamplingStrategyArgs - _args1.ServiceName = serviceName - var _result2 SamplingManagerGetSamplingStrategyResult - if err = p.c.Call(ctx, "getSamplingStrategy", &_args1, &_result2); err != nil { - return - } - return _result2.GetSuccess(), nil -} - -type SamplingManagerProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler SamplingManager -} - -func (p *SamplingManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *SamplingManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *SamplingManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewSamplingManagerProcessor(handler SamplingManager) *SamplingManagerProcessor { - - self3 := &SamplingManagerProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self3.processorMap["getSamplingStrategy"] = &samplingManagerProcessorGetSamplingStrategy{handler: handler} - return self3 -} - -func (p *SamplingManagerProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err 
thrift.TException) { - name, _, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return false, err - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(ctx, seqId, iprot, oprot) - } - iprot.Skip(thrift.STRUCT) - iprot.ReadMessageEnd() - x4 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x4.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, x4 - -} - -type samplingManagerProcessorGetSamplingStrategy struct { - handler SamplingManager -} - -func (p *samplingManagerProcessorGetSamplingStrategy) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := SamplingManagerGetSamplingStrategyArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("getSamplingStrategy", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := SamplingManagerGetSamplingStrategyResult{} - var retval *SamplingStrategyResponse - var err2 error - if retval, err2 = p.handler.GetSamplingStrategy(ctx, args.ServiceName); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getSamplingStrategy: "+err2.Error()) - oprot.WriteMessageBegin("getSamplingStrategy", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("getSamplingStrategy", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - ServiceName -type SamplingManagerGetSamplingStrategyArgs struct { - ServiceName string `thrift:"serviceName,1" db:"serviceName" json:"serviceName"` -} - -func NewSamplingManagerGetSamplingStrategyArgs() *SamplingManagerGetSamplingStrategyArgs { - return &SamplingManagerGetSamplingStrategyArgs{} -} - -func (p *SamplingManagerGetSamplingStrategyArgs) GetServiceName() string { - return p.ServiceName -} -func (p *SamplingManagerGetSamplingStrategyArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *SamplingManagerGetSamplingStrategyArgs) ReadField1(iprot 
thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.ServiceName = v - } - return nil -} - -func (p *SamplingManagerGetSamplingStrategyArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("getSamplingStrategy_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *SamplingManagerGetSamplingStrategyArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) - } - if err := oprot.WriteString(string(p.ServiceName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) - } - return err -} - -func (p *SamplingManagerGetSamplingStrategyArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("SamplingManagerGetSamplingStrategyArgs(%+v)", *p) -} - -// Attributes: -// - Success -type SamplingManagerGetSamplingStrategyResult struct { - Success *SamplingStrategyResponse `thrift:"success,0" db:"success" json:"success,omitempty"` -} - -func NewSamplingManagerGetSamplingStrategyResult() *SamplingManagerGetSamplingStrategyResult { - return &SamplingManagerGetSamplingStrategyResult{} -} - -var SamplingManagerGetSamplingStrategyResult_Success_DEFAULT *SamplingStrategyResponse - -func (p *SamplingManagerGetSamplingStrategyResult) GetSuccess() *SamplingStrategyResponse { - if !p.IsSetSuccess() { - return SamplingManagerGetSamplingStrategyResult_Success_DEFAULT - } - return p.Success -} -func (p *SamplingManagerGetSamplingStrategyResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *SamplingManagerGetSamplingStrategyResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField0(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *SamplingManagerGetSamplingStrategyResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = &SamplingStrategyResponse{} - if err := p.Success.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) - } - return nil -} - -func (p 
*SamplingManagerGetSamplingStrategyResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("getSamplingStrategy_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField0(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *SamplingManagerGetSamplingStrategyResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := p.Success.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *SamplingManagerGetSamplingStrategyResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("SamplingManagerGetSamplingStrategyResult(%+v)", *p) -} diff --git a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/sampling/sampling_manager-remote/sampling_manager-remote.go b/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/sampling/sampling_manager-remote/sampling_manager-remote.go deleted file mode 100755 index d71cea2fc..000000000 --- a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/sampling/sampling_manager-remote/sampling_manager-remote.go +++ /dev/null @@ -1,139 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package main - -import ( - "context" - "flag" - "fmt" - "math" - "net" - "net/url" - "os" - "strconv" - "strings" - - "git.apache.org/thrift.git/lib/go/thrift" - "go.opencensus.io/exporter/jaeger/internal/gen-go/sampling" -) - -func Usage() { - fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:") - flag.PrintDefaults() - fmt.Fprintln(os.Stderr, "\nFunctions:") - fmt.Fprintln(os.Stderr, " SamplingStrategyResponse getSamplingStrategy(string serviceName)") - fmt.Fprintln(os.Stderr) - os.Exit(0) -} - -func main() { - flag.Usage = Usage - var host string - var port int - var protocol string - var urlString string - var framed bool - var useHttp bool - var parsedUrl *url.URL - var trans thrift.TTransport - _ = strconv.Atoi - _ = math.Abs - flag.Usage = Usage - flag.StringVar(&host, "h", "localhost", "Specify host and port") - flag.IntVar(&port, "p", 9090, "Specify port") - flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)") - flag.StringVar(&urlString, "u", "", "Specify the url") - flag.BoolVar(&framed, "framed", false, "Use framed transport") - flag.BoolVar(&useHttp, "http", false, "Use http") - flag.Parse() - - if len(urlString) > 0 { - var err error - parsedUrl, err = url.Parse(urlString) - if err != nil { - fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) - flag.Usage() - } - host = parsedUrl.Host - useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http" - } else if useHttp { - _, err := url.Parse(fmt.Sprint("http://", host, ":", port)) - if err != nil { - 
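// The *-remote files deleted here are the debug CLIs the Thrift compiler
// emits alongside each service: parse -h/-u/-P flags, build a socket or HTTP
// transport (optionally framed), pick a protocol factory by name, then issue
// a single RPC. The protocol selection reduces to a switch like this sketch:
package main

import (
	"fmt"
	"os"
)

func protocolFor(name string) (string, error) {
	switch name {
	case "binary", "": // the generated default
		return "TBinaryProtocol", nil
	case "compact":
		return "TCompactProtocol", nil
	case "json":
		return "TJSONProtocol", nil
	case "simplejson":
		return "TSimpleJSONProtocol", nil
	default:
		return "", fmt.Errorf("invalid protocol specified: %s", name)
	}
}

func main() {
	p, err := protocolFor("compact")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("using", p)
}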
fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) - flag.Usage() - } - } - - cmd := flag.Arg(0) - var err error - if useHttp { - trans, err = thrift.NewTHttpClient(parsedUrl.String()) - } else { - portStr := fmt.Sprint(port) - if strings.Contains(host, ":") { - host, portStr, err = net.SplitHostPort(host) - if err != nil { - fmt.Fprintln(os.Stderr, "error with host:", err) - os.Exit(1) - } - } - trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr)) - if err != nil { - fmt.Fprintln(os.Stderr, "error resolving address:", err) - os.Exit(1) - } - if framed { - trans = thrift.NewTFramedTransport(trans) - } - } - if err != nil { - fmt.Fprintln(os.Stderr, "Error creating transport", err) - os.Exit(1) - } - defer trans.Close() - var protocolFactory thrift.TProtocolFactory - switch protocol { - case "compact": - protocolFactory = thrift.NewTCompactProtocolFactory() - break - case "simplejson": - protocolFactory = thrift.NewTSimpleJSONProtocolFactory() - break - case "json": - protocolFactory = thrift.NewTJSONProtocolFactory() - break - case "binary", "": - protocolFactory = thrift.NewTBinaryProtocolFactoryDefault() - break - default: - fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol) - Usage() - os.Exit(1) - } - iprot := protocolFactory.GetProtocol(trans) - oprot := protocolFactory.GetProtocol(trans) - client := sampling.NewSamplingManagerClient(thrift.NewTStandardClient(iprot, oprot)) - if err := trans.Open(); err != nil { - fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err) - os.Exit(1) - } - - switch cmd { - case "getSamplingStrategy": - if flag.NArg()-1 != 1 { - fmt.Fprintln(os.Stderr, "GetSamplingStrategy requires 1 args") - flag.Usage() - } - argvalue0 := flag.Arg(1) - value0 := argvalue0 - fmt.Print(client.GetSamplingStrategy(context.Background(), value0)) - fmt.Print("\n") - break - case "": - Usage() - break - default: - fmt.Fprintln(os.Stderr, "Invalid function ", cmd) - } -} diff --git a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go b/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go deleted file mode 100644 index c5df9b690..000000000 --- a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go +++ /dev/null @@ -1,6 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package zipkincore - -var GoUnusedProtection__ int diff --git a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/zipkincore/zipkin_collector-remote/zipkin_collector-remote.go b/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/zipkincore/zipkin_collector-remote/zipkin_collector-remote.go deleted file mode 100755 index 76fe670fb..000000000 --- a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/zipkincore/zipkin_collector-remote/zipkin_collector-remote.go +++ /dev/null @@ -1,155 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package main - -import ( - "context" - "flag" - "fmt" - "math" - "net" - "net/url" - "os" - "strconv" - "strings" - - "git.apache.org/thrift.git/lib/go/thrift" - "go.opencensus.io/exporter/jaeger/internal/gen-go/zipkincore" -) - -func Usage() { - fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:") - flag.PrintDefaults() - fmt.Fprintln(os.Stderr, "\nFunctions:") - fmt.Fprintln(os.Stderr, " 
submitZipkinBatch( spans)") - fmt.Fprintln(os.Stderr) - os.Exit(0) -} - -func main() { - flag.Usage = Usage - var host string - var port int - var protocol string - var urlString string - var framed bool - var useHttp bool - var parsedUrl *url.URL - var trans thrift.TTransport - _ = strconv.Atoi - _ = math.Abs - flag.Usage = Usage - flag.StringVar(&host, "h", "localhost", "Specify host and port") - flag.IntVar(&port, "p", 9090, "Specify port") - flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)") - flag.StringVar(&urlString, "u", "", "Specify the url") - flag.BoolVar(&framed, "framed", false, "Use framed transport") - flag.BoolVar(&useHttp, "http", false, "Use http") - flag.Parse() - - if len(urlString) > 0 { - var err error - parsedUrl, err = url.Parse(urlString) - if err != nil { - fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) - flag.Usage() - } - host = parsedUrl.Host - useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http" - } else if useHttp { - _, err := url.Parse(fmt.Sprint("http://", host, ":", port)) - if err != nil { - fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) - flag.Usage() - } - } - - cmd := flag.Arg(0) - var err error - if useHttp { - trans, err = thrift.NewTHttpClient(parsedUrl.String()) - } else { - portStr := fmt.Sprint(port) - if strings.Contains(host, ":") { - host, portStr, err = net.SplitHostPort(host) - if err != nil { - fmt.Fprintln(os.Stderr, "error with host:", err) - os.Exit(1) - } - } - trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr)) - if err != nil { - fmt.Fprintln(os.Stderr, "error resolving address:", err) - os.Exit(1) - } - if framed { - trans = thrift.NewTFramedTransport(trans) - } - } - if err != nil { - fmt.Fprintln(os.Stderr, "Error creating transport", err) - os.Exit(1) - } - defer trans.Close() - var protocolFactory thrift.TProtocolFactory - switch protocol { - case "compact": - protocolFactory = thrift.NewTCompactProtocolFactory() - break - case "simplejson": - protocolFactory = thrift.NewTSimpleJSONProtocolFactory() - break - case "json": - protocolFactory = thrift.NewTJSONProtocolFactory() - break - case "binary", "": - protocolFactory = thrift.NewTBinaryProtocolFactoryDefault() - break - default: - fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol) - Usage() - os.Exit(1) - } - iprot := protocolFactory.GetProtocol(trans) - oprot := protocolFactory.GetProtocol(trans) - client := zipkincore.NewZipkinCollectorClient(thrift.NewTStandardClient(iprot, oprot)) - if err := trans.Open(); err != nil { - fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err) - os.Exit(1) - } - - switch cmd { - case "submitZipkinBatch": - if flag.NArg()-1 != 1 { - fmt.Fprintln(os.Stderr, "SubmitZipkinBatch requires 1 args") - flag.Usage() - } - arg8 := flag.Arg(1) - mbTrans9 := thrift.NewTMemoryBufferLen(len(arg8)) - defer mbTrans9.Close() - _, err10 := mbTrans9.WriteString(arg8) - if err10 != nil { - Usage() - return - } - factory11 := thrift.NewTSimpleJSONProtocolFactory() - jsProt12 := factory11.GetProtocol(mbTrans9) - containerStruct0 := zipkincore.NewZipkinCollectorSubmitZipkinBatchArgs() - err13 := containerStruct0.ReadField1(jsProt12) - if err13 != nil { - Usage() - return - } - argvalue0 := containerStruct0.Spans - value0 := argvalue0 - fmt.Print(client.SubmitZipkinBatch(context.Background(), value0)) - fmt.Print("\n") - break - case "": - Usage() - break - default: - fmt.Fprintln(os.Stderr, "Invalid function ", cmd) - } -} diff --git 
a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go b/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go deleted file mode 100644 index 7b06b0d4c..000000000 --- a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go +++ /dev/null @@ -1,37 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package zipkincore - -import ( - "bytes" - "context" - "fmt" - "reflect" - - "git.apache.org/thrift.git/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = reflect.DeepEqual -var _ = bytes.Equal - -const CLIENT_SEND = "cs" -const CLIENT_RECV = "cr" -const SERVER_SEND = "ss" -const SERVER_RECV = "sr" -const WIRE_SEND = "ws" -const WIRE_RECV = "wr" -const CLIENT_SEND_FRAGMENT = "csf" -const CLIENT_RECV_FRAGMENT = "crf" -const SERVER_SEND_FRAGMENT = "ssf" -const SERVER_RECV_FRAGMENT = "srf" -const LOCAL_COMPONENT = "lc" -const CLIENT_ADDR = "ca" -const SERVER_ADDR = "sa" - -func init() { -} diff --git a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/zipkincore/zipkincore.go b/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/zipkincore/zipkincore.go deleted file mode 100644 index 2f8bdd8ab..000000000 --- a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/zipkincore/zipkincore.go +++ /dev/null @@ -1,1778 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package zipkincore - -import ( - "bytes" - "context" - "database/sql/driver" - "errors" - "fmt" - "reflect" - - "git.apache.org/thrift.git/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = reflect.DeepEqual -var _ = bytes.Equal - -type AnnotationType int64 - -const ( - AnnotationType_BOOL AnnotationType = 0 - AnnotationType_BYTES AnnotationType = 1 - AnnotationType_I16 AnnotationType = 2 - AnnotationType_I32 AnnotationType = 3 - AnnotationType_I64 AnnotationType = 4 - AnnotationType_DOUBLE AnnotationType = 5 - AnnotationType_STRING AnnotationType = 6 -) - -func (p AnnotationType) String() string { - switch p { - case AnnotationType_BOOL: - return "BOOL" - case AnnotationType_BYTES: - return "BYTES" - case AnnotationType_I16: - return "I16" - case AnnotationType_I32: - return "I32" - case AnnotationType_I64: - return "I64" - case AnnotationType_DOUBLE: - return "DOUBLE" - case AnnotationType_STRING: - return "STRING" - } - return "" -} - -func AnnotationTypeFromString(s string) (AnnotationType, error) { - switch s { - case "BOOL": - return AnnotationType_BOOL, nil - case "BYTES": - return AnnotationType_BYTES, nil - case "I16": - return AnnotationType_I16, nil - case "I32": - return AnnotationType_I32, nil - case "I64": - return AnnotationType_I64, nil - case "DOUBLE": - return AnnotationType_DOUBLE, nil - case "STRING": - return AnnotationType_STRING, nil - } - return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string") -} - -func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v } - -func (p AnnotationType) MarshalText() ([]byte, error) { - return []byte(p.String()), nil -} - -func (p *AnnotationType) UnmarshalText(text []byte) error { - q, err := AnnotationTypeFromString(string(text)) - if err != nil { - return err - } - *p = q - return nil -} - -func (p *AnnotationType) Scan(value interface{}) error { - v, ok := value.(int64) - if !ok { - return errors.New("Scan value is not int64") - } - *p = AnnotationType(v) - return nil -} - -func (p *AnnotationType) Value() (driver.Value, error) { - if p == nil { - return nil, nil - } - return int64(*p), nil -} - -// Indicates the network context of a service recording an annotation with two -// exceptions. -// -// When a BinaryAnnotation, and key is CLIENT_ADDR or SERVER_ADDR, -// the endpoint indicates the source or destination of an RPC. This exception -// allows zipkin to display network context of uninstrumented services, or -// clients such as web browsers. -// -// Attributes: -// - Ipv4: IPv4 host address packed into 4 bytes. -// -// Ex for the ip 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4 -// - Port: IPv4 port -// -// Note: this is to be treated as an unsigned integer, so watch for negatives. -// -// Conventionally, when the port isn't known, port = 0. -// - ServiceName: Service name in lowercase, such as "memcache" or "zipkin-web" -// -// Conventionally, when the service name isn't known, service_name = "unknown". 
-type Endpoint struct { - Ipv4 int32 `thrift:"ipv4,1" db:"ipv4" json:"ipv4"` - Port int16 `thrift:"port,2" db:"port" json:"port"` - ServiceName string `thrift:"service_name,3" db:"service_name" json:"service_name"` -} - -func NewEndpoint() *Endpoint { - return &Endpoint{} -} - -func (p *Endpoint) GetIpv4() int32 { - return p.Ipv4 -} - -func (p *Endpoint) GetPort() int16 { - return p.Port -} - -func (p *Endpoint) GetServiceName() string { - return p.ServiceName -} -func (p *Endpoint) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I16 { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.STRING { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *Endpoint) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Ipv4 = v - } - return nil -} - -func (p *Endpoint) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI16(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.Port = v - } - return nil -} - -func (p *Endpoint) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.ServiceName = v - } - return nil -} - -func (p *Endpoint) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Endpoint"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Endpoint) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("ipv4", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err) - } - if err := oprot.WriteI32(int32(p.Ipv4)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T 
write field end error 1:ipv4: ", p), err) - } - return err -} - -func (p *Endpoint) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("port", thrift.I16, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err) - } - if err := oprot.WriteI16(int16(p.Port)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err) - } - return err -} - -func (p *Endpoint) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("service_name", thrift.STRING, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err) - } - if err := oprot.WriteString(string(p.ServiceName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err) - } - return err -} - -func (p *Endpoint) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Endpoint(%+v)", *p) -} - -// An annotation is similar to a log statement. It includes a host field which -// allows these events to be attributed properly, and also aggregatable. -// -// Attributes: -// - Timestamp: Microseconds from epoch. -// -// This value should use the most precise value possible. For example, -// gettimeofday or syncing nanoTime against a tick of currentTimeMillis. -// - Value -// - Host: Always the host that recorded the event. By specifying the host you allow -// rollup of all events (such as client requests to a service) by IP address. 
-type Annotation struct { - Timestamp int64 `thrift:"timestamp,1" db:"timestamp" json:"timestamp"` - Value string `thrift:"value,2" db:"value" json:"value"` - Host *Endpoint `thrift:"host,3" db:"host" json:"host,omitempty"` -} - -func NewAnnotation() *Annotation { - return &Annotation{} -} - -func (p *Annotation) GetTimestamp() int64 { - return p.Timestamp -} - -func (p *Annotation) GetValue() string { - return p.Value -} - -var Annotation_Host_DEFAULT *Endpoint - -func (p *Annotation) GetHost() *Endpoint { - if !p.IsSetHost() { - return Annotation_Host_DEFAULT - } - return p.Host -} -func (p *Annotation) IsSetHost() bool { - return p.Host != nil -} - -func (p *Annotation) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *Annotation) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Timestamp = v - } - return nil -} - -func (p *Annotation) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.Value = v - } - return nil -} - -func (p *Annotation) ReadField3(iprot thrift.TProtocol) error { - p.Host = &Endpoint{} - if err := p.Host.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) - } - return nil -} - -func (p *Annotation) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Annotation"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Annotation) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) - } 
- if err := oprot.WriteI64(int64(p.Timestamp)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) - } - return err -} - -func (p *Annotation) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("value", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) - } - if err := oprot.WriteString(string(p.Value)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) - } - return err -} - -func (p *Annotation) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetHost() { - if err := oprot.WriteFieldBegin("host", thrift.STRUCT, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err) - } - if err := p.Host.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err) - } - } - return err -} - -func (p *Annotation) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Annotation(%+v)", *p) -} - -// Binary annotations are tags applied to a Span to give it context. For -// example, a binary annotation of "http.uri" could the path to a resource in a -// RPC call. -// -// Binary annotations of type STRING are always queryable, though more a -// historical implementation detail than a structural concern. -// -// Binary annotations can repeat, and vary on the host. Similar to Annotation, -// the host indicates who logged the event. This allows you to tell the -// difference between the client and server side of the same key. For example, -// the key "http.uri" might be different on the client and server side due to -// rewriting, like "/api/v1/myresource" vs "/myresource. Via the host field, -// you can see the different points of view, which often help in debugging. -// -// Attributes: -// - Key -// - Value -// - AnnotationType -// - Host: The host that recorded tag, which allows you to differentiate between -// multiple tags with the same key. There are two exceptions to this. -// -// When the key is CLIENT_ADDR or SERVER_ADDR, host indicates the source or -// destination of an RPC. This exception allows zipkin to display network -// context of uninstrumented services, or clients such as web browsers. 
-type BinaryAnnotation struct { - Key string `thrift:"key,1" db:"key" json:"key"` - Value []byte `thrift:"value,2" db:"value" json:"value"` - AnnotationType AnnotationType `thrift:"annotation_type,3" db:"annotation_type" json:"annotation_type"` - Host *Endpoint `thrift:"host,4" db:"host" json:"host,omitempty"` -} - -func NewBinaryAnnotation() *BinaryAnnotation { - return &BinaryAnnotation{} -} - -func (p *BinaryAnnotation) GetKey() string { - return p.Key -} - -func (p *BinaryAnnotation) GetValue() []byte { - return p.Value -} - -func (p *BinaryAnnotation) GetAnnotationType() AnnotationType { - return p.AnnotationType -} - -var BinaryAnnotation_Host_DEFAULT *Endpoint - -func (p *BinaryAnnotation) GetHost() *Endpoint { - if !p.IsSetHost() { - return BinaryAnnotation_Host_DEFAULT - } - return p.Host -} -func (p *BinaryAnnotation) IsSetHost() bool { - return p.Host != nil -} - -func (p *BinaryAnnotation) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I32 { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField4(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *BinaryAnnotation) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Key = v - } - return nil -} - -func (p *BinaryAnnotation) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.Value = v - } - return nil -} - -func (p *BinaryAnnotation) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - temp := AnnotationType(v) - p.AnnotationType = temp - } - return nil -} - -func (p *BinaryAnnotation) ReadField4(iprot thrift.TProtocol) error { - p.Host = &Endpoint{} - if err := p.Host.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) - } - return nil -} - -func (p *BinaryAnnotation) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("BinaryAnnotation"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), 
err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *BinaryAnnotation) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("key", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) - } - if err := oprot.WriteString(string(p.Key)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) - } - return err -} - -func (p *BinaryAnnotation) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("value", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) - } - if err := oprot.WriteBinary(p.Value); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) - } - return err -} - -func (p *BinaryAnnotation) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("annotation_type", thrift.I32, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err) - } - if err := oprot.WriteI32(int32(p.AnnotationType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:annotation_type: ", p), err) - } - return err -} - -func (p *BinaryAnnotation) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetHost() { - if err := oprot.WriteFieldBegin("host", thrift.STRUCT, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err) - } - if err := p.Host.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err) - } - } - return err -} - -func (p *BinaryAnnotation) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BinaryAnnotation(%+v)", *p) -} - -// A trace is a series of spans (often RPC calls) which form a latency tree. -// -// The root span is where trace_id = id and parent_id = Nil. The root span is -// usually the longest interval in the trace, starting with a SERVER_RECV -// annotation and ending with a SERVER_SEND. -// -// Attributes: -// - TraceID -// - Name: Span name in lowercase, rpc method for example -// -// Conventionally, when the span name isn't known, name = "unknown". -// - ID -// - ParentID -// - Annotations -// - BinaryAnnotations -// - Debug -// - Timestamp: Microseconds from epoch of the creation of this span. 
-// -// This value should be set directly by instrumentation, using the most -// precise value possible. For example, gettimeofday or syncing nanoTime -// against a tick of currentTimeMillis. -// -// For compatibilty with instrumentation that precede this field, collectors -// or span stores can derive this via Annotation.timestamp. -// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp. -// -// This field is optional for compatibility with old data: first-party span -// stores are expected to support this at time of introduction. -// - Duration: Measurement of duration in microseconds, used to support queries. -// -// This value should be set directly, where possible. Doing so encourages -// precise measurement decoupled from problems of clocks, such as skew or NTP -// updates causing time to move backwards. -// -// For compatibilty with instrumentation that precede this field, collectors -// or span stores can derive this by subtracting Annotation.timestamp. -// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp. -// -// If this field is persisted as unset, zipkin will continue to work, except -// duration query support will be implementation-specific. Similarly, setting -// this field non-atomically is implementation-specific. -// -// This field is i64 vs i32 to support spans longer than 35 minutes. -type Span struct { - TraceID int64 `thrift:"trace_id,1" db:"trace_id" json:"trace_id"` - // unused field # 2 - Name string `thrift:"name,3" db:"name" json:"name"` - ID int64 `thrift:"id,4" db:"id" json:"id"` - ParentID *int64 `thrift:"parent_id,5" db:"parent_id" json:"parent_id,omitempty"` - Annotations []*Annotation `thrift:"annotations,6" db:"annotations" json:"annotations"` - // unused field # 7 - BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" db:"binary_annotations" json:"binary_annotations"` - Debug bool `thrift:"debug,9" db:"debug" json:"debug,omitempty"` - Timestamp *int64 `thrift:"timestamp,10" db:"timestamp" json:"timestamp,omitempty"` - Duration *int64 `thrift:"duration,11" db:"duration" json:"duration,omitempty"` -} - -func NewSpan() *Span { - return &Span{} -} - -func (p *Span) GetTraceID() int64 { - return p.TraceID -} - -func (p *Span) GetName() string { - return p.Name -} - -func (p *Span) GetID() int64 { - return p.ID -} - -var Span_ParentID_DEFAULT int64 - -func (p *Span) GetParentID() int64 { - if !p.IsSetParentID() { - return Span_ParentID_DEFAULT - } - return *p.ParentID -} - -func (p *Span) GetAnnotations() []*Annotation { - return p.Annotations -} - -func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation { - return p.BinaryAnnotations -} - -var Span_Debug_DEFAULT bool = false - -func (p *Span) GetDebug() bool { - return p.Debug -} - -var Span_Timestamp_DEFAULT int64 - -func (p *Span) GetTimestamp() int64 { - if !p.IsSetTimestamp() { - return Span_Timestamp_DEFAULT - } - return *p.Timestamp -} - -var Span_Duration_DEFAULT int64 - -func (p *Span) GetDuration() int64 { - if !p.IsSetDuration() { - return Span_Duration_DEFAULT - } - return *p.Duration -} -func (p *Span) IsSetParentID() bool { - return p.ParentID != nil -} - -func (p *Span) IsSetDebug() bool { - return p.Debug != Span_Debug_DEFAULT -} - -func (p *Span) IsSetTimestamp() bool { - return p.Timestamp != nil -} - -func (p *Span) IsSetDuration() bool { - return p.Duration != nil -} - -func (p *Span) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - 
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.STRING { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.I64 { - if err := p.ReadField4(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.I64 { - if err := p.ReadField5(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 6: - if fieldTypeId == thrift.LIST { - if err := p.ReadField6(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 8: - if fieldTypeId == thrift.LIST { - if err := p.ReadField8(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 9: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField9(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 10: - if fieldTypeId == thrift.I64 { - if err := p.ReadField10(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 11: - if fieldTypeId == thrift.I64 { - if err := p.ReadField11(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *Span) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.TraceID = v - } - return nil -} - -func (p *Span) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.Name = v - } - return nil -} - -func (p *Span) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.ID = v - } - return nil -} - -func (p *Span) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.ParentID = &v - } - return nil -} - -func (p *Span) ReadField6(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Annotation, 0, size) - p.Annotations = tSlice - for i := 0; i < size; i++ { - _elem0 := &Annotation{} - if err := _elem0.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) - } - p.Annotations = 
append(p.Annotations, _elem0) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) ReadField8(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*BinaryAnnotation, 0, size) - p.BinaryAnnotations = tSlice - for i := 0; i < size; i++ { - _elem1 := &BinaryAnnotation{} - if err := _elem1.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err) - } - p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) ReadField9(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 9: ", err) - } else { - p.Debug = v - } - return nil -} - -func (p *Span) ReadField10(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 10: ", err) - } else { - p.Timestamp = &v - } - return nil -} - -func (p *Span) ReadField11(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 11: ", err) - } else { - p.Duration = &v - } - return nil -} - -func (p *Span) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Span"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := p.writeField5(oprot); err != nil { - return err - } - if err := p.writeField6(oprot); err != nil { - return err - } - if err := p.writeField8(oprot); err != nil { - return err - } - if err := p.writeField9(oprot); err != nil { - return err - } - if err := p.writeField10(oprot); err != nil { - return err - } - if err := p.writeField11(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Span) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("trace_id", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err) - } - if err := oprot.WriteI64(int64(p.TraceID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err) - } - return err -} - -func (p *Span) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("name", thrift.STRING, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err) - } - if err := oprot.WriteString(string(p.Name)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end 
error 3:name: ", p), err) - } - return err -} - -func (p *Span) writeField4(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("id", thrift.I64, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err) - } - if err := oprot.WriteI64(int64(p.ID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err) - } - return err -} - -func (p *Span) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetParentID() { - if err := oprot.WriteFieldBegin("parent_id", thrift.I64, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err) - } - if err := oprot.WriteI64(int64(*p.ParentID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err) - } - } - return err -} - -func (p *Span) writeField6(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("annotations", thrift.LIST, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:annotations: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Annotations)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Annotations { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err) - } - return err -} - -func (p *Span) writeField8(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("binary_annotations", thrift.LIST, 8); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.BinaryAnnotations)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.BinaryAnnotations { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err) - } - return err -} - -func (p *Span) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetDebug() { - if err := oprot.WriteFieldBegin("debug", thrift.BOOL, 9); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err) - } - if err := oprot.WriteBool(bool(p.Debug)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err) - } - } - return err -} - -func (p *Span) writeField10(oprot thrift.TProtocol) (err error) { - if p.IsSetTimestamp() { - if err := 
oprot.WriteFieldBegin("timestamp", thrift.I64, 10); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err) - } - if err := oprot.WriteI64(int64(*p.Timestamp)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err) - } - } - return err -} - -func (p *Span) writeField11(oprot thrift.TProtocol) (err error) { - if p.IsSetDuration() { - if err := oprot.WriteFieldBegin("duration", thrift.I64, 11); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err) - } - if err := oprot.WriteI64(int64(*p.Duration)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err) - } - } - return err -} - -func (p *Span) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Span(%+v)", *p) -} - -// Attributes: -// - Ok -type Response struct { - Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"` -} - -func NewResponse() *Response { - return &Response{} -} - -func (p *Response) GetOk() bool { - return p.Ok -} -func (p *Response) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetOk bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetOk = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetOk { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set")) - } - return nil -} - -func (p *Response) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Ok = v - } - return nil -} - -func (p *Response) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Response"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Response) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("ok", thrift.BOOL, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) - } - if err := oprot.WriteBool(bool(p.Ok)); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) - } - return err -} - -func (p *Response) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Response(%+v)", *p) -} - -type ZipkinCollector interface { - // Parameters: - // - Spans - SubmitZipkinBatch(ctx context.Context, spans []*Span) (r []*Response, err error) -} - -type ZipkinCollectorClient struct { - c thrift.TClient -} - -// Deprecated: Use NewZipkinCollector instead -func NewZipkinCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ZipkinCollectorClient { - return &ZipkinCollectorClient{ - c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), - } -} - -// Deprecated: Use NewZipkinCollector instead -func NewZipkinCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ZipkinCollectorClient { - return &ZipkinCollectorClient{ - c: thrift.NewTStandardClient(iprot, oprot), - } -} - -func NewZipkinCollectorClient(c thrift.TClient) *ZipkinCollectorClient { - return &ZipkinCollectorClient{ - c: c, - } -} - -// Parameters: -// - Spans -func (p *ZipkinCollectorClient) SubmitZipkinBatch(ctx context.Context, spans []*Span) (r []*Response, err error) { - var _args2 ZipkinCollectorSubmitZipkinBatchArgs - _args2.Spans = spans - var _result3 ZipkinCollectorSubmitZipkinBatchResult - if err = p.c.Call(ctx, "submitZipkinBatch", &_args2, &_result3); err != nil { - return - } - return _result3.GetSuccess(), nil -} - -type ZipkinCollectorProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler ZipkinCollector -} - -func (p *ZipkinCollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *ZipkinCollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *ZipkinCollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewZipkinCollectorProcessor(handler ZipkinCollector) *ZipkinCollectorProcessor { - - self4 := &ZipkinCollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self4.processorMap["submitZipkinBatch"] = &zipkinCollectorProcessorSubmitZipkinBatch{handler: handler} - return self4 -} - -func (p *ZipkinCollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return false, err - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(ctx, seqId, iprot, oprot) - } - iprot.Skip(thrift.STRUCT) - iprot.ReadMessageEnd() - x5 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x5.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, x5 - -} - -type zipkinCollectorProcessorSubmitZipkinBatch struct { - handler ZipkinCollector -} - -func (p *zipkinCollectorProcessorSubmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := ZipkinCollectorSubmitZipkinBatchArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := 
thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("submitZipkinBatch", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := ZipkinCollectorSubmitZipkinBatchResult{} - var retval []*Response - var err2 error - if retval, err2 = p.handler.SubmitZipkinBatch(ctx, args.Spans); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitZipkinBatch: "+err2.Error()) - oprot.WriteMessageBegin("submitZipkinBatch", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("submitZipkinBatch", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - Spans -type ZipkinCollectorSubmitZipkinBatchArgs struct { - Spans []*Span `thrift:"spans,1" db:"spans" json:"spans"` -} - -func NewZipkinCollectorSubmitZipkinBatchArgs() *ZipkinCollectorSubmitZipkinBatchArgs { - return &ZipkinCollectorSubmitZipkinBatchArgs{} -} - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) GetSpans() []*Span { - return p.Spans -} -func (p *ZipkinCollectorSubmitZipkinBatchArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.LIST { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) ReadField1(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Span, 0, size) - p.Spans = tSlice - for i := 0; i < size; i++ { - _elem6 := &Span{} - if err := _elem6.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem6), err) - } - p.Spans = append(p.Spans, _elem6) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("submitZipkinBatch_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", 
err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("spans", thrift.LIST, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Spans { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) - } - return err -} - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchArgs(%+v)", *p) -} - -// Attributes: -// - Success -type ZipkinCollectorSubmitZipkinBatchResult struct { - Success []*Response `thrift:"success,0" db:"success" json:"success,omitempty"` -} - -func NewZipkinCollectorSubmitZipkinBatchResult() *ZipkinCollectorSubmitZipkinBatchResult { - return &ZipkinCollectorSubmitZipkinBatchResult{} -} - -var ZipkinCollectorSubmitZipkinBatchResult_Success_DEFAULT []*Response - -func (p *ZipkinCollectorSubmitZipkinBatchResult) GetSuccess() []*Response { - return p.Success -} -func (p *ZipkinCollectorSubmitZipkinBatchResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if fieldTypeId == thrift.LIST { - if err := p.ReadField0(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchResult) ReadField0(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Response, 0, size) - p.Success = tSlice - for i := 0; i < size; i++ { - _elem7 := &Response{} - if err := _elem7.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem7), err) - } - p.Success = append(p.Success, _elem7) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("submitZipkinBatch_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct 
begin error: ", p), err) - } - if p != nil { - if err := p.writeField0(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Success { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *ZipkinCollectorSubmitZipkinBatchResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchResult(%+v)", *p) -} diff --git a/vendor/go.opencensus.io/exporter/prometheus/prometheus.go b/vendor/go.opencensus.io/exporter/prometheus/prometheus.go index 57ba5e8da..ab616331a 100644 --- a/vendor/go.opencensus.io/exporter/prometheus/prometheus.go +++ b/vendor/go.opencensus.io/exporter/prometheus/prometheus.go @@ -12,8 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package prometheus contains the Prometheus exporters for -// Stackdriver Monitoring. +// Package prometheus contains a Prometheus exporter. // // Please note that this exporter is currently work in progress and not complete. package prometheus // import "go.opencensus.io/exporter/prometheus" @@ -51,6 +50,7 @@ type Exporter struct { // Options contains options for configuring the exporter. 
type Options struct { Namespace string + Registry *prometheus.Registry OnError func(err error) } @@ -74,13 +74,15 @@ func newExporter(o Options) (*Exporter, error) { if o.Namespace == "" { o.Namespace = defaultNamespace } - reg := prometheus.NewRegistry() - collector := newCollector(o, reg) + if o.Registry == nil { + o.Registry = prometheus.NewRegistry() + } + collector := newCollector(o, o.Registry) e := &Exporter{ opts: o, - g: reg, + g: o.Registry, c: collector, - handler: promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), + handler: promhttp.HandlerFor(o.Registry, promhttp.HandlerOpts{}), } return e, nil } @@ -99,8 +101,8 @@ func (c *collector) registerViews(views ...*view.View) { if !ok { desc := prometheus.NewDesc( viewName(c.opts.Namespace, view), - view.Description(), - tagKeysToLabels(view.TagKeys()), + view.Description, + tagKeysToLabels(view.TagKeys), nil, ) c.registeredViewsMu.Lock() @@ -228,7 +230,7 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { } func (c *collector) toMetric(desc *prometheus.Desc, v *view.View, row *view.Row) (prometheus.Metric, error) { - switch agg := v.Aggregation().(type) { + switch agg := v.Aggregation.(type) { case view.CountAggregation: data := row.Data.(*view.CountData) return prometheus.NewConstMetric(desc, prometheus.CounterValue, float64(*data), tagValues(row.Tags)...) @@ -250,7 +252,7 @@ func (c *collector) toMetric(desc *prometheus.Desc, v *view.View, row *view.Row) return prometheus.NewConstMetric(desc, prometheus.UntypedValue, float64(*data), tagValues(row.Tags)...) default: - return nil, fmt.Errorf("aggregation %T is not yet supported", v.Aggregation()) + return nil, fmt.Errorf("aggregation %T is not yet supported", v.Aggregation) } } @@ -287,13 +289,13 @@ func tagValues(t []tag.Tag) []string { } func viewName(namespace string, v *view.View) string { - return namespace + "_" + internal.Sanitize(v.Name()) + return namespace + "_" + internal.Sanitize(v.Name) } func viewSignature(namespace string, v *view.View) string { var buf bytes.Buffer buf.WriteString(viewName(namespace, v)) - for _, k := range v.TagKeys() { + for _, k := range v.TagKeys { buf.WriteString("-" + k.Name()) } return buf.String() diff --git a/vendor/go.opencensus.io/exporter/prometheus/prometheus_test.go b/vendor/go.opencensus.io/exporter/prometheus/prometheus_test.go index e96de88ad..b42b99503 100644 --- a/vendor/go.opencensus.io/exporter/prometheus/prometheus_test.go +++ b/vendor/go.opencensus.io/exporter/prometheus/prometheus_test.go @@ -15,20 +15,35 @@ package prometheus import ( + "context" + "fmt" + "io/ioutil" + "log" + "net/http" + "net/http/httptest" + "strings" "sync" "testing" "time" "go.opencensus.io/stats" "go.opencensus.io/stats/view" + "go.opencensus.io/tag" "github.com/prometheus/client_golang/prometheus" ) -func newView(agg view.Aggregation) *view.View { - m, _ := stats.Int64("tests/foo1", "bytes", "byte") - view, _ := view.New("foo", "bar", nil, m, agg) - return view +func newView(measureName string, agg view.Aggregation) *view.View { + m, err := stats.Int64(measureName, "bytes", stats.UnitBytes) + if err != nil { + log.Fatal(err) + } + return &view.View{ + Name: "foo", + Description: "bar", + Measure: m, + Aggregation: agg, + } } func TestOnlyCumulativeWindowSupported(t *testing.T) { @@ -44,13 +59,13 @@ func TestOnlyCumulativeWindowSupported(t *testing.T) { }{ 0: { vds: &view.Data{ - View: newView(view.CountAggregation{}), + View: newView("TestOnlyCumulativeWindowSupported/m1", view.CountAggregation{}), }, want: 0, // no rows present }, 1: { vds: 
&view.Data{ - View: newView(view.CountAggregation{}), + View: newView("TestOnlyCumulativeWindowSupported/m2", view.CountAggregation{}), Rows: []*view.Row{ {Data: &count1}, }, @@ -59,7 +74,7 @@ func TestOnlyCumulativeWindowSupported(t *testing.T) { }, 2: { vds: &view.Data{ - View: newView(view.MeanAggregation{}), + View: newView("TestOnlyCumulativeWindowSupported/m3", view.MeanAggregation{}), Rows: []*view.Row{ {Data: &mean1}, }, @@ -131,8 +146,8 @@ func TestCollectNonRacy(t *testing.T) { count1 := view.CountData(1) mean1 := &view.MeanData{Mean: 4.5, Count: 5} vds := []*view.Data{ - {View: newView(view.MeanAggregation{}), Rows: []*view.Row{{Data: mean1}}}, - {View: newView(view.CountAggregation{}), Rows: []*view.Row{{Data: &count1}}}, + {View: newView(fmt.Sprintf("TestCollectNonRacy/m1-%d", i), view.MeanAggregation{}), Rows: []*view.Row{{Data: mean1}}}, + {View: newView(fmt.Sprintf("TestCollectNonRacy/m2-%d", i), view.CountAggregation{}), Rows: []*view.Row{{Data: &count1}}}, } for _, v := range vds { exp.ExportView(v) @@ -174,3 +189,102 @@ func TestCollectNonRacy(t *testing.T) { } }() } + +type mCreator struct { + m *stats.Int64Measure + err error +} + +type mSlice []*stats.Int64Measure + +func (mc *mCreator) createAndAppend(measures *mSlice, name, desc, unit string) { + mc.m, mc.err = stats.Int64(name, desc, unit) + *measures = append(*measures, mc.m) +} + +type vCreator struct { + v *view.View + err error +} + +func (vc *vCreator) createAndSubscribe(name, description string, keys []tag.Key, measure stats.Measure, agg view.Aggregation) { + vc.v, vc.err = view.New(name, description, keys, measure, agg) + if err := vc.v.Subscribe(); err != nil { + vc.err = err + } +} + +func TestMetricsEndpointOutput(t *testing.T) { + exporter, err := newExporter(Options{}) + if err != nil { + t.Fatalf("failed to create prometheus exporter: %v", err) + } + view.RegisterExporter(exporter) + + names := []string{"foo", "bar", "baz"} + + measures := make(mSlice, 0) + mc := &mCreator{} + for _, name := range names { + mc.createAndAppend(&measures, "tests/"+name, name, "") + } + if mc.err != nil { + t.Errorf("failed to create measures: %v", mc.err) + } + + vc := &vCreator{} + for _, m := range measures { + vc.createAndSubscribe(m.Name(), m.Description(), nil, m, view.CountAggregation{}) + } + if vc.err != nil { + t.Fatalf("failed to create views: %v", vc.err) + } + view.SetReportingPeriod(time.Millisecond) + + for _, m := range measures { + stats.Record(context.Background(), m.M(1)) + } + + srv := httptest.NewServer(exporter) + defer srv.Close() + + var i int + var output string + for { + if i == 10000 { + t.Fatal("no output at /metrics (10s wait)") + } + i++ + + resp, err := http.Get(srv.URL) + if err != nil { + t.Fatalf("failed to get /metrics: %v", err) + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("failed to read body: %v", err) + } + resp.Body.Close() + + output = string(body) + if output != "" { + break + } + time.Sleep(time.Millisecond) + } + + if strings.Contains(output, "collected before with the same name and label values") { + t.Fatal("metric name and label values are duplicated but must be unique") + } + + if strings.Contains(output, "error(s) occurred") { + t.Fatal("error reported by prometheus registry") + } + + for _, name := range names { + if !strings.Contains(output, "opencensus_tests_"+name+" 1") { + t.Fatalf("measurement missing in output: %v", name) + } + } +} diff --git a/vendor/go.opencensus.io/exporter/stackdriver/example_test.go
b/vendor/go.opencensus.io/exporter/stackdriver/example_test.go index 5850ef7aa..38be71b01 100644 --- a/vendor/go.opencensus.io/exporter/stackdriver/example_test.go +++ b/vendor/go.opencensus.io/exporter/stackdriver/example_test.go @@ -19,8 +19,8 @@ import ( "net/http" "go.opencensus.io/exporter/stackdriver" + "go.opencensus.io/exporter/stackdriver/propagation" "go.opencensus.io/plugin/ochttp" - "go.opencensus.io/plugin/ochttp/propagation/google" "go.opencensus.io/stats/view" "go.opencensus.io/trace" ) @@ -33,6 +33,8 @@ func Example() { // Export to Stackdriver Monitoring. view.RegisterExporter(exporter) + // Subscribe views to see stats in Stackdriver Monitoring + view.Subscribe(ochttp.ClientLatencyView, ochttp.ClientResponseBytesView) // Export to Stackdriver Trace. trace.RegisterExporter(exporter) @@ -40,7 +42,7 @@ func Example() { // Automatically add a Stackdriver trace header to outgoing requests: client := &http.Client{ Transport: &ochttp.Transport{ - Propagation: &google.HTTPFormat{}, + Propagation: &propagation.HTTPFormat{}, }, } _ = client // use client diff --git a/vendor/go.opencensus.io/exporter/stackdriver/propagation/http.go b/vendor/go.opencensus.io/exporter/stackdriver/propagation/http.go new file mode 100644 index 000000000..7cc02a110 --- /dev/null +++ b/vendor/go.opencensus.io/exporter/stackdriver/propagation/http.go @@ -0,0 +1,94 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package propagation implements X-Cloud-Trace-Context header propagation used +// by Google Cloud products. +package propagation // import "go.opencensus.io/exporter/stackdriver/propagation" + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + "net/http" + "strconv" + "strings" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +const ( + httpHeaderMaxSize = 200 + httpHeader = `X-Cloud-Trace-Context` +) + +var _ propagation.HTTPFormat = (*HTTPFormat)(nil) + +// HTTPFormat implements propagation.HTTPFormat to propagate +// traces in HTTP headers for Google Cloud Platform and Stackdriver Trace. +type HTTPFormat struct{} + +// SpanContextFromRequest extracts a Stackdriver Trace span context from incoming requests. +func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { + h := req.Header.Get(httpHeader) + // See https://cloud.google.com/trace/docs/faq for the header format. + // Return if the header is empty or missing, or if the header is unreasonably + // large, to avoid making unnecessary copies of a large string. + if h == "" || len(h) > httpHeaderMaxSize { + return trace.SpanContext{}, false + } + + // Parse the trace id field. + slash := strings.Index(h, `/`) + if slash == -1 { + return trace.SpanContext{}, false + } + tid, h := h[:slash], h[slash+1:] + + buf, err := hex.DecodeString(tid) + if err != nil { + return trace.SpanContext{}, false + } + copy(sc.TraceID[:], buf) + + // Parse the span id field.
+ spanstr := h + semicolon := strings.Index(h, `;`) + if semicolon != -1 { + spanstr, h = h[:semicolon], h[semicolon+1:] + } + sid, err := strconv.ParseUint(spanstr, 10, 64) + if err != nil { + return trace.SpanContext{}, false + } + binary.BigEndian.PutUint64(sc.SpanID[:], sid) + + // Parse the options field, options field is optional. + if !strings.HasPrefix(h, "o=") { + return sc, true + } + o, err := strconv.ParseUint(h[2:], 10, 64) + if err != nil { + return trace.SpanContext{}, false + } + sc.TraceOptions = trace.TraceOptions(o) + return sc, true +} + +// SpanContextToRequest modifies the given request to include a Stackdriver Trace header. +func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { + sid := binary.BigEndian.Uint64(sc.SpanID[:]) + header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions)) + req.Header.Set(httpHeader, header) +} diff --git a/vendor/go.opencensus.io/exporter/stackdriver/propagation/http_test.go b/vendor/go.opencensus.io/exporter/stackdriver/propagation/http_test.go new file mode 100644 index 000000000..9ad93b714 --- /dev/null +++ b/vendor/go.opencensus.io/exporter/stackdriver/propagation/http_test.go @@ -0,0 +1,70 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
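The new propagation package above parses and emits headers of the form `TRACE_ID/SPAN_ID;o=OPTIONS`: 32 hex characters of trace ID, a decimal span ID, and an optional `o=` field carrying the trace-options bits. A minimal round-trip sketch against this vendored API (the header value is an arbitrary illustrative example, not taken from any real trace):

```go
package main

import (
	"fmt"
	"net/http"

	"go.opencensus.io/exporter/stackdriver/propagation"
)

func main() {
	format := &propagation.HTTPFormat{}

	// An incoming request carrying a Stackdriver trace header:
	// 32 hex chars of trace ID, a decimal span ID, and the
	// optional "o=" trace-options field.
	req, _ := http.NewRequest("GET", "http://example.com", nil)
	req.Header.Set("X-Cloud-Trace-Context", "105445aa7843bc8bf206b12000100000/123;o=1")

	// Extract the span context the way an instrumented server would.
	sc, ok := format.SpanContextFromRequest(req)
	fmt.Println(ok, sc.TraceOptions) // true 1

	// Injecting the context into an outgoing request reproduces an
	// equivalent header value.
	out, _ := http.NewRequest("GET", "http://example.com", nil)
	format.SpanContextToRequest(sc, out)
	fmt.Println(out.Header.Get("X-Cloud-Trace-Context"))
	// 105445aa7843bc8bf206b12000100000/123;o=1
}
```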
+ +package propagation + +import ( + "net/http" + "reflect" + "testing" + + "go.opencensus.io/trace" +) + +func TestHTTPFormat(t *testing.T) { + format := &HTTPFormat{} + traceID := [16]byte{16, 84, 69, 170, 120, 67, 188, 139, 242, 6, 177, 32, 0, 16, 0, 0} + spanID1 := [8]byte{255, 0, 0, 0, 0, 0, 0, 123} + spanID2 := [8]byte{0, 0, 0, 0, 0, 0, 0, 123} + tests := []struct { + incoming string + wantSpanContext trace.SpanContext + }{ + { + incoming: "105445aa7843bc8bf206b12000100000/18374686479671623803;o=1", + wantSpanContext: trace.SpanContext{ + TraceID: traceID, + SpanID: spanID1, + TraceOptions: 1, + }, + }, + { + incoming: "105445aa7843bc8bf206b12000100000/123;o=0", + wantSpanContext: trace.SpanContext{ + TraceID: traceID, + SpanID: spanID2, + TraceOptions: 0, + }, + }, + } + for _, tt := range tests { + t.Run(tt.incoming, func(t *testing.T) { + req, _ := http.NewRequest("GET", "http://example.com", nil) + req.Header.Add(httpHeader, tt.incoming) + sc, ok := format.SpanContextFromRequest(req) + if !ok { + t.Errorf("exporter.SpanContextFromRequest() = false; want true") + } + if got, want := sc, tt.wantSpanContext; !reflect.DeepEqual(got, want) { + t.Errorf("exporter.SpanContextFromRequest() returned span context %v; want %v", got, want) + } + + req, _ = http.NewRequest("GET", "http://example.com", nil) + format.SpanContextToRequest(sc, req) + if got, want := req.Header.Get(httpHeader), tt.incoming; got != want { + t.Errorf("exporter.SpanContextToRequest() returned header %q; want %q", got, want) + } + }) + } +} diff --git a/vendor/go.opencensus.io/exporter/stackdriver/stackdriver.go b/vendor/go.opencensus.io/exporter/stackdriver/stackdriver.go index 05e87dbc6..2ae7dc333 100644 --- a/vendor/go.opencensus.io/exporter/stackdriver/stackdriver.go +++ b/vendor/go.opencensus.io/exporter/stackdriver/stackdriver.go @@ -22,10 +22,15 @@ package stackdriver // import "go.opencensus.io/exporter/stackdriver" import ( + "context" + "errors" + "fmt" "time" + traceapi "cloud.google.com/go/trace/apiv2" "go.opencensus.io/stats/view" "go.opencensus.io/trace" + "golang.org/x/oauth2/google" "google.golang.org/api/option" monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" ) @@ -34,10 +39,12 @@ import ( type Options struct { // ProjectID is the identifier of the Stackdriver // project the user is uploading the stats data to. + // If not set, this will default to your "Application Default Credentials". + // For details see: https://developers.google.com/accounts/docs/application-default-credentials ProjectID string // OnError is the hook to be called when there is - // an error occurred when uploading the stats data. + // an error uploading the stats or tracing data. // If no custom hook is set, errors are logged. // Optional. OnError func(err error) @@ -76,6 +83,16 @@ type Exporter struct { // NewExporter creates a new Exporter that implements both stats.Exporter and // trace.Exporter. func NewExporter(o Options) (*Exporter, error) { + if o.ProjectID == "" { + creds, err := google.FindDefaultCredentials(context.Background(), traceapi.DefaultAuthScopes()...) 
+ if err != nil { + return nil, fmt.Errorf("stackdriver: %v", err) + } + if creds.ProjectID == "" { + return nil, errors.New("stackdriver: no project found with application default credentials") + } + o.ProjectID = creds.ProjectID + } se, err := newStatsExporter(o) if err != nil { return nil, err diff --git a/vendor/go.opencensus.io/exporter/stackdriver/stackdriver_test.go b/vendor/go.opencensus.io/exporter/stackdriver/stackdriver_test.go new file mode 100644 index 000000000..796cbb166 --- /dev/null +++ b/vendor/go.opencensus.io/exporter/stackdriver/stackdriver_test.go @@ -0,0 +1,83 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stackdriver + +import ( + "context" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "testing" + "time" + + "go.opencensus.io/plugin/ochttp" + "go.opencensus.io/stats/view" + "go.opencensus.io/trace" + "golang.org/x/net/context/ctxhttp" +) + +func TestExport(t *testing.T) { + projectID, ok := os.LookupEnv("STACKDRIVER_TEST_PROJECT_ID") + if !ok { + t.Skip("STACKDRIVER_TEST_PROJECT_ID not set") + } + + exporter, err := NewExporter(Options{ProjectID: projectID}) + if err != nil { + t.Fatal(err) + } + defer exporter.Flush() + + trace.RegisterExporter(exporter) + defer trace.UnregisterExporter(exporter) + view.RegisterExporter(exporter) + defer view.UnregisterExporter(exporter) + + trace.SetDefaultSampler(trace.AlwaysSample()) + + span := trace.NewSpan("custom-span", nil, trace.StartOptions{}) + time.Sleep(10 * time.Millisecond) + span.End() + + // Test HTTP spans + + handler := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + time.Sleep(150 * time.Millisecond) // do work + rw.Write([]byte("Hello, world!")) + }) + server := httptest.NewServer(&ochttp.Handler{Handler: handler}) + defer server.Close() + + ctx := context.Background() + client := &http.Client{ + Transport: &ochttp.Transport{}, + } + resp, err := ctxhttp.Get(ctx, client, server.URL+"/test/123?abc=xyz") + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + if want, got := "Hello, world!", string(body); want != got { + t.Fatalf("resp.Body = %q; want %q", got, want) + } + + // Flush twice to expose issue of exporter creating traces internally (#557) + exporter.Flush() + exporter.Flush() +} diff --git a/vendor/go.opencensus.io/exporter/stackdriver/stats.go b/vendor/go.opencensus.io/exporter/stackdriver/stats.go index a9cee1fc2..62112421b 100644 --- a/vendor/go.opencensus.io/exporter/stackdriver/stats.go +++ b/vendor/go.opencensus.io/exporter/stackdriver/stats.go @@ -196,7 +196,7 @@ func (e *statsExporter) makeReq(vds []*view.Data, limit int) []*monitoringpb.Cre for _, row := range vd.Rows { ts := &monitoringpb.TimeSeries{ Metric: &metricpb.Metric{ - Type: namespacedViewName(vd.View.Name(), false), + Type: namespacedViewName(vd.View.Name, false), Labels: newLabels(row.Tags, e.taskValue), }, Resource: resource, @@ -228,10 +228,10 @@ func (e *statsExporter)
createMeasure(ctx context.Context, vd *view.Data) error e.createdViewsMu.Lock() defer e.createdViewsMu.Unlock() - m := vd.View.Measure() - agg := vd.View.Aggregation() - tagKeys := vd.View.TagKeys() - viewName := vd.View.Name() + m := vd.View.Measure + agg := vd.View.Aggregation + tagKeys := vd.View.TagKeys + viewName := vd.View.Name if md, ok := e.createdViews[viewName]; ok { return equalAggTagKeys(md, agg, tagKeys) @@ -279,7 +279,7 @@ func (e *statsExporter) createMeasure(ctx context.Context, vd *view.Data) error Type: namespacedViewName(viewName, false), MetricKind: metricKind, ValueType: valueType, - Labels: newLabelDescriptors(vd.View.TagKeys()), + Labels: newLabelDescriptors(vd.View.TagKeys), }, }) if err != nil { @@ -333,7 +333,7 @@ func newTypedValue(vd *view.View, r *view.Row) *monitoringpb.TypedValue { }, }} case *view.DistributionData: - bounds := vd.Aggregation().(view.DistributionAggregation) + bounds := vd.Aggregation.(view.DistributionAggregation) return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{ DistributionValue: &distributionpb.Distribution{ Count: v.Count, diff --git a/vendor/go.opencensus.io/exporter/stackdriver/stats_test.go b/vendor/go.opencensus.io/exporter/stackdriver/stats_test.go index 3ffbe7065..da6d0e74d 100644 --- a/vendor/go.opencensus.io/exporter/stackdriver/stats_test.go +++ b/vendor/go.opencensus.io/exporter/stackdriver/stats_test.go @@ -83,14 +83,18 @@ func TestExporter_makeReq(t *testing.T) { t.Fatal(err) } - v, err := view.New("testview", "desc", []tag.Key{key}, m, view.CountAggregation{}) - if err != nil { - t.Fatal(err) + v := &view.View{ + Name: "testview", + Description: "desc", + TagKeys: []tag.Key{key}, + Measure: m, + Aggregation: view.CountAggregation{}, } - - distView, err := view.New("distview", "desc", nil, m, view.DistributionAggregation([]float64{2, 4, 7})) - if err != nil { - t.Fatal(err) + distView := &view.View{ + Name: "distview", + Description: "desc", + Measure: m, + Aggregation: view.DistributionAggregation{2, 4, 7}, } start := time.Now() @@ -384,14 +388,13 @@ func TestExporter_makeReq_batching(t *testing.T) { t.Fatal(err) } - v, err := view.New("view", "desc", []tag.Key{key}, m, view.CountAggregation{}) - if err != nil { - t.Fatal(err) + v := &view.View{ + Name: "view", + Description: "desc", + TagKeys: []tag.Key{key}, + Measure: m, + Aggregation: view.CountAggregation{}, } - if err := view.Register(v); err != nil { - t.Fatal(err) - } - defer view.Unregister(v) tests := []struct { name string @@ -573,9 +576,12 @@ func TestExporter_createMeasure(t *testing.T) { t.Fatal(err) } - v, err := view.New("testview", "desc", []tag.Key{key}, m, view.CountAggregation{}) - if err != nil { - t.Fatal(err) + v := &view.View{ + Name: "testview", + Description: "desc", + TagKeys: []tag.Key{key}, + Measure: m, + Aggregation: view.CountAggregation{}, } data := view.CountData(0) @@ -599,7 +605,7 @@ func TestExporter_createMeasure(t *testing.T) { Type: "hello", MetricKind: metricpb.MetricDescriptor_CUMULATIVE, ValueType: metricpb.MetricDescriptor_INT64, - Labels: newLabelDescriptors(vd.View.TagKeys()), + Labels: newLabelDescriptors(vd.View.TagKeys), }, nil } @@ -632,14 +638,17 @@ func TestExporter_makeReq_withCustomMonitoredResource(t *testing.T) { t.Fatal(err) } - v, err := view.New("testview", "desc", []tag.Key{key}, m, view.CountAggregation{}) - if err != nil { + v := &view.View{ + Name: "testview", + Description: "desc", + TagKeys: []tag.Key{key}, + Measure: m, + Aggregation: view.CountAggregation{}, + } + if err := 
view.Subscribe(v); err != nil { t.Fatal(err) } - if err := v.Subscribe(); err != nil { - t.Fatal(err) - } - defer v.Unsubscribe() + defer view.Unsubscribe(v) start := time.Now() end := start.Add(time.Minute) diff --git a/vendor/go.opencensus.io/exporter/stackdriver/trace.go b/vendor/go.opencensus.io/exporter/stackdriver/trace.go index eaca6c404..8db648f6e 100644 --- a/vendor/go.opencensus.io/exporter/stackdriver/trace.go +++ b/vendor/go.opencensus.io/exporter/stackdriver/trace.go @@ -30,12 +30,10 @@ import ( // traceExporter is an implementation of trace.Exporter that uploads spans to // Stackdriver. // -// traceExporter also implements trace/propagation.HTTPFormat and can -// propagate Stackdriver Traces over HTTP requests. type traceExporter struct { projectID string bundler *bundler.Bundler - // uploadFn defaults to uploadToStackdriver; it can be replaced for tests. + // uploadFn defaults to uploadSpans; it can be replaced for tests. uploadFn func(spans []*trace.SpanData) overflowLogger client *tracingclient.Client @@ -75,7 +73,7 @@ func newTraceExporterWithClient(o Options, c *tracingclient.Client) *traceExport bundler.BufferedByteLimit = bundler.BundleCountThreshold * 2000 e.bundler = bundler - e.uploadFn = e.uploadToStackdriver + e.uploadFn = e.uploadSpans return e } @@ -107,8 +105,8 @@ func (e *traceExporter) Flush() { e.bundler.Flush() } -// uploadToStackdriver uploads a set of spans to Stackdriver. -func (e *traceExporter) uploadToStackdriver(spans []*trace.SpanData) { +// uploadSpans uploads a set of spans to Stackdriver. +func (e *traceExporter) uploadSpans(spans []*trace.SpanData) { req := tracepb.BatchWriteSpansRequest{ Name: "projects/" + e.projectID, Spans: make([]*tracepb.Span, 0, len(spans)), @@ -116,8 +114,16 @@ func (e *traceExporter) uploadToStackdriver(spans []*trace.SpanData) { for _, span := range spans { req.Spans = append(req.Spans, protoFromSpanData(span, e.projectID)) } - err := e.client.BatchWriteSpans(context.Background(), &req) + // Create a never-sampled span to prevent traces associated with exporter. + span := trace.NewSpan("go.opencensus.io/exporter/stackdriver.uploadSpans", nil, trace.StartOptions{Sampler: trace.NeverSample()}) + defer span.End() + span.SetAttributes(trace.Int64Attribute("num_spans", int64(len(spans)))) + + ctx := trace.WithSpan(context.Background(), span) // TODO: add timeouts + err := e.client.BatchWriteSpans(ctx, &req) if err != nil { + span.SetStatus(trace.Status{Code: 2, Message: err.Error()}) + // TODO: Allow configuring a logger for exporters. 
log.Printf("OpenCensus Stackdriver exporter: failed to upload %d spans: %v", len(spans), err) } } diff --git a/vendor/go.opencensus.io/exporter/stackdriver/trace_proto_test.go b/vendor/go.opencensus.io/exporter/stackdriver/trace_proto_test.go index 7749df5a1..e0dbc0ef3 100644 --- a/vendor/go.opencensus.io/exporter/stackdriver/trace_proto_test.go +++ b/vendor/go.opencensus.io/exporter/stackdriver/trace_proto_test.go @@ -78,9 +78,9 @@ func TestExportTrace(t *testing.T) { span2.Annotatef(nil, "in span%d", 2) span2.Annotate(nil, big.NewRat(2, 4).String()) span2.SetAttributes( - trace.StringAttribute{Key: "key1", Value: "value1"}, - trace.StringAttribute{Key: "key2", Value: "value2"}) - span2.SetAttributes(trace.Int64Attribute{Key: "key1", Value: 100}) + trace.StringAttribute("key1", "value1"), + trace.StringAttribute("key2", "value2")) + span2.SetAttributes(trace.Int64Attribute("key1", 100)) span2.End() } { @@ -92,9 +92,9 @@ func TestExportTrace(t *testing.T) { { _, span4 := trace.StartSpan(ctx3, "span4") x := 42 - a1 := []trace.Attribute{trace.StringAttribute{Key: "k1", Value: "v1"}} - a2 := []trace.Attribute{trace.StringAttribute{Key: "k2", Value: "v2"}} - a3 := []trace.Attribute{trace.StringAttribute{Key: "k3", Value: "v3"}} + a1 := []trace.Attribute{trace.StringAttribute("k1", "v1")} + a2 := []trace.Attribute{trace.StringAttribute("k2", "v2")} + a3 := []trace.Attribute{trace.StringAttribute("k3", "v3")} a4 := map[string]interface{}{"k4": "v4"} r := big.NewRat(2, 4) span4.Annotate(a1, r.String()) diff --git a/vendor/go.opencensus.io/internal/internal.go b/vendor/go.opencensus.io/internal/internal.go index 022752eb9..f1d0efd1c 100644 --- a/vendor/go.opencensus.io/internal/internal.go +++ b/vendor/go.opencensus.io/internal/internal.go @@ -14,6 +14,19 @@ package internal +import "time" + // UserAgent is the user agent to be added to the outgoing // requests from the exporters. const UserAgent = "opencensus-go-v0.1.0" + +// MonotonicEndTime returns the end time at present +// but offset from start, monotonically. +// +// The monotonic clock is used in subtractions hence +// the duration since start added back to start gives +// end as a monotonic time. +// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks +func MonotonicEndTime(start time.Time) time.Time { + return start.Add(time.Now().Sub(start)) +} diff --git a/vendor/go.opencensus.io/internal/readme/source.md b/vendor/go.opencensus.io/internal/readme/source.md index 4ac0f8f17..791086c04 100644 --- a/vendor/go.opencensus.io/internal/readme/source.md +++ b/vendor/go.opencensus.io/internal/readme/source.md @@ -16,7 +16,7 @@ rapidly, vendoring is recommended. ## Installation ``` -$ go get -u go.opencensus.io/... +$ go get -u go.opencensus.io ``` ## Prerequisites @@ -32,6 +32,7 @@ Currently, OpenCensus supports: * [OpenZipkin][exporter-zipkin] for traces * Stackdriver [Monitoring][exporter-stackdriver] and [Trace][exporter-stackdriver] * [Jaeger][exporter-jaeger] for traces +* [AWS X-Ray][exporter-xray] for traces ## Tags @@ -70,62 +71,44 @@ use New and pass the returned context. ## Stats -### Creating, retrieving and deleting a measure +### Measures -Create and load measures with units: +Measures are used for recording data points with associated units. +Creating a Measure: [embedmd]:# (stats.go measure) -Retrieve measure by name: +### Recording Measurements -[embedmd]:# (stats.go findMeasure) - -### Creating an aggregation - -Currently 4 types of aggregations are supported. 
The CountAggregation is used to count -the number of times a sample was recorded. The DistributionAggregation is used to -provide a histogram of the values of the samples. The SumAggregation is used to -sum up all sample values. The MeanAggregation is used to calculate the mean of -sample values. - -[embedmd]:# (stats.go aggs) - -### Creating, registering and unregistering a view - -Create and register a view: - -[embedmd]:# (stats.go view) - -Find view by name: - -[embedmd]:# (stats.go findView) - -Unregister view: - -[embedmd]:# (stats.go unregisterView) - -Configure the default interval between reports of collected data. -This is a system wide interval and impacts all views. The default -interval duration is 10 seconds. Trying to set an interval with -a duration less than a certain minimum (maybe 1s) should have no effect. - -[embedmd]:# (stats.go reportingPeriod) - -### Recording measurements - -Recording usage can only be performed against already registered measure -and their registered views. Measurements are implicitly tagged with the -tags in the context: +Measurements are data points associated with Measures. +Recording implicitly tags the set of Measurements with the tags from the +provided context: [embedmd]:# (stats.go record) -### Retrieving collected data for a view +### Views -Users need to subscribe to a view in order to retrieve collected data. +Views are how Measures are aggregated. You can think of them as queries over the +set of recorded data points (Measurements). -[embedmd]:# (stats.go subscribe) +Views have two parts: the tags to group by and the aggregation type used. -Subscribed views' data will be exported via the registered exporters. +Currently four types of aggregations are supported: +* CountAggregation is used to count the number of times a sample was recorded. +* DistributionAggregation is used to provide a histogram of the values of the samples. +* SumAggregation is used to sum up all sample values. +* MeanAggregation is used to calculate the mean of sample values. + +[embedmd]:# (stats.go aggs) + +Here we create a view with the DistributionAggregation over our Measure. +All Measurements will be aggregated together irrespective of their tags, +i.e. no grouping by tag. + +[embedmd]:# (stats.go view) + +Subscribe begins collecting data for the view. Subscribed views' data will be +exported via the registered exporters. [embedmd]:# (stats.go registerExporter) @@ -133,6 +116,13 @@ An example logger exporter is below: [embedmd]:# (stats.go exporter) +Configure the default interval between reports of collected data. +This is a system wide interval and impacts all views. The default +interval duration is 10 seconds. 
+ +[embedmd]:# (stats.go reportingPeriod) + + ## Traces ### Starting and ending a span @@ -169,4 +159,5 @@ A screenshot of the CPU profile from the program above: [exporter-prom]: https://godoc.org/go.opencensus.io/exporter/prometheus [exporter-stackdriver]: https://godoc.org/go.opencensus.io/exporter/stackdriver [exporter-zipkin]: https://godoc.org/go.opencensus.io/exporter/zipkin -[exporter-jaeger]: https://godoc.org/go.opencensus.io/exporter/jaeger \ No newline at end of file +[exporter-jaeger]: https://godoc.org/go.opencensus.io/exporter/jaeger +[exporter-xray]: https://godoc.org/go.opencensus.io/exporter/xray diff --git a/vendor/go.opencensus.io/internal/readme/stats.go b/vendor/go.opencensus.io/internal/readme/stats.go index 446287304..494968d88 100644 --- a/vendor/go.opencensus.io/internal/readme/stats.go +++ b/vendor/go.opencensus.io/internal/readme/stats.go @@ -48,7 +48,7 @@ func statsExamples() { _ = m // START aggs - distAgg := view.DistributionAggregation([]float64{0, 1 << 32, 2 << 32, 3 << 32}) + distAgg := view.DistributionAggregation{0, 1 << 32, 2 << 32, 3 << 32} countAgg := view.CountAggregation{} sumAgg := view.SumAggregation{} meanAgg := view.MeanAggregation{} @@ -57,36 +57,16 @@ func statsExamples() { _, _, _, _ = distAgg, countAgg, sumAgg, meanAgg // START view - v, err := view.New( - "my.org/video_size_distribution", - "distribution of processed video size over time", - nil, - videoSize, - distAgg, - ) - if err != nil { - log.Fatalf("cannot create view: %v", err) - } - if err := view.Register(v); err != nil { - log.Fatal(err) + if err = view.Subscribe(&view.View{ + Name: "my.org/video_size_distribution", + Description: "distribution of processed video size over time", + Measure: videoSize, + Aggregation: view.DistributionAggregation([]float64{0, 1 << 32, 2 << 32, 3 << 32}), + }); err != nil { + log.Fatalf("Failed to subscribe to view: %v", err) } // END view - // START findView - v = view.Find("my.org/video_size_distribution") - if v == nil { - log.Fatalln("view not found") - } - // END findView - - _ = v - - // START unregisterView - if err = view.Unregister(v); err != nil { - log.Fatal(err) - } - // END unregisterView - // START reportingPeriod view.SetReportingPeriod(5 * time.Second) // END reportingPeriod @@ -95,12 +75,6 @@ func statsExamples() { stats.Record(ctx, videoSize.M(102478)) // END record - // START subscribe - if err := v.Subscribe(); err != nil { - log.Fatal(err) - } - // END subscribe - // START registerExporter // Register an exporter to be able to retrieve // the data from the subscribed views. diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client.go b/vendor/go.opencensus.io/plugin/ocgrpc/client.go index 0aa8f371c..a3d7fcd7b 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client.go @@ -20,12 +20,6 @@ import ( "google.golang.org/grpc/stats" ) -// NewClientStatsHandler enables OpenCensus stats and trace -// for gRPC clients. Deprecated, construct a ClientHandler directly. -func NewClientStatsHandler() stats.Handler { - return &ClientHandler{} -} - // ClientHandler implements a gRPC stats.Handler for recording OpenCensus stats and // traces. Use with gRPC clients only. 
type ClientHandler struct { @@ -38,11 +32,6 @@ type ClientHandler struct { NoStats bool } -var ( - clientTrace clientTraceHandler - clientStats clientStatsHandler -) - func (c *ClientHandler) HandleConn(ctx context.Context, cs stats.ConnStats) { // no-op } @@ -54,19 +43,19 @@ func (c *ClientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) con func (c *ClientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { if !c.NoTrace { - clientTrace.HandleRPC(ctx, rs) + c.traceHandleRPC(ctx, rs) } if !c.NoStats { - clientStats.HandleRPC(ctx, rs) + c.statsHandleRPC(ctx, rs) } } func (c *ClientHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { if !c.NoTrace { - ctx = clientTrace.TagRPC(ctx, rti) + ctx = c.traceTagRPC(ctx, rti) } if !c.NoStats { - ctx = clientStats.TagRPC(ctx, rti) + ctx = c.statsTagRPC(ctx, rti) } return ctx } diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go index f70353dc6..60edcc012 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go @@ -38,47 +38,53 @@ var ( // package. These are declared as a convenience only; none are subscribed by // default. var ( - ClientErrorCountView, _ = view.New( - "grpc.io/client/error_count", - "RPC Errors", - []tag.Key{KeyStatus, KeyMethod}, - ClientErrorCount, - view.MeanAggregation{}) + ClientErrorCountView = &view.View{ + Name: "grpc.io/client/error_count", + Description: "RPC Errors", + TagKeys: []tag.Key{KeyStatus, KeyMethod}, + Measure: ClientErrorCount, + Aggregation: view.MeanAggregation{}, + } - ClientRoundTripLatencyView, _ = view.New( - "grpc.io/client/roundtrip_latency", - "Latency in msecs", - []tag.Key{KeyMethod}, - ClientRoundTripLatency, - DefaultMillisecondsDistribution) + ClientRoundTripLatencyView = &view.View{ + Name: "grpc.io/client/roundtrip_latency", + Description: "Latency in msecs", + TagKeys: []tag.Key{KeyMethod}, + Measure: ClientRoundTripLatency, + Aggregation: DefaultMillisecondsDistribution, + } - ClientRequestBytesView, _ = view.New( - "grpc.io/client/request_bytes", - "Request bytes", - []tag.Key{KeyMethod}, - ClientRequestBytes, - DefaultBytesDistribution) + ClientRequestBytesView = &view.View{ + Name: "grpc.io/client/request_bytes", + Description: "Request bytes", + TagKeys: []tag.Key{KeyMethod}, + Measure: ClientRequestBytes, + Aggregation: DefaultBytesDistribution, + } - ClientResponseBytesView, _ = view.New( - "grpc.io/client/response_bytes", - "Response bytes", - []tag.Key{KeyMethod}, - ClientResponseBytes, - DefaultBytesDistribution) + ClientResponseBytesView = &view.View{ + Name: "grpc.io/client/response_bytes", + Description: "Response bytes", + TagKeys: []tag.Key{KeyMethod}, + Measure: ClientResponseBytes, + Aggregation: DefaultBytesDistribution, + } - ClientRequestCountView, _ = view.New( - "grpc.io/client/request_count", - "Count of request messages per client RPC", - []tag.Key{KeyMethod}, - ClientRequestCount, - DefaultMessageCountDistribution) + ClientRequestCountView = &view.View{ + Name: "grpc.io/client/request_count", + Description: "Count of request messages per client RPC", + TagKeys: []tag.Key{KeyMethod}, + Measure: ClientRequestCount, + Aggregation: DefaultMessageCountDistribution, + } - ClientResponseCountView, _ = view.New( - "grpc.io/client/response_count", - "Count of response messages per client RPC", - []tag.Key{KeyMethod}, - ClientResponseCount, - DefaultMessageCountDistribution) + 
ClientResponseCountView = &view.View{ + Name: "grpc.io/client/response_count", + Description: "Count of response messages per client RPC", + TagKeys: []tag.Key{KeyMethod}, + Measure: ClientResponseCount, + Aggregation: DefaultMessageCountDistribution, + } ) // All the default client views provided by this package: diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics_test.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics_test.go index ab9e0ea33..282be0627 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics_test.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics_test.go @@ -31,7 +31,7 @@ func TestViewsAggregationsConform(t *testing.T) { // Add any other defined views to be type checked during tests to ensure we don't regress. assertTypeOf := func(v *view.View, wantSample view.Aggregation) { - aggregation := v.Aggregation() + aggregation := v.Aggregation gotValue := reflect.ValueOf(aggregation) wantValue := reflect.ValueOf(wantSample) if gotValue.Type() != wantValue.Type() { @@ -52,14 +52,14 @@ func TestStrictViewNames(t *testing.T) { alreadySeen := make(map[string]int) assertName := func(v *view.View, want string) { _, _, line, _ := runtime.Caller(1) - if prevLine, ok := alreadySeen[v.Name()]; ok { + if prevLine, ok := alreadySeen[v.Name]; ok { t.Errorf("Item's Name on line %d was already used on line %d", line, prevLine) return } - if got := v.Name(); got != want { + if got := v.Name; got != want { t.Errorf("Item on line: %d got %q want %q", line, got, want) } - alreadySeen[v.Name()] = line + alreadySeen[v.Name] = line } assertName(ClientErrorCountView, "grpc.io/client/error_count") diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go index 99b01502e..59d963478 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go @@ -27,14 +27,9 @@ import ( "google.golang.org/grpc/status" ) -// clientStatsHandler is a stats.Handler implementation -// that collects stats for a gRPC client. Predefined -// measures and views can be used to access the collected data. -type clientStatsHandler struct{} - // TagRPC gets the tag.Map populated by the application code, serializes // its tags into the GRPC metadata in order to be sent to the server. -func (h *clientStatsHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { +func (h *ClientHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { startTime := time.Now() if info == nil { if grpclog.V(2) { @@ -60,7 +55,7 @@ func (h *clientStatsHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) } // HandleRPC processes the RPC events. 
-func (h *clientStatsHandler) HandleRPC(ctx context.Context, s stats.RPCStats) { +func (h *ClientHandler) statsHandleRPC(ctx context.Context, s stats.RPCStats) { switch st := s.(type) { case *stats.Begin, *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer: // do nothing for client @@ -75,7 +70,7 @@ func (h *clientStatsHandler) HandleRPC(ctx context.Context, s stats.RPCStats) { } } -func (h *clientStatsHandler) handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) { +func (h *ClientHandler) handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) { d, ok := ctx.Value(grpcClientRPCKey).(*rpcData) if !ok { if grpclog.V(2) { @@ -88,7 +83,7 @@ func (h *clientStatsHandler) handleRPCOutPayload(ctx context.Context, s *stats.O atomic.AddInt64(&d.reqCount, 1) } -func (h *clientStatsHandler) handleRPCInPayload(ctx context.Context, s *stats.InPayload) { +func (h *ClientHandler) handleRPCInPayload(ctx context.Context, s *stats.InPayload) { d, ok := ctx.Value(grpcClientRPCKey).(*rpcData) if !ok { if grpclog.V(2) { @@ -101,7 +96,7 @@ func (h *clientStatsHandler) handleRPCInPayload(ctx context.Context, s *stats.In atomic.AddInt64(&d.respCount, 1) } -func (h *clientStatsHandler) handleRPCEnd(ctx context.Context, s *stats.End) { +func (h *ClientHandler) handleRPCEnd(ctx context.Context, s *stats.End) { d, ok := ctx.Value(grpcClientRPCKey).(*rpcData) if !ok { if grpclog.V(2) { diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler_test.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler_test.go index de8ebadc8..d34a8a5cb 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler_test.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler_test.go @@ -298,13 +298,12 @@ func TestClientDefaultCollections(t *testing.T) { for _, tc := range tcs { // Register views. - for _, v := range DefaultClientViews { - if err := v.Subscribe(); err != nil { - t.Error(err) - } + err := view.Subscribe(DefaultClientViews...) + if err != nil { + t.Error(err) } - h := &clientStatsHandler{} + h := &ClientHandler{NoTrace: true} for _, rpc := range tc.rpcs { mods := []tag.Mutator{} for _, t := range rpc.tags { @@ -327,33 +326,29 @@ func TestClientDefaultCollections(t *testing.T) { } for _, wantData := range tc.wants { - gotRows, err := wantData.v().RetrieveData() + gotRows, err := view.RetrieveData(wantData.v().Name) if err != nil { - t.Errorf("%q: RetrieveData(%q) = %v", tc.label, wantData.v().Name(), err) + t.Errorf("%q: RetrieveData(%q) = %v", tc.label, wantData.v().Name, err) continue } for _, gotRow := range gotRows { if !containsRow(wantData.rows, gotRow) { - t.Errorf("%q: unwanted row for view %q = %v", tc.label, wantData.v().Name(), gotRow) + t.Errorf("%q: unwanted row for view %q = %v", tc.label, wantData.v().Name, gotRow) break } } for _, wantRow := range wantData.rows { if !containsRow(gotRows, wantRow) { - t.Errorf("%q: row missing for view %q; want %v", tc.label, wantData.v().Name(), wantRow) + t.Errorf("%q: row missing for view %q; want %v", tc.label, wantData.v().Name, wantRow) break } } } // Unregister views to cleanup. - for _, v := range DefaultClientViews { - if err := v.Unsubscribe(); err != nil { - t.Error(err) - } - } + view.Unsubscribe(DefaultClientViews...) 
} } diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/example_test.go b/vendor/go.opencensus.io/plugin/ocgrpc/example_test.go index 84afa69b3..7c8c861a4 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/example_test.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/example_test.go @@ -18,12 +18,14 @@ import ( "log" "go.opencensus.io/plugin/ocgrpc" + "go.opencensus.io/stats/view" "google.golang.org/grpc" ) func ExampleClientHandler() { - // Subscribe to collect client request count. - if err := ocgrpc.ClientRequestCountView.Subscribe(); err != nil { + // Subscribe views to collect data. + err := view.Subscribe(ocgrpc.DefaultClientViews...) + if err != nil { log.Fatal(err) } @@ -37,8 +39,9 @@ func ExampleClientHandler() { } func ExampleServerHandler() { - // Subscribe to collect server request count. - if err := ocgrpc.ServerRequestCountView.Subscribe(); err != nil { + // Subscribe views to collect data. + err := view.Subscribe(ocgrpc.DefaultServerViews...) + if err != nil { log.Fatal(err) } diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/grpc_test.go b/vendor/go.opencensus.io/plugin/ocgrpc/grpc_test.go index 4b5a02b94..81b020e6a 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/grpc_test.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/grpc_test.go @@ -18,6 +18,7 @@ import ( "testing" "time" + "go.opencensus.io/stats/view" "golang.org/x/net/context" "go.opencensus.io/trace" @@ -25,11 +26,8 @@ import ( "google.golang.org/grpc/stats" ) -func TestNewClientStatsHandler(t *testing.T) { +func TestClientHandler(t *testing.T) { ctx := context.Background() - - handler := NewClientStatsHandler() - te := &traceExporter{} trace.RegisterExporter(te) if err := ClientRequestCountView.Subscribe(); err != nil { @@ -41,6 +39,7 @@ }) ctx = trace.WithSpan(ctx, span) + var handler ClientHandler ctx = handler.TagRPC(ctx, &stats.RPCTagInfo{ FullMethodName: "/service.foo/method", }) @@ -53,7 +52,7 @@ EndTime: time.Now(), }) - stats, err := ClientRequestCountView.RetrieveData() + stats, err := view.RetrieveData(ClientRequestCountView.Name) if err != nil { t.Fatal(err) } @@ -67,26 +66,24 @@ } // Cleanup. - if err := ClientRequestCountView.Unsubscribe(); err != nil { - t.Fatal(err) - } + view.Unsubscribe(ClientRequestCountView) } -func TestNewServerStatsHandler(t *testing.T) { +func TestServerHandler(t *testing.T) { ctx := context.Background() - - handler := NewServerStatsHandler() - te := &traceExporter{} trace.RegisterExporter(te) if err := ServerRequestCountView.Subscribe(); err != nil { t.Fatal(err) } + // Ensure we start tracing. span := trace.NewSpan("/foo", nil, trace.StartOptions{ Sampler: trace.AlwaysSample(), }) ctx = trace.WithSpan(ctx, span) + + handler := &ServerHandler{} ctx = handler.TagRPC(ctx, &stats.RPCTagInfo{ FullMethodName: "/service.foo/method", }) @@ -97,7 +94,7 @@ EndTime: time.Now(), }) - stats, err := ServerRequestCountView.RetrieveData() + stats, err := view.RetrieveData(ServerRequestCountView.Name) if err != nil { t.Fatal(err) } @@ -111,10 +108,7 @@ } // Cleanup.
- if err := ServerRequestCountView.Unsubscribe(); err != nil { - t.Fatal(err) - } - + view.Unsubscribe(ServerRequestCountView) } type traceExporter struct { diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/testdata/generate.sh b/vendor/go.opencensus.io/plugin/ocgrpc/internal/testpb/generate.sh similarity index 100% rename from vendor/go.opencensus.io/plugin/ocgrpc/testdata/generate.sh rename to vendor/go.opencensus.io/plugin/ocgrpc/internal/testpb/generate.sh diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/testdata/test.pb.go b/vendor/go.opencensus.io/plugin/ocgrpc/internal/testpb/test.pb.go similarity index 81% rename from vendor/go.opencensus.io/plugin/ocgrpc/testdata/test.pb.go rename to vendor/go.opencensus.io/plugin/ocgrpc/internal/testpb/test.pb.go index ea9c8df8d..76346fc1e 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/testdata/test.pb.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/internal/testpb/test.pb.go @@ -2,7 +2,7 @@ // source: test.proto /* -Package testdata is a generated protocol buffer package. +Package testpb is a generated protocol buffer package. It is generated from these files: test.proto @@ -11,7 +11,7 @@ It has these top-level messages: FooRequest FooResponse */ -package testdata +package testpb import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -58,8 +58,8 @@ func (*FooResponse) ProtoMessage() {} func (*FooResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } func init() { - proto.RegisterType((*FooRequest)(nil), "testdata.FooRequest") - proto.RegisterType((*FooResponse)(nil), "testdata.FooResponse") + proto.RegisterType((*FooRequest)(nil), "testpb.FooRequest") + proto.RegisterType((*FooResponse)(nil), "testpb.FooResponse") } // Reference imports to suppress errors if they are not otherwise used. @@ -87,7 +87,7 @@ func NewFooClient(cc *grpc.ClientConn) FooClient { func (c *fooClient) Single(ctx context.Context, in *FooRequest, opts ...grpc.CallOption) (*FooResponse, error) { out := new(FooResponse) - err := grpc.Invoke(ctx, "/testdata.Foo/Single", in, out, c.cc, opts...) + err := grpc.Invoke(ctx, "/testpb.Foo/Single", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -95,7 +95,7 @@ func (c *fooClient) Single(ctx context.Context, in *FooRequest, opts ...grpc.Cal } func (c *fooClient) Multiple(ctx context.Context, opts ...grpc.CallOption) (Foo_MultipleClient, error) { - stream, err := grpc.NewClientStream(ctx, &_Foo_serviceDesc.Streams[0], c.cc, "/testdata.Foo/Multiple", opts...) + stream, err := grpc.NewClientStream(ctx, &_Foo_serviceDesc.Streams[0], c.cc, "/testpb.Foo/Multiple", opts...) 
if err != nil { return nil, err } @@ -146,7 +146,7 @@ func _Foo_Single_Handler(srv interface{}, ctx context.Context, dec func(interfac } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/testdata.Foo/Single", + FullMethod: "/testpb.Foo/Single", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(FooServer).Single(ctx, req.(*FooRequest)) @@ -181,7 +181,7 @@ func (x *fooMultipleServer) Recv() (*FooRequest, error) { } var _Foo_serviceDesc = grpc.ServiceDesc{ - ServiceName: "testdata.Foo", + ServiceName: "testpb.Foo", HandlerType: (*FooServer)(nil), Methods: []grpc.MethodDesc{ { @@ -203,16 +203,16 @@ var _Foo_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("test.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 139 bytes of a gzipped FileDescriptorProto + // 137 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0x49, 0x2d, 0x2e, - 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x00, 0xb1, 0x53, 0x12, 0x4b, 0x12, 0x95, 0x14, - 0xb8, 0xb8, 0xdc, 0xf2, 0xf3, 0x83, 0x52, 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x84, 0x84, 0xb8, 0x58, - 0xd2, 0x12, 0x33, 0x73, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x82, 0xc0, 0x6c, 0x25, 0x5e, 0x2e, - 0x6e, 0xb0, 0x8a, 0xe2, 0x82, 0xfc, 0xbc, 0xe2, 0x54, 0xa3, 0x4a, 0x2e, 0x66, 0xb7, 0xfc, 0x7c, - 0x21, 0x53, 0x2e, 0xb6, 0xe0, 0xcc, 0xbc, 0xf4, 0x9c, 0x54, 0x21, 0x11, 0x3d, 0x98, 0x61, 0x7a, - 0x08, 0x93, 0xa4, 0x44, 0xd1, 0x44, 0x21, 0xba, 0x85, 0xac, 0xb9, 0x38, 0x7c, 0x4b, 0x73, 0x4a, - 0x32, 0x0b, 0x48, 0xd4, 0xa8, 0xc1, 0x68, 0xc0, 0x98, 0xc4, 0x06, 0x76, 0xbc, 0x31, 0x20, 0x00, - 0x00, 0xff, 0xff, 0x10, 0x60, 0x13, 0xc6, 0xca, 0x00, 0x00, 0x00, + 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0xb1, 0x0b, 0x92, 0x94, 0x14, 0xb8, 0xb8, + 0xdc, 0xf2, 0xf3, 0x83, 0x52, 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x84, 0x84, 0xb8, 0x58, 0xd2, 0x12, + 0x33, 0x73, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x82, 0xc0, 0x6c, 0x25, 0x5e, 0x2e, 0x6e, 0xb0, + 0x8a, 0xe2, 0x82, 0xfc, 0xbc, 0xe2, 0x54, 0xa3, 0x42, 0x2e, 0x66, 0xb7, 0xfc, 0x7c, 0x21, 0x43, + 0x2e, 0xb6, 0xe0, 0xcc, 0xbc, 0xf4, 0x9c, 0x54, 0x21, 0x21, 0x3d, 0x88, 0x51, 0x7a, 0x08, 0x73, + 0xa4, 0x84, 0x51, 0xc4, 0x20, 0x3a, 0x85, 0xcc, 0xb9, 0x38, 0x7c, 0x4b, 0x73, 0x4a, 0x32, 0x0b, + 0x48, 0xd0, 0xa4, 0xc1, 0x68, 0xc0, 0x98, 0xc4, 0x06, 0x76, 0xb2, 0x31, 0x20, 0x00, 0x00, 0xff, + 0xff, 0xda, 0xc5, 0x9f, 0x2f, 0xc0, 0x00, 0x00, 0x00, } //go:generate ./generate.sh diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/testdata/test.proto b/vendor/go.opencensus.io/plugin/ocgrpc/internal/testpb/test.proto similarity index 92% rename from vendor/go.opencensus.io/plugin/ocgrpc/testdata/test.proto rename to vendor/go.opencensus.io/plugin/ocgrpc/internal/testpb/test.proto index 788f59f4f..2a198a6f5 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/testdata/test.proto +++ b/vendor/go.opencensus.io/plugin/ocgrpc/internal/testpb/test.proto @@ -1,6 +1,6 @@ syntax = "proto3"; -package testdata; +package testpb; message FooRequest { bool fail = 1; diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server.go b/vendor/go.opencensus.io/plugin/ocgrpc/server.go index a1144423e..b0343ff8b 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/server.go @@ -20,12 +20,6 @@ import ( "google.golang.org/grpc/stats" ) -// NewServerStatsHandler enables OpenCensus stats and trace -// for gRPC servers. Deprecated, construct a ServerHandler directly. 
-func NewServerStatsHandler() stats.Handler { - return &ServerHandler{} -} - // ServerHandler implements gRPC stats.Handler recording OpenCensus stats and // traces. Use with gRPC servers. type ServerHandler struct { @@ -38,11 +32,6 @@ type ServerHandler struct { NoStats bool } -var ( - serverTrace serverTraceHandler - serverStats serverStatsHandler -) - func (s *ServerHandler) HandleConn(ctx context.Context, cs stats.ConnStats) { // no-op } @@ -54,19 +43,19 @@ func (s *ServerHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) con func (s *ServerHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { if !s.NoTrace { - serverTrace.HandleRPC(ctx, rs) + s.traceHandleRPC(ctx, rs) } if !s.NoStats { - serverStats.HandleRPC(ctx, rs) + s.statsHandleRPC(ctx, rs) } } func (s *ServerHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { if !s.NoTrace { - ctx = serverTrace.TagRPC(ctx, rti) + ctx = s.traceTagRPC(ctx, rti) } if !s.NoStats { - ctx = serverStats.TagRPC(ctx, rti) + ctx = s.statsTagRPC(ctx, rti) } return ctx } diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go index d1b20d70d..3693e406c 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go @@ -42,47 +42,53 @@ var ( // package. These are declared as a convenience only; none are subscribed by // default. var ( - ServerErrorCountView, _ = view.New( - "grpc.io/server/error_count", - "RPC Errors", - []tag.Key{KeyMethod, KeyStatus}, - ServerErrorCount, - view.CountAggregation{}) + ServerErrorCountView = &view.View{ + Name: "grpc.io/server/error_count", + Description: "RPC Errors", + TagKeys: []tag.Key{KeyMethod, KeyStatus}, + Measure: ServerErrorCount, + Aggregation: view.CountAggregation{}, + } - ServerServerElapsedTimeView, _ = view.New( - "grpc.io/server/server_elapsed_time", - "Server elapsed time in msecs", - []tag.Key{KeyMethod}, - ServerServerElapsedTime, - DefaultMillisecondsDistribution) + ServerServerElapsedTimeView = &view.View{ + Name: "grpc.io/server/server_elapsed_time", + Description: "Server elapsed time in msecs", + TagKeys: []tag.Key{KeyMethod}, + Measure: ServerServerElapsedTime, + Aggregation: DefaultMillisecondsDistribution, + } - ServerRequestBytesView, _ = view.New( - "grpc.io/server/request_bytes", - "Request bytes", - []tag.Key{KeyMethod}, - ServerRequestBytes, - DefaultBytesDistribution) + ServerRequestBytesView = &view.View{ + Name: "grpc.io/server/request_bytes", + Description: "Request bytes", + TagKeys: []tag.Key{KeyMethod}, + Measure: ServerRequestBytes, + Aggregation: DefaultBytesDistribution, + } - ServerResponseBytesView, _ = view.New( - "grpc.io/server/response_bytes", - "Response bytes", - []tag.Key{KeyMethod}, - ServerResponseBytes, - DefaultBytesDistribution) + ServerResponseBytesView = &view.View{ + Name: "grpc.io/server/response_bytes", + Description: "Response bytes", + TagKeys: []tag.Key{KeyMethod}, + Measure: ServerResponseBytes, + Aggregation: DefaultBytesDistribution, + } - ServerRequestCountView, _ = view.New( - "grpc.io/server/request_count", - "Count of request messages per server RPC", - []tag.Key{KeyMethod}, - ServerRequestCount, - DefaultMessageCountDistribution) + ServerRequestCountView = &view.View{ + Name: "grpc.io/server/request_count", + Description: "Count of request messages per server RPC", + TagKeys: []tag.Key{KeyMethod}, + Measure: ServerRequestCount, + Aggregation: DefaultMessageCountDistribution, 
+ } - ServerResponseCountView, _ = view.New( - "grpc.io/server/response_count", - "Count of response messages per server RPC", - []tag.Key{KeyMethod}, - ServerResponseCount, - DefaultMessageCountDistribution) + ServerResponseCountView = &view.View{ + Name: "grpc.io/server/response_count", + Description: "Count of response messages per server RPC", + TagKeys: []tag.Key{KeyMethod}, + Measure: ServerResponseCount, + Aggregation: DefaultMessageCountDistribution, + } ) // All default server views provided by this package: diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go index 0745fe887..29f1ef40b 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go @@ -29,14 +29,9 @@ import ( "google.golang.org/grpc/status" ) -// serverStatsHandler is a stats.Handler implementation -// that collects stats for a gRPC server. Predefined -// measures and views can be used to access the collected data. -type serverStatsHandler struct{} - // TagRPC gets the metadata from gRPC context, extracts the encoded tags from // it and creates a new tag.Map and puts them into the returned context. -func (h *serverStatsHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { +func (h *ServerHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { startTime := time.Now() if info == nil { if grpclog.V(2) { @@ -51,7 +46,7 @@ func (h *serverStatsHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) } // HandleRPC processes the RPC events. -func (h *serverStatsHandler) HandleRPC(ctx context.Context, s stats.RPCStats) { +func (h *ServerHandler) statsHandleRPC(ctx context.Context, s stats.RPCStats) { switch st := s.(type) { case *stats.Begin, *stats.InHeader, *stats.InTrailer, *stats.OutHeader, *stats.OutTrailer: // Do nothing for server @@ -67,7 +62,7 @@ func (h *serverStatsHandler) HandleRPC(ctx context.Context, s stats.RPCStats) { } } -func (h *serverStatsHandler) handleRPCInPayload(ctx context.Context, s *stats.InPayload) { +func (h *ServerHandler) handleRPCInPayload(ctx context.Context, s *stats.InPayload) { d, ok := ctx.Value(grpcServerRPCKey).(*rpcData) if !ok { if grpclog.V(2) { @@ -80,7 +75,7 @@ func (h *serverStatsHandler) handleRPCInPayload(ctx context.Context, s *stats.In atomic.AddInt64(&d.reqCount, 1) } -func (h *serverStatsHandler) handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) { +func (h *ServerHandler) handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) { d, ok := ctx.Value(grpcServerRPCKey).(*rpcData) if !ok { if grpclog.V(2) { @@ -93,7 +88,7 @@ func (h *serverStatsHandler) handleRPCOutPayload(ctx context.Context, s *stats.O atomic.AddInt64(&d.respCount, 1) } -func (h *serverStatsHandler) handleRPCEnd(ctx context.Context, s *stats.End) { +func (h *ServerHandler) handleRPCEnd(ctx context.Context, s *stats.End) { d, ok := ctx.Value(grpcServerRPCKey).(*rpcData) if !ok { if grpclog.V(2) { @@ -128,7 +123,7 @@ func (h *serverStatsHandler) handleRPCEnd(ctx context.Context, s *stats.End) { // createTags creates a new tag map containing the tags extracted from the // gRPC metadata. 
-func (h *serverStatsHandler) createTags(ctx context.Context, fullinfo string) (context.Context, error) { +func (h *ServerHandler) createTags(ctx context.Context, fullinfo string) (context.Context, error) { mods := []tag.Mutator{ tag.Upsert(KeyMethod, methodName(fullinfo)), } diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler_test.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler_test.go index a11e04b9c..b90ac3e74 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler_test.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler_test.go @@ -302,7 +302,7 @@ func TestServerDefaultCollections(t *testing.T) { } } - h := &serverStatsHandler{} + h := &ServerHandler{NoTrace: true} for _, rpc := range tc.rpcs { mods := []tag.Mutator{} for _, t := range rpc.tags { @@ -326,33 +326,29 @@ func TestServerDefaultCollections(t *testing.T) { } for _, wantData := range tc.wants { - gotRows, err := wantData.v().RetrieveData() + gotRows, err := view.RetrieveData(wantData.v().Name) if err != nil { - t.Errorf("%q: RetrieveData (%q) = %v", tc.label, wantData.v().Name(), err) + t.Errorf("%q: RetrieveData (%q) = %v", tc.label, wantData.v().Name, err) continue } for _, gotRow := range gotRows { if !containsRow(wantData.rows, gotRow) { - t.Errorf("%q: unwanted row for view %q: %v", tc.label, wantData.v().Name(), gotRow) + t.Errorf("%q: unwanted row for view %q: %v", tc.label, wantData.v().Name, gotRow) break } } for _, wantRow := range wantData.rows { if !containsRow(gotRows, wantRow) { - t.Errorf("%q: missing row for view %q: %v", tc.label, wantData.v().Name(), wantRow) + t.Errorf("%q: missing row for view %q: %v", tc.label, wantData.v().Name, wantRow) break } } } // Unregister views to cleanup. - for _, v := range DefaultServerViews { - if err := v.Unsubscribe(); err != nil { - t.Error(err) - } - } + view.Unsubscribe(DefaultServerViews...) } } diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go index 2ee425578..3068150b2 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go @@ -39,9 +39,9 @@ type rpcData struct { // The following variables define the default hard-coded auxiliary data used by // both the default GRPC client and GRPC server metrics. 
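+//
+// DistributionAggregation is backed by a []float64 of bucket boundaries, so
+// the explicit []float64 conversion below is no longer needed and a plain
+// composite literal works, e.g. (sketch):
+//
+//	buckets := view.DistributionAggregation{0, 1024, 2048}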
var ( - DefaultBytesDistribution = view.DistributionAggregation([]float64{0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296}) - DefaultMillisecondsDistribution = view.DistributionAggregation([]float64{0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000}) - DefaultMessageCountDistribution = view.DistributionAggregation([]float64{0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536}) + DefaultBytesDistribution = view.DistributionAggregation{0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296} + DefaultMillisecondsDistribution = view.DistributionAggregation{0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000} + DefaultMessageCountDistribution = view.DistributionAggregation{0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536} ) var ( diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go index d324b071b..fe086519c 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go @@ -17,29 +17,23 @@ package ocgrpc import ( "strings" + "google.golang.org/grpc/codes" + "go.opencensus.io/trace" "go.opencensus.io/trace/propagation" "golang.org/x/net/context" - "google.golang.org/grpc" "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" ) -// clientTraceHandler is a an implementation of grpc.StatsHandler -// that can be passed to grpc.Dial -// using grpc.WithStatsHandler to enable trace context propagation and -// automatic span creation for outgoing gRPC requests. -type clientTraceHandler struct{} - -type serverTraceHandler struct{} - const traceContextKey = "grpc-trace-bin" // TagRPC creates a new trace span for the client side of the RPC. // // It returns ctx with the new trace span added and a serialization of the // SpanContext added to the outgoing gRPC metadata. -func (c *clientTraceHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { +func (c *ClientHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { name := "Sent" + strings.Replace(rti.FullMethodName, "/", ".", -1) ctx, _ = trace.StartSpan(ctx, name) traceContextBinary := propagation.Binary(trace.FromContext(ctx).SpanContext()) @@ -55,26 +49,27 @@ func (c *clientTraceHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) // it finds one, uses that SpanContext as the parent context of the new span. // // It returns ctx, with the new trace span added. 
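+//
+// A ServerHandler is typically installed when the gRPC server is
+// constructed, for example (sketch):
+//
+//	srv := grpc.NewServer(grpc.StatsHandler(&ocgrpc.ServerHandler{}))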
-func (s *serverTraceHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { +func (s *ServerHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { md, _ := metadata.FromIncomingContext(ctx) name := "Recv" + strings.Replace(rti.FullMethodName, "/", ".", -1) if s := md[traceContextKey]; len(s) > 0 { if parent, ok := propagation.FromBinary([]byte(s[0])); ok { - ctx, _ = trace.StartSpanWithRemoteParent(ctx, name, parent, trace.StartOptions{}) - return ctx + span := trace.NewSpanWithRemoteParent(name, parent, trace.StartOptions{}) + return trace.WithSpan(ctx, span) } } + // TODO(ramonza): should we ignore the in-process parent here? ctx, _ = trace.StartSpan(ctx, name) return ctx } // HandleRPC processes the RPC stats, adding information to the current trace span. -func (c *clientTraceHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { +func (c *ClientHandler) traceHandleRPC(ctx context.Context, rs stats.RPCStats) { handleRPC(ctx, rs) } // HandleRPC processes the RPC stats, adding information to the current trace span. -func (s *serverTraceHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { +func (s *ServerHandler) traceHandleRPC(ctx context.Context, rs stats.RPCStats) { handleRPC(ctx, rs) } @@ -84,16 +79,20 @@ func handleRPC(ctx context.Context, rs stats.RPCStats) { switch rs := rs.(type) { case *stats.Begin: span.SetAttributes( - trace.BoolAttribute{Key: "Client", Value: rs.Client}, - trace.BoolAttribute{Key: "FailFast", Value: rs.FailFast}) + trace.BoolAttribute("Client", rs.Client), + trace.BoolAttribute("FailFast", rs.FailFast)) case *stats.InPayload: span.AddMessageReceiveEvent(0 /* TODO: messageID */, int64(rs.Length), int64(rs.WireLength)) case *stats.OutPayload: span.AddMessageSendEvent(0, int64(rs.Length), int64(rs.WireLength)) case *stats.End: if rs.Error != nil { - code, desc := grpc.Code(rs.Error), grpc.ErrorDesc(rs.Error) - span.SetStatus(trace.Status{Code: int32(code), Message: desc}) + s, ok := status.FromError(rs.Error) + if ok { + span.SetStatus(trace.Status{Code: int32(s.Code()), Message: s.Message()}) + } else { + span.SetStatus(trace.Status{Code: int32(codes.Internal), Message: rs.Error.Error()}) + } } span.End() } diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common_test.go b/vendor/go.opencensus.io/plugin/ocgrpc/trace_common_test.go index 6ee09ccb1..25d0f5f1c 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common_test.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/trace_common_test.go @@ -21,7 +21,7 @@ import ( "testing" "time" - testpb "go.opencensus.io/plugin/ocgrpc/testdata" + "go.opencensus.io/plugin/ocgrpc/internal/testpb" "go.opencensus.io/trace" "golang.org/x/net/context" "google.golang.org/grpc" @@ -124,7 +124,7 @@ func TestStreaming(t *testing.T) { s1 := <-te.ch s2 := <-te.ch - checkSpanData(t, s1, s2, ".testdata.Foo.Multiple", true) + checkSpanData(t, s1, s2, ".testpb.Foo.Multiple", true) select { case <-te.ch: @@ -167,7 +167,7 @@ func TestStreamingFail(t *testing.T) { s1 := <-te.ch s2 := <-te.ch - checkSpanData(t, s1, s2, ".testdata.Foo.Multiple", false) + checkSpanData(t, s1, s2, ".testpb.Foo.Multiple", false) cleanup() select { @@ -196,7 +196,7 @@ func TestSingle(t *testing.T) { s1 := <-te.ch s2 := <-te.ch - checkSpanData(t, s1, s2, ".testdata.Foo.Single", true) + checkSpanData(t, s1, s2, ".testpb.Foo.Single", true) cleanup() select { @@ -225,7 +225,7 @@ func TestSingleFail(t *testing.T) { s1 := <-te.ch s2 := <-te.ch - checkSpanData(t, s1, s2, ".testdata.Foo.Single", 
false) + checkSpanData(t, s1, s2, ".testpb.Foo.Single", false) cleanup() select { diff --git a/vendor/go.opencensus.io/plugin/ochttp/client.go b/vendor/go.opencensus.io/plugin/ochttp/client.go index 6d981bc1d..2be6beefc 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/client.go +++ b/vendor/go.opencensus.io/plugin/ochttp/client.go @@ -42,10 +42,9 @@ type Transport struct { // (currently B3 format) will be used. Propagation propagation.HTTPFormat - // Sampler if provided, will be consulted for each span generated by this - // RoundTripper. Otherwise, the default sampling behavior takes place - // (see trace.StartOptions). - Sampler trace.Sampler + // StartOptions are applied to the span started by this Transport around each + // request. + StartOptions trace.StartOptions // TODO: Implement tag propagation for HTTP. } @@ -60,15 +59,14 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { format = defaultFormat } rt = &traceTransport{ - base: rt, - format: format, - sampler: t.Sampler, + base: rt, + format: format, + startOptions: t.StartOptions, } } if !t.NoStats { rt = statsTransport{ - base: rt, - sampler: t.Sampler, + base: rt, } } return rt.RoundTrip(req) diff --git a/vendor/go.opencensus.io/plugin/ochttp/stats_transport.go b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go similarity index 92% rename from vendor/go.opencensus.io/plugin/ochttp/stats_transport.go rename to vendor/go.opencensus.io/plugin/ochttp/client_stats.go index 63b5cbb28..9b286b929 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/stats_transport.go +++ b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go @@ -24,13 +24,11 @@ import ( "go.opencensus.io/stats" "go.opencensus.io/tag" - "go.opencensus.io/trace" ) // statsTransport is an http.RoundTripper that collects stats for the outgoing requests. type statsTransport struct { - base http.RoundTripper - sampler trace.Sampler + base http.RoundTripper } // RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request. @@ -56,10 +54,10 @@ func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) { resp, err := t.base.RoundTrip(req) if err != nil { - track.statusCode = "error" + track.statusCode = http.StatusInternalServerError track.end() } else { - track.statusCode = strconv.Itoa(resp.StatusCode) + track.statusCode = resp.StatusCode if resp.Body == nil { track.end() } else { @@ -67,7 +65,6 @@ func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) { resp.Body = track } } - return resp, err } @@ -82,15 +79,17 @@ func (t statsTransport) CancelRequest(req *http.Request) { } type tracker struct { + ctx context.Context respSize int64 reqSize int64 - ctx context.Context start time.Time body io.ReadCloser - statusCode string + statusCode int endOnce sync.Once } +var _ io.ReadCloser = (*tracker)(nil) + func (t *tracker) end() { t.endOnce.Do(func() { m := []stats.Measurement{ @@ -100,13 +99,11 @@ func (t *tracker) end() { if t.reqSize >= 0 { m = append(m, ClientRequestBytes.M(t.reqSize)) } - ctx, _ := tag.New(t.ctx, tag.Upsert(StatusCode, t.statusCode)) + ctx, _ := tag.New(t.ctx, tag.Upsert(StatusCode, strconv.Itoa(t.statusCode))) stats.Record(ctx, m...) 
})
}

-var _ io.ReadCloser = (*tracker)(nil)
-
func (t *tracker) Read(b []byte) (int, error) {
n, err := t.body.Read(b)
switch err {
diff --git a/vendor/go.opencensus.io/plugin/ochttp/client_test.go b/vendor/go.opencensus.io/plugin/ochttp/client_test.go
index 2219f8289..1df1c57df 100644
--- a/vendor/go.opencensus.io/plugin/ochttp/client_test.go
+++ b/vendor/go.opencensus.io/plugin/ochttp/client_test.go
@@ -97,7 +97,7 @@ func TestClient(t *testing.T) {
t.Errorf("view not found %q", viewName)
continue
}
- rows, err := v.RetrieveData()
+ rows, err := view.RetrieveData(v.Name)
if err != nil {
t.Error(err)
continue
@@ -114,7 +114,8 @@ func TestClient(t *testing.T) {
case *view.DistributionData:
count = data.Count
default:
- t.Errorf("don't know how to handle data type: %v", data)
+ t.Errorf("Unknown data type: %v", data)
+ continue
}
if got := count; got != reqCount {
t.Fatalf("%s = %d; want %d", viewName, got, reqCount)
@@ -144,7 +145,7 @@ func benchmarkClientServer(b *testing.B, transport *ochttp.Transport) {
fmt.Fprintf(rw, "Hello world.\n")
}))
defer ts.Close()
- transport.Sampler = trace.AlwaysSample()
+ transport.StartOptions.Sampler = trace.AlwaysSample()
var client http.Client
client.Transport = transport
b.ResetTimer()
@@ -194,7 +195,7 @@ func benchmarkClientServerParallel(b *testing.B, parallelism int, transport *och
MaxIdleConns: parallelism,
MaxIdleConnsPerHost: parallelism,
}
- transport.Sampler = trace.AlwaysSample()
+ transport.StartOptions.Sampler = trace.AlwaysSample()
c.Transport = transport
b.ResetTimer()
diff --git a/vendor/go.opencensus.io/plugin/ochttp/doc.go b/vendor/go.opencensus.io/plugin/ochttp/doc.go
index 7d4608592..10e626b16 100644
--- a/vendor/go.opencensus.io/plugin/ochttp/doc.go
+++ b/vendor/go.opencensus.io/plugin/ochttp/doc.go
@@ -13,4 +13,7 @@
// limitations under the License.

// Package ochttp provides OpenCensus instrumentation for net/http package.
+//
+// For server instrumentation, see Handler. For client-side instrumentation,
+// see Transport.
package ochttp // import "go.opencensus.io/plugin/ochttp"
diff --git a/vendor/go.opencensus.io/plugin/ochttp/example_test.go b/vendor/go.opencensus.io/plugin/ochttp/example_test.go
index 58bd5f907..fa4dd9370 100644
--- a/vendor/go.opencensus.io/plugin/ochttp/example_test.go
+++ b/vendor/go.opencensus.io/plugin/ochttp/example_test.go
@@ -19,10 +19,29 @@ import (
"net/http"

"go.opencensus.io/plugin/ochttp"
- "go.opencensus.io/plugin/ochttp/propagation/google"
+ "go.opencensus.io/plugin/ochttp/propagation/b3"
+ "go.opencensus.io/stats/view"
+ "go.opencensus.io/tag"
)

func ExampleTransport() {
+ err := view.Subscribe(
+ // Subscribe to a few default views
+ ochttp.ClientRequestCountByMethod,
+ ochttp.ClientResponseCountByStatusCode,
+ ochttp.ClientLatencyView,
+ // Subscribe to a custom view
+ &view.View{
+ Name: "httpclient_latency_by_hostpath",
+ TagKeys: []tag.Key{ochttp.Host, ochttp.Path},
+ Measure: ochttp.ClientLatency,
+ Aggregation: ochttp.DefaultLatencyDistribution,
+ },
+ )
+ if err != nil {
+ log.Fatal(err)
+ }
+
client := &http.Client{
Transport: &ochttp.Transport{},
}
@@ -44,6 +63,6 @@ func ExampleHandler_mux() {
log.Fatal(http.ListenAndServe("localhost:8080", &ochttp.Handler{
Handler: mux,
- Propagation: &google.HTTPFormat{}, // Uses Google's propagation format.
+ Propagation: &b3.HTTPFormat{}, })) } diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/google/google.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/google/google.go index 8e9a7b2f9..f73bb803a 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/propagation/google/google.go +++ b/vendor/go.opencensus.io/plugin/ochttp/propagation/google/google.go @@ -12,8 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package google contains a propagation.HTTPFormat implementation -// for Google Cloud Trace and Stackdriver. +// Package google is deprecated: Use go.opencensus.io/exporter/stackdriver/propagation. package google // import "go.opencensus.io/plugin/ochttp/propagation/google" import ( @@ -25,7 +24,6 @@ import ( "strings" "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" ) const ( @@ -33,12 +31,9 @@ const ( httpHeader = `X-Cloud-Trace-Context` ) -// HTTPFormat implements propagation.HTTPFormat to propagate -// traces in HTTP headers for Google Cloud Platform and Stackdriver Trace. +// Deprecated: Use go.opencensus.io/exporter/stackdriver/propagation.HTTPFormat type HTTPFormat struct{} -var _ propagation.HTTPFormat = (*HTTPFormat)(nil) - // SpanContextFromRequest extracts a Stackdriver Trace span context from incoming requests. func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { h := req.Header.Get(httpHeader) diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation_test.go b/vendor/go.opencensus.io/plugin/ochttp/propagation_test.go index f62623721..8c09efa11 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/propagation_test.go +++ b/vendor/go.opencensus.io/plugin/ochttp/propagation_test.go @@ -23,7 +23,7 @@ import ( "testing" "go.opencensus.io/plugin/ochttp/propagation/b3" - "go.opencensus.io/plugin/ochttp/propagation/google" + "go.opencensus.io/plugin/ochttp/propagation/tracecontext" "go.opencensus.io/trace" "go.opencensus.io/trace/propagation" ) @@ -32,7 +32,7 @@ func TestRoundTripAllFormats(t *testing.T) { // TODO: test combinations of different formats for chains of calls formats := []propagation.HTTPFormat{ &b3.HTTPFormat{}, - &google.HTTPFormat{}, + &tracecontext.HTTPFormat{}, } ctx := context.Background() diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go new file mode 100644 index 000000000..1c95d1466 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/server.go @@ -0,0 +1,164 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "context" + "net/http" + "strconv" + "sync" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/tag" + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +// Handler is a http.Handler that is aware of the incoming request's span. +// +// The extracted span can be accessed from the incoming request's +// context. 
+//
+// span := trace.FromContext(r.Context())
+//
+// The server span will be automatically ended at the end of ServeHTTP.
+//
+// Incoming propagation mechanism is determined by the given HTTP propagators.
+type Handler struct {
+ // NoStats may be set to disable recording of stats.
+ NoStats bool
+
+ // NoTrace may be set to disable recording of traces.
+ NoTrace bool
+
+ // Propagation defines how traces are propagated. If unspecified,
+ // B3 propagation will be used.
+ Propagation propagation.HTTPFormat
+
+ // Handler is the handler used to handle the incoming request.
+ Handler http.Handler
+
+ // StartOptions are applied to the span started by this Handler around each
+ // request.
+ StartOptions trace.StartOptions
+}
+
+func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if !h.NoTrace {
+ var end func()
+ r, end = h.startTrace(w, r)
+ defer end()
+ }
+ if !h.NoStats {
+ var end func()
+ w, end = h.startStats(w, r)
+ defer end()
+ }
+
+ handler := h.Handler
+ if handler == nil {
+ handler = http.DefaultServeMux
+ }
+ handler.ServeHTTP(w, r)
+}
+
+func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) {
+ name := spanNameFromURL("Recv", r.URL)
+ p := h.Propagation
+ if p == nil {
+ p = defaultFormat
+ }
+ ctx := r.Context()
+ var span *trace.Span
+ if sc, ok := p.SpanContextFromRequest(r); ok {
+ span = trace.NewSpanWithRemoteParent(name, sc, h.StartOptions)
+ } else {
+ span = trace.NewSpan(name, nil, h.StartOptions)
+ }
+ ctx = trace.WithSpan(ctx, span)
+ span.SetAttributes(requestAttrs(r)...)
+ return r.WithContext(ctx), span.End
+}
+
+func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func()) {
+ ctx, _ := tag.New(r.Context(),
+ tag.Upsert(Host, r.URL.Host),
+ tag.Upsert(Path, r.URL.Path),
+ tag.Upsert(Method, r.Method))
+ track := &trackingResponseWriter{
+ start: time.Now(),
+ ctx: ctx,
+ writer: w,
+ }
+ if r.Body == nil {
+ // TODO: Handle cases where ContentLength is not set.
+ track.reqSize = -1
+ } else if r.ContentLength > 0 {
+ track.reqSize = r.ContentLength
+ }
+ stats.Record(ctx, ServerRequestCount.M(1))
+ return track, track.end
+}
+
+type trackingResponseWriter struct {
+ ctx context.Context
+ reqSize int64
+ respSize int64
+ start time.Time
+ statusCode int
+ endOnce sync.Once
+ writer http.ResponseWriter
+}
+
+var _ http.ResponseWriter = (*trackingResponseWriter)(nil)
+
+func (t *trackingResponseWriter) end() {
+ t.endOnce.Do(func() {
+ if t.statusCode == 0 {
+ t.statusCode = 200
+ }
+ m := []stats.Measurement{
+ ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)),
+ ServerResponseBytes.M(t.respSize),
+ }
+ if t.reqSize >= 0 {
+ m = append(m, ServerRequestBytes.M(t.reqSize))
+ }
+ ctx, _ := tag.New(t.ctx, tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)))
+ stats.Record(ctx, m...)
+ })
+}
+
+func (t *trackingResponseWriter) Header() http.Header {
+ return t.writer.Header()
+}
+
+func (t *trackingResponseWriter) Write(data []byte) (int, error) {
+ n, err := t.writer.Write(data)
+ t.respSize += int64(n)
+ return n, err
+}
+
+func (t *trackingResponseWriter) WriteHeader(statusCode int) {
+ t.writer.WriteHeader(statusCode)
+ t.statusCode = statusCode
+}
+
+func (t *trackingResponseWriter) Flush() {
+ if flusher, ok := t.writer.(http.Flusher); ok {
+ flusher.Flush()
+ }
+}
diff --git a/vendor/go.opencensus.io/plugin/ochttp/server_test.go b/vendor/go.opencensus.io/plugin/ochttp/server_test.go
new file mode 100644
index 000000000..70c4f2de8
--- /dev/null
+++ b/vendor/go.opencensus.io/plugin/ochttp/server_test.go
@@ -0,0 +1,117 @@
+package ochttp
+
+import (
+ "bytes"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "go.opencensus.io/stats/view"
+)
+
+func httpHandler(statusCode, respSize int) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(statusCode)
+ body := make([]byte, respSize)
+ w.Write(body)
+ })
+}
+
+func updateMean(mean float64, sample, count int) float64 {
+ if count == 1 {
+ return float64(sample)
+ }
+ return mean + (float64(sample)-mean)/float64(count)
+}
+
+func TestHandlerStatsCollection(t *testing.T) {
+ for _, v := range DefaultViews {
+ v.Subscribe()
+ }
+
+ views := []string{
+ "opencensus.io/http/server/request_count",
+ "opencensus.io/http/server/latency",
+ "opencensus.io/http/server/request_bytes",
+ "opencensus.io/http/server/response_bytes",
+ }
+
+ // TODO: test latency measurements?
+ tests := []struct {
+ name, method, target string
+ count, statusCode, reqSize, respSize int
+ }{
+ {"get 200", "GET", "http://opencensus.io/request/one", 10, 200, 512, 512},
+ {"post 503", "POST", "http://opencensus.io/request/two", 5, 503, 1024, 16384},
+ {"no body 302", "GET", "http://opencensus.io/request/three", 2, 302, 0, 0},
+ }
+ totalCount, meanReqSize, meanRespSize := 0, 0.0, 0.0
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ body := bytes.NewBuffer(make([]byte, test.reqSize))
+ r := httptest.NewRequest(test.method, test.target, body)
+ w := httptest.NewRecorder()
+ h := &Handler{
+ NoTrace: true,
+ Handler: httpHandler(test.statusCode, test.respSize),
+ }
+
+ for i := 0; i < test.count; i++ {
+ h.ServeHTTP(w, r)
+ totalCount++
+ // Distributions do not track sum directly, so we must
+ // mimic their behaviour to avoid rounding failures.
+ meanReqSize = updateMean(meanReqSize, test.reqSize, totalCount)
+ meanRespSize = updateMean(meanRespSize, test.respSize, totalCount)
+ }
+ })
+ }
+
+ for _, viewName := range views {
+ v := view.Find(viewName)
+ if v == nil {
+ t.Errorf("view not found %q", viewName)
+ continue
+ }
+ rows, err := view.RetrieveData(viewName)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ if got, want := len(rows), 1; got != want {
+ t.Errorf("len(%q) = %d; want %d", viewName, got, want)
+ continue
+ }
+ data := rows[0].Data
+
+ var count int
+ var sum float64
+ switch data := data.(type) {
+ case *view.CountData:
+ count = int(*data)
+ case *view.DistributionData:
+ count = int(data.Count)
+ sum = data.Sum()
+ default:
+ t.Errorf("Unknown data type: %v", data)
+ continue
+ }
+
+ if got, want := count, totalCount; got != want {
+ t.Fatalf("%s = %d; want %d", viewName, got, want)
+ }
+
+ // We can only check sum for distribution views.
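+ // The expected sum is reconstructed as mean*count, mirroring how a
+ // distribution accumulates its mean incrementally (see updateMean),
+ // since a naive running total could drift from Sum() by rounding error.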
+ switch viewName { + case "opencensus.io/http/server/request_bytes": + if got, want := sum, meanReqSize*float64(totalCount); got != want { + t.Fatalf("%s = %g; want %g", viewName, got, want) + } + case "opencensus.io/http/server/response_bytes": + if got, want := sum, meanRespSize*float64(totalCount); got != want { + t.Fatalf("%s = %g; want %g", viewName, got, want) + } + } + } +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/stats.go b/vendor/go.opencensus.io/plugin/ochttp/stats.go index df403b313..02e7f68dd 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/stats.go +++ b/vendor/go.opencensus.io/plugin/ochttp/stats.go @@ -20,7 +20,7 @@ import ( "go.opencensus.io/tag" ) -// The following client HTTP measures are supported for use in custom views: +// The following client HTTP measures are supported for use in custom views. var ( ClientRequestCount, _ = stats.Int64("opencensus.io/http/client/request_count", "Number of HTTP requests started", stats.UnitNone) ClientRequestBytes, _ = stats.Int64("opencensus.io/http/client/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes) @@ -28,46 +28,128 @@ var ( ClientLatency, _ = stats.Float64("opencensus.io/http/client/latency", "End-to-end latency", stats.UnitMilliseconds) ) +// The following server HTTP measures are supported for use in custom views: +var ( + ServerRequestCount, _ = stats.Int64("opencensus.io/http/server/request_count", "Number of HTTP requests started", stats.UnitNone) + ServerRequestBytes, _ = stats.Int64("opencensus.io/http/server/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes) + ServerResponseBytes, _ = stats.Int64("opencensus.io/http/server/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes) + ServerLatency, _ = stats.Float64("opencensus.io/http/server/latency", "End-to-end latency", stats.UnitMilliseconds) +) + // The following tags are applied to stats recorded by this package. Host, Path // and Method are applied to all measures. StatusCode is not applied to -// ClientRequestCount, since it is recorded before the status is known. +// ClientRequestCount or ServerRequestCount, since it is recorded before the status is known. var ( // Host is the value of the HTTP Host header. Host, _ = tag.NewKey("http.host") + // StatusCode is the numeric HTTP response status code, // or "error" if a transport error occurred and no status code was read. StatusCode, _ = tag.NewKey("http.status") + // Path is the URL path (not including query string) in the request. Path, _ = tag.NewKey("http.path") + // Method is the HTTP method of the request, capitalized (GET, POST, etc.). Method, _ = tag.NewKey("http.method") ) +// Default distributions used by views in this package. 
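+//
+// DefaultSizeDistribution bucket boundaries are in bytes and
+// DefaultLatencyDistribution bucket boundaries are in milliseconds,
+// matching the units of the measures above.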
var ( - DefaultSizeDistribution = view.DistributionAggregation([]float64{0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296}) - DefaultLatencyDistribution = view.DistributionAggregation([]float64{0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000}) + DefaultSizeDistribution = view.DistributionAggregation{0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296} + DefaultLatencyDistribution = view.DistributionAggregation{0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000} ) // Package ochttp provides some convenience views. // You need to subscribe to the views for data to actually be collected. var ( - ClientRequestCountView, _ = view.New("opencensus.io/http/client/request_count", "Count of HTTP requests started", nil, ClientRequestCount, view.CountAggregation{}) - ClientRequestBytesView, _ = view.New("opencensus.io/http/client/request_bytes", "Size distribution of HTTP request body", nil, ClientRequestBytes, DefaultSizeDistribution) - ClientResponseBytesView, _ = view.New("opencensus.io/http/client/response_bytes", "Size distribution of HTTP response body", nil, ClientResponseBytes, DefaultSizeDistribution) - ClientLatencyView, _ = view.New("opencensus.io/http/client/latency", "Latency distribution of HTTP requests", nil, ClientLatency, DefaultLatencyDistribution) + ClientRequestCountView = &view.View{ + Name: "opencensus.io/http/client/request_count", + Description: "Count of HTTP requests started", + Measure: ClientRequestCount, + Aggregation: view.CountAggregation{}, + } - ClientRequestCountByMethod, _ = view.New( - "opencensus.io/http/client/request_count_by_method", - "Client request count by HTTP method", - []tag.Key{Method}, - ClientRequestCount, - view.CountAggregation{}) - ClientResponseCountByStatusCode, _ = view.New( - "opencensus.io/http/client/response_count_by_status_code", - "Client response count by status code", - []tag.Key{StatusCode}, - ClientLatency, - view.CountAggregation{}) + ClientRequestBytesView = &view.View{ + Name: "opencensus.io/http/client/request_bytes", + Description: "Size distribution of HTTP request body", + Measure: ClientRequestBytes, + Aggregation: DefaultSizeDistribution, + } + + ClientResponseBytesView = &view.View{ + Name: "opencensus.io/http/client/response_bytes", + Description: "Size distribution of HTTP response body", + Measure: ClientResponseBytes, + Aggregation: DefaultSizeDistribution, + } + + ClientLatencyView = &view.View{ + Name: "opencensus.io/http/client/latency", + Description: "Latency distribution of HTTP requests", + Measure: ClientLatency, + Aggregation: DefaultLatencyDistribution, + } + + ClientRequestCountByMethod = &view.View{ + Name: "opencensus.io/http/client/request_count_by_method", + Description: "Client request count by HTTP method", + TagKeys: []tag.Key{Method}, + Measure: ClientRequestCount, + Aggregation: view.CountAggregation{}, + } + + ClientResponseCountByStatusCode = &view.View{ + Name: "opencensus.io/http/client/response_count_by_status_code", + Description: "Client response count by status code", + TagKeys: []tag.Key{StatusCode}, + Measure: ClientLatency, + Aggregation: view.CountAggregation{}, + } + + ServerRequestCountView = &view.View{ + Name: 
"opencensus.io/http/server/request_count", + Description: "Count of HTTP requests started", + Measure: ServerRequestCount, + Aggregation: view.CountAggregation{}, + } + + ServerRequestBytesView = &view.View{ + Name: "opencensus.io/http/server/request_bytes", + Description: "Size distribution of HTTP request body", + Measure: ServerRequestBytes, + Aggregation: DefaultSizeDistribution, + } + + ServerResponseBytesView = &view.View{ + Name: "opencensus.io/http/server/response_bytes", + Description: "Size distribution of HTTP response body", + Measure: ServerResponseBytes, + Aggregation: DefaultSizeDistribution, + } + + ServerLatencyView = &view.View{ + Name: "opencensus.io/http/server/latency", + Description: "Latency distribution of HTTP requests", + Measure: ServerLatency, + Aggregation: DefaultLatencyDistribution, + } + + ServerRequestCountByMethod = &view.View{ + Name: "opencensus.io/http/server/request_count_by_method", + Description: "Server request count by HTTP method", + TagKeys: []tag.Key{Method}, + Measure: ServerRequestCount, + Aggregation: view.CountAggregation{}, + } + + ServerResponseCountByStatusCode = &view.View{ + Name: "opencensus.io/http/server/response_count_by_status_code", + Description: "Server response count by status code", + TagKeys: []tag.Key{StatusCode}, + Measure: ServerLatency, + Aggregation: view.CountAggregation{}, + } DefaultViews = []*view.View{ ClientRequestCountView, @@ -76,5 +158,11 @@ var ( ClientLatencyView, ClientRequestCountByMethod, ClientResponseCountByStatusCode, + ServerRequestCountView, + ServerRequestBytesView, + ServerResponseBytesView, + ServerLatencyView, + ServerRequestCountByMethod, + ServerResponseCountByStatusCode, } ) diff --git a/vendor/go.opencensus.io/plugin/ochttp/stats_test.go b/vendor/go.opencensus.io/plugin/ochttp/stats_test.go deleted file mode 100644 index 7980433f5..000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/stats_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package ochttp - -import ( - "testing" - - "go.opencensus.io/stats" - "go.opencensus.io/tag" -) - -func TestVarsInitialized(t *testing.T) { - // Test that global initialization was successful - for i, k := range []tag.Key{Host, StatusCode, Path, Method} { - if k.Name() == "" { - t.Errorf("key not initialized: %d", i) - } - } - for i, m := range []stats.Measure{ClientRequestCount, ClientResponseBytes, ClientRequestBytes, ClientLatency} { - if m == nil { - t.Errorf("measure not initialized: %d", i) - } - } - for i, v := range DefaultViews { - if v == nil { - t.Errorf("view not initialized: %d", i) - } - } -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace.go b/vendor/go.opencensus.io/plugin/ochttp/trace.go index 157575a3c..70f18ac78 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/trace.go +++ b/vendor/go.opencensus.io/plugin/ochttp/trace.go @@ -20,12 +20,15 @@ import ( "net/url" "sync" + "go.opencensus.io/plugin/ochttp/propagation/b3" "go.opencensus.io/trace" "go.opencensus.io/trace/propagation" ) // TODO(jbd): Add godoc examples. +var defaultFormat propagation.HTTPFormat = &b3.HTTPFormat{} + // Attributes recorded on the span for the requests. // Only trace exporters will need them. 
const ( @@ -33,13 +36,13 @@ const ( MethodAttribute = "http.method" PathAttribute = "http.path" UserAgentAttribute = "http.user_agent" - StatusCodeAttribute = "http.status" + StatusCodeAttribute = "http.status_code" ) type traceTransport struct { - base http.RoundTripper - sampler trace.Sampler - format propagation.HTTPFormat + base http.RoundTripper + startOptions trace.StartOptions + format propagation.HTTPFormat } // TODO(jbd): Add message events for request and response size. @@ -52,7 +55,7 @@ func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { // TODO(jbd): Discuss whether we want to prefix // outgoing requests with Sent. parent := trace.FromContext(req.Context()) - span := trace.NewSpan(name, parent, trace.StartOptions{Sampler: t.sampler}) + span := trace.NewSpan(name, parent, t.startOptions) req = req.WithContext(trace.WithSpan(req.Context(), span)) if t.format != nil { @@ -132,55 +135,6 @@ func (t *traceTransport) CancelRequest(req *http.Request) { } } -// Handler is a http.Handler that is aware of the incoming request's span. -// -// The extracted span can be accessed from the incoming request's -// context. -// -// span := trace.FromContext(r.Context()) -// -// The server span will be automatically ended at the end of ServeHTTP. -// -// Incoming propagation mechanism is determined by the given HTTP propagators. -type Handler struct { - // Propagation defines how traces are propagated. If unspecified, - // B3 propagation will be used. - Propagation propagation.HTTPFormat - - // Handler is the handler used to handle the incoming request. - Handler http.Handler -} - -// TODO(jbd): Add Handler.NoTrace and Handler.NoStats. - -func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - name := spanNameFromURL("Recv", r.URL) - - p := h.Propagation - if p == nil { - p = defaultFormat - } - - ctx := r.Context() - var span *trace.Span - if sc, ok := p.SpanContextFromRequest(r); ok { - ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc, trace.StartOptions{}) - } else { - ctx, span = trace.StartSpan(ctx, name) - } - defer span.End() - - span.SetAttributes(requestAttrs(r)...) 
- - r = r.WithContext(ctx) - - handler := h.Handler - if handler == nil { - handler = http.DefaultServeMux - } - handler.ServeHTTP(w, r) -} - func spanNameFromURL(prefix string, u *url.URL) string { host := u.Hostname() port := ":" + u.Port() @@ -192,15 +146,15 @@ func spanNameFromURL(prefix string, u *url.URL) string { func requestAttrs(r *http.Request) []trace.Attribute { return []trace.Attribute{ - trace.StringAttribute{Key: PathAttribute, Value: r.URL.Path}, - trace.StringAttribute{Key: HostAttribute, Value: r.URL.Host}, - trace.StringAttribute{Key: MethodAttribute, Value: r.Method}, - trace.StringAttribute{Key: UserAgentAttribute, Value: r.UserAgent()}, + trace.StringAttribute(PathAttribute, r.URL.Path), + trace.StringAttribute(HostAttribute, r.URL.Host), + trace.StringAttribute(MethodAttribute, r.Method), + trace.StringAttribute(UserAgentAttribute, r.UserAgent()), } } func responseAttrs(resp *http.Response) []trace.Attribute { return []trace.Attribute{ - trace.Int64Attribute{Key: StatusCodeAttribute, Value: int64(resp.StatusCode)}, + trace.Int64Attribute(StatusCodeAttribute, int64(resp.StatusCode)), } } diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace_test.go b/vendor/go.opencensus.io/plugin/ochttp/trace_test.go index 163d40868..7cc843e59 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/trace_test.go +++ b/vendor/go.opencensus.io/plugin/ochttp/trace_test.go @@ -121,9 +121,6 @@ func TestTransport_RoundTrip(t *testing.T) { } func TestHandler(t *testing.T) { - // TODO(#431): remove SetDefaultSampler - trace.SetDefaultSampler(trace.ProbabilitySampler(0.0)) - traceID := [16]byte{16, 84, 69, 170, 120, 67, 188, 139, 242, 6, 177, 32, 0, 16, 0, 0} tests := []struct { header string @@ -157,8 +154,9 @@ func TestHandler(t *testing.T) { t.Errorf("TraceOptions = %v; want %v", got, want) } }), + StartOptions: trace.StartOptions{Sampler: trace.ProbabilitySampler(0.0)}, + Propagation: propagator, } - handler.Propagation = propagator req, _ := http.NewRequest("GET", "http://foo.com", nil) req.Header.Add("trace", tt.header) handler.ServeHTTP(nil, req) @@ -347,10 +345,10 @@ func TestRequestAttributes(t *testing.T) { return req }, wantAttrs: []trace.Attribute{ - trace.StringAttribute{Key: PathAttribute, Value: "/hello"}, - trace.StringAttribute{Key: HostAttribute, Value: "example.com"}, - trace.StringAttribute{Key: MethodAttribute, Value: "GET"}, - trace.StringAttribute{Key: UserAgentAttribute, Value: "ua"}, + trace.StringAttribute("http.path", "/hello"), + trace.StringAttribute("http.host", "example.com"), + trace.StringAttribute("http.method", "GET"), + trace.StringAttribute("http.user_agent", "ua"), }, }, } @@ -376,14 +374,14 @@ func TestResponseAttributes(t *testing.T) { name: "non-zero HTTP 200 response", resp: &http.Response{StatusCode: 200}, wantAttrs: []trace.Attribute{ - trace.Int64Attribute{Key: StatusCodeAttribute, Value: 200}, + trace.Int64Attribute("http.status_code", 200), }, }, { name: "zero HTTP 500 response", resp: &http.Response{StatusCode: 500}, wantAttrs: []trace.Attribute{ - trace.Int64Attribute{Key: StatusCodeAttribute, Value: 500}, + trace.Int64Attribute("http.status_code", 500), }, }, } diff --git a/vendor/go.opencensus.io/stats/example_test.go b/vendor/go.opencensus.io/stats/example_test.go index 4d2a250d3..b1807ba56 100644 --- a/vendor/go.opencensus.io/stats/example_test.go +++ b/vendor/go.opencensus.io/stats/example_test.go @@ -19,37 +19,13 @@ import ( "log" "go.opencensus.io/stats" - "go.opencensus.io/stats/view" ) -func Example_record() { - m, err := 
stats.Int64("my.org/measure/openconns", "open connections", "") +func ExampleRecord() { + ctx := context.Background() + openConns, err := stats.Int64("my.org/measure/openconns", "open connections", stats.UnitNone) if err != nil { log.Fatal(err) } - - stats.Record(context.TODO(), m.M(124)) // Record 124 open connections. -} - -func Example_view() { - m, err := stats.Int64("my.org/measure/openconns", "open connections", "") - if err != nil { - log.Fatal(err) - } - - view, err := view.New( - "my.org/views/openconns", - "open connections", - nil, - m, - view.DistributionAggregation([]float64{0, 1000, 2000}), - ) - if err != nil { - log.Fatal(err) - } - if err := view.Subscribe(); err != nil { - log.Fatal(err) - } - - // Use stats.RegisterExporter to export collected data. + stats.Record(ctx, openConns.M(124)) // Record 124 open connections. } diff --git a/vendor/go.opencensus.io/stats/measure.go b/vendor/go.opencensus.io/stats/measure.go index 86f303758..5c575daee 100644 --- a/vendor/go.opencensus.io/stats/measure.go +++ b/vendor/go.opencensus.io/stats/measure.go @@ -43,7 +43,6 @@ type measure struct { name string description string unit string - views int32 } // Name returns the name of the measure. @@ -69,6 +68,7 @@ var ( errMeasureNameTooLong = fmt.Errorf("measure name cannot be longer than %v", internal.MaxNameLength) ) +// FindMeasure finds the Measure instance, if any, associated with the given name. func FindMeasure(name string) Measure { mu.RLock() m := measures[name] @@ -91,8 +91,18 @@ func register(m Measure) (Measure, error) { // provides methods to create measurements of their kind. For example, Int64Measure // provides M to convert an int64 into a measurement. type Measurement struct { - Value float64 - Measure Measure + v float64 + m Measure +} + +// Value returns the value of the Measurement as a float64. +func (m Measurement) Value() float64 { + return m.v +} + +// Measure returns the Measure from which this Measurement was created. +func (m Measurement) Measure() Measure { + return m.m } func checkName(name string) error { diff --git a/vendor/go.opencensus.io/stats/measure_float64.go b/vendor/go.opencensus.io/stats/measure_float64.go index eeb37ae64..9f1bc6841 100644 --- a/vendor/go.opencensus.io/stats/measure_float64.go +++ b/vendor/go.opencensus.io/stats/measure_float64.go @@ -23,7 +23,7 @@ type Float64Measure struct { // M creates a new float64 measurement. // Use Record to record measurements. func (m *Float64Measure) M(v float64) Measurement { - return Measurement{Measure: m, Value: v} + return Measurement{m: m, v: v} } // Float64 creates a new measure of type Float64Measure. It returns diff --git a/vendor/go.opencensus.io/stats/measure_int64.go b/vendor/go.opencensus.io/stats/measure_int64.go index 7dbcbe892..5ff3bcd1b 100644 --- a/vendor/go.opencensus.io/stats/measure_int64.go +++ b/vendor/go.opencensus.io/stats/measure_int64.go @@ -23,7 +23,7 @@ type Int64Measure struct { // M creates a new int64 measurement. // Use Record to record measurements. func (m *Int64Measure) M(v int64) Measurement { - return Measurement{Measure: m, Value: float64(v)} + return Measurement{m: m, v: float64(v)} } // Int64 creates a new measure of type Int64Measure. 
It returns an diff --git a/vendor/go.opencensus.io/stats/measure_test.go b/vendor/go.opencensus.io/stats/measure_test.go index af1a004bd..8b2927c58 100644 --- a/vendor/go.opencensus.io/stats/measure_test.go +++ b/vendor/go.opencensus.io/stats/measure_test.go @@ -26,6 +26,11 @@ func TestCheckMeasureName(t *testing.T) { view: "my.org/measures/\007", wantErr: true, }, + { + name: "no emoji for you!", + view: "💩", + wantErr: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go index 07f4fff6f..791d35ef2 100644 --- a/vendor/go.opencensus.io/stats/record.go +++ b/vendor/go.opencensus.io/stats/record.go @@ -26,15 +26,6 @@ import ( // If there are any tags in the context, measurements will be tagged with them. func Record(ctx context.Context, ms ...Measurement) { if internal.DefaultRecorder != nil { - var record bool - for _, m := range ms { - if m.Measure != nil { - record = true - } - } - if !record { - return - } internal.DefaultRecorder(tag.FromContext(ctx), ms) } } diff --git a/vendor/go.opencensus.io/stats/view/benchmark_test.go b/vendor/go.opencensus.io/stats/view/benchmark_test.go new file mode 100644 index 000000000..d9a7d3e76 --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/benchmark_test.go @@ -0,0 +1,90 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "context" + "fmt" + "testing" + + "go.opencensus.io/stats" + "go.opencensus.io/tag" +) + +var ( + m, _ = stats.Float64("m", "", "") + k1, _ = tag.NewKey("k1") + k2, _ = tag.NewKey("k2") + k3, _ = tag.NewKey("k3") + k4, _ = tag.NewKey("k4") + k5, _ = tag.NewKey("k5") + k6, _ = tag.NewKey("k6") + k7, _ = tag.NewKey("k7") + k8, _ = tag.NewKey("k8") + view = &View{ + Measure: m, + Aggregation: DistributionAggregation{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + TagKeys: []tag.Key{k1, k2}, + } +) + +// BenchmarkRecordReqCommand benchmarks calling the internal recording machinery +// directly. 
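+//
+// It can be run on its own with, e.g.:
+//
+//	go test -run=NONE -bench=RecordReqCommand -benchmem ./stats/view/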
+func BenchmarkRecordReqCommand(b *testing.B) {
+ w := newWorker()
+
+ subscribe := &subscribeToViewReq{views: []*View{view}, err: make(chan error, 1)}
+ subscribe.handleCommand(w)
+ if err := <-subscribe.err; err != nil {
+ b.Fatal(err)
+ }
+
+ const tagCount = 10
+ ctxs := make([]context.Context, 0, tagCount)
+ for i := 0; i < tagCount; i++ {
+ ctx, _ := tag.New(context.Background(),
+ tag.Upsert(k1, fmt.Sprintf("v%d", i)),
+ tag.Upsert(k2, fmt.Sprintf("v%d", i)),
+ tag.Upsert(k3, fmt.Sprintf("v%d", i)),
+ tag.Upsert(k4, fmt.Sprintf("v%d", i)),
+ tag.Upsert(k5, fmt.Sprintf("v%d", i)),
+ tag.Upsert(k6, fmt.Sprintf("v%d", i)),
+ tag.Upsert(k7, fmt.Sprintf("v%d", i)),
+ tag.Upsert(k8, fmt.Sprintf("v%d", i)),
+ )
+ ctxs = append(ctxs, ctx)
+ }
+
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ record := &recordReq{
+ ms: []stats.Measurement{
+ m.M(1),
+ m.M(1),
+ m.M(1),
+ m.M(1),
+ m.M(1),
+ m.M(1),
+ m.M(1),
+ m.M(1),
+ },
+ tm: tag.FromContext(ctxs[i%len(ctxs)]),
+ }
+ record.handleCommand(w)
+ }
+}
diff --git a/vendor/go.opencensus.io/stats/view/example_test.go b/vendor/go.opencensus.io/stats/view/example_test.go
index 9422adb4d..6e6862a22 100644
--- a/vendor/go.opencensus.io/stats/view/example_test.go
+++ b/vendor/go.opencensus.io/stats/view/example_test.go
@@ -21,25 +21,18 @@ import (
"go.opencensus.io/stats/view"
)

-func Example_view() {
- m, err := stats.Int64("my.org/measure/openconns", "open connections", "")
- if err != nil {
- log.Fatal(err)
- }
+func Example() {
+ m, _ := stats.Int64("my.org/measure/openconns", "open connections", "")

- v, err := view.New(
- "my.org/views/openconns",
- "open connections distribution over one second time window",
- nil,
- m,
- view.DistributionAggregation([]float64{0, 1000, 2000}),
- )
+ err := view.Subscribe(&view.View{
+ Name: "my.org/views/openconns",
+ Description: "open connections",
+ Measure: m,
+ Aggregation: view.DistributionAggregation{0, 1000, 2000},
+ })
if err != nil {
log.Fatal(err)
}
- if err := v.Subscribe(); err != nil {
- log.Fatal(err)
- }

// Use stats.RegisterExporter to export collected data.
}
diff --git a/vendor/go.opencensus.io/stats/view/view.go b/vendor/go.opencensus.io/stats/view/view.go
index b4dcbc8b8..97a17816c 100644
--- a/vendor/go.opencensus.io/stats/view/view.go
+++ b/vendor/go.opencensus.io/stats/view/view.go
@@ -28,106 +28,131 @@ import (
"go.opencensus.io/tag"
)

-// View allows users to filter and aggregate the recorded events.
-// Each view has to be registered to enable data retrieval. Use New to
-// initiate new views. Unregister views once you don't want to collect any more
-// events.
+// View allows users to aggregate the recorded stats.Measurements.
+// Views need to be passed to the Subscribe function before data will be
+// collected and sent to Exporters.
type View struct {
- name string // name of View. Must be unique.
- description string
+ Name string // Name of View. Must be unique. If unset, will default to the name of the Measure.
+ Description string // Description is a human-readable description for this view.

- // tagKeys to perform the aggregation on.
- tagKeys []tag.Key
+ // TagKeys are the tag keys describing the grouping of this view.
+ // A single Row will be produced for each combination of associated tag values.
+ TagKeys []tag.Key

- // Examples of measures are cpu:tickCount, diskio:time...
- m stats.Measure
+ // Measure is a stats.Measure to aggregate in this view.
+ Measure stats.Measure

- subscribed uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access
-
- collector *collector
+ // Aggregation is the aggregation function to apply to the set of Measurements.
+ Aggregation Aggregation
}

-// New creates a new view with the given name and description.
-// View names need to be unique globally in the entire system.
-//
-// Data collection will only filter measurements recorded by the given keys.
-// Collected data will be processed by the given aggregation algorithm.
-//
-// Views need to be subscribed toin order to retrieve collection data.
-//
-// Once the view is no longer required, the view can be unregistered.
+// Deprecated: Use &View{}.
func New(name, description string, keys []tag.Key, measure stats.Measure, agg Aggregation) (*View, error) {
- if err := checkViewName(name); err != nil {
- return nil, err
- }
- var ks []tag.Key
- if len(keys) > 0 {
- ks = make([]tag.Key, len(keys))
- copy(ks, keys)
- sort.Slice(ks, func(i, j int) bool { return ks[i].Name() < ks[j].Name() })
+ if measure == nil {
+ panic("measure may not be nil")
}
return &View{
- name: name,
- description: description,
- tagKeys: ks,
- m: measure,
- collector: &collector{make(map[string]AggregationData), agg},
+ Name: name,
+ Description: description,
+ TagKeys: keys,
+ Measure: measure,
+ Aggregation: agg,
}, nil
}

-// Name returns the name of the view.
-func (v *View) Name() string {
- return v.name
+// WithName returns a copy of the View with a new name. This is useful for
+// renaming views to cope with limitations placed on metric names by various
+// backends.
+func (v *View) WithName(name string) *View {
+ vNew := *v
+ vNew.Name = name
+ return &vNew
}

-// Description returns the name of the view.
-func (v *View) Description() string {
- return v.description
+// same compares two views and returns true if they represent the same aggregation.
+func (v *View) same(other *View) bool {
+ if v == other {
+ return true
+ }
+ if v == nil {
+ return false
+ }
+ return reflect.DeepEqual(v.Aggregation, other.Aggregation) &&
+ v.Measure.Name() == other.Measure.Name()
}

-func (v *View) subscribe() {
+// canonicalized returns a validated View canonicalized by setting explicit
+// defaults for Name and Description and sorting the TagKeys.
+func (v *View) canonicalized() (*View, error) {
+ if v.Measure == nil {
+ return nil, fmt.Errorf("cannot subscribe view %q: measure not set", v.Name)
+ }
+ if v.Aggregation == nil {
+ return nil, fmt.Errorf("cannot subscribe view %q: aggregation not set", v.Name)
+ }
+ vc := *v
+ if vc.Name == "" {
+ vc.Name = vc.Measure.Name()
+ }
+ if vc.Description == "" {
+ vc.Description = vc.Measure.Description()
+ }
+ if err := checkViewName(vc.Name); err != nil {
+ return nil, err
+ }
+ vc.TagKeys = make([]tag.Key, len(v.TagKeys))
+ copy(vc.TagKeys, v.TagKeys)
+ sort.Slice(vc.TagKeys, func(i, j int) bool {
+ return vc.TagKeys[i].Name() < vc.TagKeys[j].Name()
+ })
+ return &vc, nil
+}
+
+// viewInternal is the internal representation of a View.
+type viewInternal struct {
+ view *View // view is the canonicalized View definition associated with this view.
+ subscribed uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access + collector *collector +} + +func newViewInternal(v *View) (*viewInternal, error) { + vc, err := v.canonicalized() + if err != nil { + return nil, err + } + return &viewInternal{ + view: vc, + collector: &collector{make(map[string]AggregationData), v.Aggregation}, + }, nil +} + +func (v *viewInternal) subscribe() { atomic.StoreUint32(&v.subscribed, 1) } -func (v *View) unsubscribe() { +func (v *viewInternal) unsubscribe() { atomic.StoreUint32(&v.subscribed, 0) } // isSubscribed returns true if the view is exporting // data by subscription. -func (v *View) isSubscribed() bool { +func (v *viewInternal) isSubscribed() bool { return atomic.LoadUint32(&v.subscribed) == 1 } -func (v *View) clearRows() { +func (v *viewInternal) clearRows() { v.collector.clearRows() } -// TagKeys returns the list of tag keys associated with this view. -func (v *View) TagKeys() []tag.Key { - return v.tagKeys +func (v *viewInternal) collectedRows() []*Row { + return v.collector.collectedRows(v.view.TagKeys) } -// Aggregation returns the data aggregation method used to aggregate -// the measurements collected by this view. -func (v *View) Aggregation() Aggregation { - return v.collector.a -} - -// Measure returns the measure the view is collecting measurements for. -func (v *View) Measure() stats.Measure { - return v.m -} - -func (v *View) collectedRows() []*Row { - return v.collector.collectedRows(v.tagKeys) -} - -func (v *View) addSample(m *tag.Map, val float64) { +func (v *viewInternal) addSample(m *tag.Map, val float64) { if !v.isSubscribed() { return } - sig := string(encodeWithKeys(m, v.tagKeys)) + sig := string(encodeWithKeys(m, v.view.TagKeys)) v.collector.addSample(sig, val) } @@ -158,7 +183,7 @@ func (r *Row) String() string { return buffer.String() } -// Equal returns true if both Rows are equal. Tags are expected to be ordered +// same returns true if both Rows are equal. Tags are expected to be ordered // by the key name. Even both rows have the same tags but the tags appear in // different orders it will return false. 
func (r *Row) Equal(other *Row) bool { diff --git a/vendor/go.opencensus.io/stats/view/view_test.go b/vendor/go.opencensus.io/stats/view/view_test.go index 70b577b79..c82310b94 100644 --- a/vendor/go.opencensus.io/stats/view/view_test.go +++ b/vendor/go.opencensus.io/stats/view/view_test.go @@ -19,15 +19,22 @@ import ( "context" "testing" + "go.opencensus.io/stats" "go.opencensus.io/tag" ) -func Test_View_MeasureFloat64_AggregationDistribution_WindowCumulative(t *testing.T) { +func Test_View_MeasureFloat64_AggregationDistribution(t *testing.T) { k1, _ := tag.NewKey("k1") k2, _ := tag.NewKey("k2") k3, _ := tag.NewKey("k3") - agg1 := DistributionAggregation([]float64{2}) - view, err := New("VF1", "desc VF1", []tag.Key{k1, k2}, nil, agg1) + agg1 := DistributionAggregation{2} + m, _ := stats.Int64("Test_View_MeasureFloat64_AggregationDistribution/m1", "", stats.UnitNone) + view1 := &View{ + TagKeys: []tag.Key{k1, k2}, + Measure: m, + Aggregation: agg1, + } + view, err := newViewInternal(view1) if err != nil { t.Fatal(err) } @@ -190,7 +197,8 @@ func Test_View_MeasureFloat64_AggregationSum(t *testing.T) { k1, _ := tag.NewKey("k1") k2, _ := tag.NewKey("k2") k3, _ := tag.NewKey("k3") - view, err := New("VF1", "desc VF1", []tag.Key{k1, k2}, nil, SumAggregation{}) + m, _ := stats.Int64("Test_View_MeasureFloat64_AggregationSum/m1", "", stats.UnitNone) + view, err := newViewInternal(&View{TagKeys: []tag.Key{k1, k2}, Measure: m, Aggregation: SumAggregation{}}) if err != nil { t.Fatal(err) } @@ -301,11 +309,36 @@ func Test_View_MeasureFloat64_AggregationSum(t *testing.T) { } } -func Test_View_MeasureFloat64_AggregationMean_WindowCumulative(t *testing.T) { +func TestCanonicalize(t *testing.T) { + k1, _ := tag.NewKey("k1") + k2, _ := tag.NewKey("k2") + m, _ := stats.Int64("TestCanonicalize/m1", "desc desc", stats.UnitNone) + v := &View{TagKeys: []tag.Key{k2, k1}, Measure: m, Aggregation: MeanAggregation{}} + vc, err := v.canonicalized() + if err != nil { + t.Fatal(err) + } + if got, want := vc.Name, "TestCanonicalize/m1"; got != want { + t.Errorf("vc.Name = %q; want %q", got, want) + } + if got, want := vc.Description, "desc desc"; got != want { + t.Errorf("vc.Description = %q; want %q", got, want) + } + if got, want := len(vc.TagKeys), 2; got != want { + t.Errorf("len(vc.TagKeys) = %d; want %d", got, want) + } + if got, want := vc.TagKeys[0].Name(), "k1"; got != want { + t.Errorf("vc.TagKeys[0].Name() = %q; want %q", got, want) + } +} + +func Test_View_MeasureFloat64_AggregationMean(t *testing.T) { k1, _ := tag.NewKey("k1") k2, _ := tag.NewKey("k2") k3, _ := tag.NewKey("k3") - view, err := New("VF1", "desc VF1", []tag.Key{k1, k2}, nil, MeanAggregation{}) + m, _ := stats.Int64("Test_View_MeasureFloat64_AggregationMean/m1", "", stats.UnitNone) + viewDesc := &View{TagKeys: []tag.Key{k1, k2}, Measure: m, Aggregation: MeanAggregation{}} + view, err := newViewInternal(viewDesc) if err != nil { t.Fatal(err) } @@ -424,13 +457,19 @@ func TestViewSortedKeys(t *testing.T) { k3, _ := tag.NewKey("c") ks := []tag.Key{k1, k3, k2} - v, err := New("sort_keys", "desc sort_keys", ks, nil, MeanAggregation{}) - if err != nil { - t.Fatalf("NewView() = %v", err) - } + m, _ := stats.Int64("TestViewSortedKeys/m1", "", stats.UnitNone) + Subscribe(&View{ + Name: "sort_keys", + Description: "desc sort_keys", + TagKeys: ks, + Measure: m, + Aggregation: &MeanAggregation{}, + }) + // Subscribe normalizes the view by sorting the tag keys, retrieve the normalized view + v := Find("sort_keys") want := []string{"a", "b", "c"} - vks := 
v.TagKeys() + vks := v.TagKeys if len(vks) != len(want) { t.Errorf("Keys = %+v; want %+v", vks, want) } diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go index 80c76e2c2..29f7e1dfd 100644 --- a/vendor/go.opencensus.io/stats/view/worker.go +++ b/vendor/go.opencensus.io/stats/view/worker.go @@ -16,7 +16,6 @@ package view import ( - "errors" "fmt" "time" @@ -32,14 +31,14 @@ func init() { } type measureRef struct { - measure stats.Measure - views map[*View]struct{} + measure string + views map[*viewInternal]struct{} } type worker struct { measures map[string]*measureRef - views map[string]*View - startTimes map[*View]time.Time + views map[string]*viewInternal + startTimes map[*viewInternal]time.Time timer *time.Ticker c chan command @@ -50,8 +49,8 @@ var defaultWorker *worker var defaultReportingDuration = 10 * time.Second -// Find returns a registered view associated with this name. -// If no registered view is found, nil is returned. +// Find returns a subscribed view associated with this name. +// If no subscribed view is found, nil is returned. func Find(name string) (v *View) { req := &getViewByNameReq{ name: name, @@ -62,67 +61,60 @@ func Find(name string) (v *View) { return resp.v } -// Register registers view. It returns an error if the view is already registered. -// -// Subscription automatically registers a view. -// Most users will not register directly but register via subscription. -// Registration can be used by libraries to claim a view name. -// -// Unregister the view once the view is not required anymore. -func Register(v *View) error { - req := &registerViewReq{ - v: v, - err: make(chan error), - } - defaultWorker.c <- req - return <-req.err +// Deprecated: Registering is a no-op. Use the Subscribe function. +func Register(_ *View) error { + return nil } -// Unregister removes the previously registered view. It returns an error -// if the view wasn't registered. All data collected and not reported for the -// corresponding view will be lost. The view is automatically be unsubscribed. -func Unregister(v *View) error { - req := &unregisterViewReq{ - v: v, - err: make(chan error), - } - defaultWorker.c <- req - return <-req.err +// Deprecated: Unregistering is a no-op; see Unsubscribe. +func Unregister(_ *View) error { + return nil } -// Subscribe subscribes a view. Once a view is subscribed, it reports data -// via the exporters. -// During subscription, if the view wasn't registered, it will be automatically -// registered. Once the view is no longer needed to export data, -// user should unsubscribe from the view. +// Deprecated: Use the Subscribe function. func (v *View) Subscribe() error { + return Subscribe(v) +} + +// Subscribe begins collecting data for the given views. +// Once a view is subscribed, it reports data to the registered exporters. +func Subscribe(views ...*View) error { req := &subscribeToViewReq{ - v: v, - err: make(chan error), + views: views, + err: make(chan error), } defaultWorker.c <- req return <-req.err } -// Unsubscribe unsubscribes a previously subscribed view. -// Data will not be exported from this view once unsubscription happens. -func (v *View) Unsubscribe() error { +// Unsubscribe the given views. Data will no longer be exported for these views +// after Unsubscribe returns.
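Register and Unregister are now no-ops, so the whole lifecycle runs through the variadic Subscribe/Unsubscribe above. A sketch of the new call pattern, continuing the previous example (per the worker_commands.go hunk further down, per-view subscription failures are joined into one error):

```go
// Subscribe several views in one call; v1 and v2 are *view.View literals.
if err := view.Subscribe(v1, v2); err != nil {
	log.Printf("some views failed to subscribe: %v", err)
}

// Find returns the canonicalized view kept by the worker, with
// defaults filled in and TagKeys sorted by key name.
if found := view.Find(v1.Name); found != nil {
	_ = found.TagKeys
}

// Stop exporting; collected rows are cleared once no subscriber remains.
view.Unsubscribe(v1, v2)
```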
+func Unsubscribe(views ...*View) { + names := make([]string, len(views)) + for i := range views { + names[i] = views[i].Name + } req := &unsubscribeFromViewReq{ - v: v, - err: make(chan error), + views: names, + done: make(chan struct{}), } defaultWorker.c <- req - return <-req.err + <-req.done } -// RetrieveData returns the current collected data for the view. -func (v *View) RetrieveData() ([]*Row, error) { +// Deprecated: Use the Unsubscribe function instead. +func (v *View) Unsubscribe() error { if v == nil { - return nil, errors.New("cannot retrieve data from nil view") + return nil } + Unsubscribe(v) + return nil +} + +func RetrieveData(viewName string) ([]*Row, error) { req := &retrieveDataReq{ now: time.Now(), - v: v, + v: viewName, c: make(chan *retrieveDataResp), } defaultWorker.c <- req @@ -155,8 +147,8 @@ func SetReportingPeriod(d time.Duration) { func newWorker() *worker { return &worker{ measures: make(map[string]*measureRef), - views: make(map[string]*View), - startTimes: make(map[*View]time.Time), + views: make(map[string]*viewInternal), + startTimes: make(map[*viewInternal]time.Time), timer: time.NewTicker(defaultReportingDuration), c: make(chan command, 1024), quit: make(chan bool), @@ -187,41 +179,36 @@ func (w *worker) stop() { <-w.done } -func (w *worker) getMeasureRef(m stats.Measure) *measureRef { - if mr, ok := w.measures[m.Name()]; ok { +func (w *worker) getMeasureRef(name string) *measureRef { + if mr, ok := w.measures[name]; ok { return mr } mr := &measureRef{ - measure: m, - views: make(map[*View]struct{}), + measure: name, + views: make(map[*viewInternal]struct{}), } - w.measures[m.Name()] = mr + w.measures[name] = mr return mr } -func (w *worker) tryRegisterView(v *View) error { - if err := checkViewName(v.name); err != nil { - return err +func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { + vi, err := newViewInternal(v) + if err != nil { + return nil, err } - if x, ok := w.views[v.Name()]; ok { - if x != v { - return fmt.Errorf("cannot register view %q; another view with the same name is already registered", v.Name()) + if x, ok := w.views[vi.view.Name]; ok { + if !x.view.same(vi.view) { + return nil, fmt.Errorf("cannot subscribe view %q; a different view with the same name is already subscribed", v.Name) } // the view is already registered so there is nothing to do and the // command is considered successful. - return nil + return x, nil } - - if v.Measure() == nil { - return fmt.Errorf("cannot register view %q: measure not defined", v.Name()) - } - - w.views[v.Name()] = v - ref := w.getMeasureRef(v.Measure()) - ref.views[v] = struct{}{} - - return nil + w.views[vi.view.Name] = vi + ref := w.getMeasureRef(vi.view.Measure.Name()) + ref.views[vi] = struct{}{} + return vi, nil } func (w *worker) reportUsage(now time.Time) { @@ -238,7 +225,7 @@ func (w *worker) reportUsage(now time.Time) { // to mutate the exported data. 
rows = deepCopyRowData(rows) viewData := &Data{ - View: v, + View: v.view, Start: w.startTimes[v], End: time.Now(), Rows: rows, diff --git a/vendor/go.opencensus.io/stats/view/worker_commands.go b/vendor/go.opencensus.io/stats/view/worker_commands.go index de6d56a0b..4e7c16089 100644 --- a/vendor/go.opencensus.io/stats/view/worker_commands.go +++ b/vendor/go.opencensus.io/stats/view/worker_commands.go @@ -16,7 +16,9 @@ package view import ( + "errors" "fmt" + "strings" "time" "go.opencensus.io/stats" @@ -38,89 +40,61 @@ type getViewByNameResp struct { } func (cmd *getViewByNameReq) handleCommand(w *worker) { - cmd.c <- &getViewByNameResp{w.views[cmd.name]} -} - -// registerViewReq is the command to register a view with the library. -type registerViewReq struct { - v *View - err chan error -} - -func (cmd *registerViewReq) handleCommand(w *worker) { - cmd.err <- w.tryRegisterView(cmd.v) -} - -// unregisterViewReq is the command to unregister a view from the library. -type unregisterViewReq struct { - v *View - err chan error -} - -func (cmd *unregisterViewReq) handleCommand(w *worker) { - v, ok := w.views[cmd.v.Name()] - if !ok { - cmd.err <- nil - return - } - if v != cmd.v { - cmd.err <- nil - return - } - if v.isSubscribed() { - cmd.err <- fmt.Errorf("cannot unregister view %q; all subscriptions must be unsubscribed first", cmd.v.Name()) - return - } - delete(w.views, cmd.v.Name()) - ref := w.getMeasureRef(v.Measure()) - delete(ref.views, v) - cmd.err <- nil + cmd.c <- &getViewByNameResp{w.views[cmd.name].view} } // subscribeToViewReq is the command to subscribe to a view. type subscribeToViewReq struct { - v *View - err chan error + views []*View + err chan error } func (cmd *subscribeToViewReq) handleCommand(w *worker) { - if cmd.v.isSubscribed() { + var errstr []string + for _, view := range cmd.views { + vi, err := w.tryRegisterView(view) + if err != nil { + errstr = append(errstr, fmt.Sprintf("%s: %v", view.Name, err)) + continue + } + vi.subscribe() + } + if len(errstr) > 0 { + cmd.err <- errors.New(strings.Join(errstr, "\n")) + } else { cmd.err <- nil - return } - if err := w.tryRegisterView(cmd.v); err != nil { - cmd.err <- fmt.Errorf("cannot subscribe to view: %v", err) - return - } - cmd.v.subscribe() - cmd.err <- nil } // unsubscribeFromViewReq is the command to unsubscribe from a view. Has no // impact on the data collection for clients that are pulling data from the // library. type unsubscribeFromViewReq struct { - v *View - err chan error + views []string + done chan struct{} } func (cmd *unsubscribeFromViewReq) handleCommand(w *worker) { - cmd.v.unsubscribe() - if !cmd.v.isSubscribed() { - // this was the last subscription and view is not collecting anymore. - // The collected data can be cleared. - cmd.v.clearRows() + for _, name := range cmd.views { + vi, ok := w.views[name] + if !ok { + continue + } + + vi.unsubscribe() + if !vi.isSubscribed() { + // this was the last subscription and view is not collecting anymore. + // The collected data can be cleared. + vi.clearRows() + } } - // we always return nil because this operation never fails. However we - // still need to return something on the channel to signal to the waiting - // go routine that the operation completed. - cmd.err <- nil + cmd.done <- struct{}{} } // retrieveDataReq is the command to retrieve data for a view.
type retrieveDataReq struct { now time.Time - v *View + v string c chan *retrieveDataResp } @@ -130,23 +104,24 @@ type retrieveDataResp struct { } func (cmd *retrieveDataReq) handleCommand(w *worker) { - if _, ok := w.views[cmd.v.Name()]; !ok { + vi, ok := w.views[cmd.v] + if !ok { cmd.c <- &retrieveDataResp{ nil, - fmt.Errorf("cannot retrieve data; view %q is not registered", cmd.v.Name()), + fmt.Errorf("cannot retrieve data; view %q is not registered", cmd.v), } return } - if !cmd.v.isSubscribed() { + if !vi.isSubscribed() { cmd.c <- &retrieveDataResp{ nil, - fmt.Errorf("cannot retrieve data; view %q has no subscriptions or collection is not forcibly started", cmd.v.Name()), + fmt.Errorf("cannot retrieve data; view %q has no subscriptions or collection is not forcibly started", cmd.v), } return } cmd.c <- &retrieveDataResp{ - cmd.v.collectedRows(), + vi.collectedRows(), nil, } } @@ -160,9 +135,9 @@ type recordReq struct { func (cmd *recordReq) handleCommand(w *worker) { for _, m := range cmd.ms { - ref := w.getMeasureRef(m.Measure) + ref := w.getMeasureRef(m.Measure().Name()) for v := range ref.views { - v.addSample(cmd.tm, m.Value) + v.addSample(cmd.tm, m.Value()) } } } diff --git a/vendor/go.opencensus.io/stats/view/worker_test.go b/vendor/go.opencensus.io/stats/view/worker_test.go index fd87de40a..9ea8bcb89 100644 --- a/vendor/go.opencensus.io/stats/view/worker_test.go +++ b/vendor/go.opencensus.io/stats/view/worker_test.go @@ -58,25 +58,11 @@ func Test_Worker_MeasureCreation(t *testing.T) { } } -func Test_Worker_ViewRegistration(t *testing.T) { +func Test_Worker_ViewSubscription(t *testing.T) { someError := errors.New("some error") sc1 := make(chan *Data) - type registerWant struct { - vID string - err error - } - type unregisterWant struct { - vID string - err error - } - type byNameWant struct { - name string - vID string - ok bool - } - type subscription struct { c chan *Data vID string @@ -84,20 +70,11 @@ type testCase struct { label string - regs []registerWant subscriptions []subscription - unregs []unregisterWant - bynames []byNameWant } tcs := []testCase{ { "register and subscribe to v1ID", - []registerWant{ - { - "v1ID", - nil, - }, - }, []subscription{ { sc1, @@ -105,37 +82,9 @@ "v1ID", nil, }, }, - []unregisterWant{ - { - "v1ID", - someError, - }, - }, - []byNameWant{ - { - "VF1", - "v1ID", - true, - }, - { - "VF2", - "vNilID", - false, - }, - }, }, { "register v1ID+v2ID, subscribe to v1ID", - []registerWant{ - { - "v1ID", - nil, - }, - { - "v2ID", - nil, - }, - }, []subscription{ { sc1, @@ -143,37 +92,9 @@ "v1ID", nil, }, }, - []unregisterWant{ - { - "v1ID", - someError, - }, - { - "v2ID", - someError, - }, - }, - []byNameWant{ - { - "VF1", - "v1ID", - true, - }, - { - "VF2", - "v2ID", - true, - }, - }, }, { "register to v1ID; subscribe to v1ID and view with same ID", - []registerWant{ - { - "v1ID", - nil, - }, - }, []subscription{ { sc1, @@ -186,80 +107,43 @@ "v1ID", nil, }, { sc1, "v1SameNameID", someError, }, }, - []unregisterWant{ - { - "v1ID", - someError, - }, - { - "v1SameNameID", - nil, - }, - }, - []byNameWant{ - { - "VF1", - "v1ID", - true, - }, - }, }, } - mf1, _ := stats.Float64("MF1/Test_Worker_ViewRegistration", "desc MF1", "unit") - mf2, _ := stats.Float64("MF2/Test_Worker_ViewRegistration", "desc MF2", "unit") + mf1, _ := stats.Float64("MF1/Test_Worker_ViewSubscription", "desc MF1", "unit") + mf2, _ := 
stats.Float64("MF2/Test_Worker_ViewSubscription", "desc MF2", "unit") for _, tc := range tcs { t.Run(tc.label, func(t *testing.T) { restart() - v1, _ := New("VF1", "desc VF1", nil, mf1, nil) - v11, _ := New("VF1", "desc duplicate name VF1", nil, mf1, nil) - v2, _ := New("VF2", "desc VF2", nil, mf2, nil) - views := map[string]*View{ - "v1ID": v1, - "v1SameNameID": v11, - "v2ID": v2, - "vNilID": nil, - } - - for _, reg := range tc.regs { - v := views[reg.vID] - err := Register(v) - if (err != nil) != (reg.err != nil) { - t.Errorf("%v: Register() = %v, want %v", tc.label, err, reg.err) - } - v.subscribe() + "v1ID": { + Name: "VF1", + Measure: mf1, + Aggregation: &CountAggregation{}, + }, + "v1SameNameID": { + Name: "VF1", + Description: "desc duplicate name VF1", + Measure: mf1, + Aggregation: &SumAggregation{}, + }, + "v2ID": { + Name: "VF2", + Measure: mf2, + Aggregation: &CountAggregation{}, + }, + "vNilID": nil, } for _, s := range tc.subscriptions { v := views[s.vID] - err := v.Subscribe() + err := Subscribe(v) if (err != nil) != (s.err != nil) { t.Errorf("%v: Subscribe() = %v, want %v", tc.label, err, s.err) } } - - for _, unreg := range tc.unregs { - v := views[unreg.vID] - err := Unregister(v) - if (err != nil) != (unreg.err != nil) { - t.Errorf("%v: Unregister() = %v; want %v", tc.label, err, unreg.err) - } - } - - for _, byname := range tc.bynames { - v := Find(byname.name) - if v == nil && byname.ok { - t.Errorf("%v: ViewByName(%q) = nil, want non-nil view", tc.label, byname.name) - } - - wantV := views[byname.vID] - if v != wantV { - t.Errorf("%v: ViewByName(%q) = %v; want %v", tc.label, byname.name, v, wantV) - } - } }) } } @@ -283,14 +167,8 @@ func Test_Worker_RecordFloat64(t *testing.T) { t.Fatal(err) } - v1, err := New("VF1", "desc VF1", []tag.Key{k1, k2}, m, CountAggregation{}) - if err != nil { - t.Fatal(err) - } - v2, err := New("VF2", "desc VF2", []tag.Key{k1, k2}, m, CountAggregation{}) - if err != nil { - t.Fatal(err) - } + v1 := &View{"VF1", "desc VF1", []tag.Key{k1, k2}, m, CountAggregation{}} + v2 := &View{"VF2", "desc VF2", []tag.Key{k1, k2}, m, CountAggregation{}} type want struct { v *View @@ -365,13 +243,13 @@ func Test_Worker_RecordFloat64(t *testing.T) { for _, tc := range tcs { for _, v := range tc.registrations { if err := Register(v); err != nil { - t.Fatalf("%v: Register(%v) = %v; want no errors", tc.label, v.Name(), err) + t.Fatalf("%v: Register(%v) = %v; want no errors", tc.label, v.Name, err) } } for _, v := range tc.subscriptions { if err := v.Subscribe(); err != nil { - t.Fatalf("%v: Subscribe(%v) = %v; want no errors", tc.label, v.Name(), err) + t.Fatalf("%v: Subscribe(%v) = %v; want no errors", tc.label, v.Name, err) } } @@ -380,9 +258,9 @@ func Test_Worker_RecordFloat64(t *testing.T) { } for _, w := range tc.wants { - gotRows, err := w.v.RetrieveData() + gotRows, err := RetrieveData(w.v.Name) if (err != nil) != (w.err != nil) { - t.Fatalf("%v: RetrieveData(%v) = %v; want no errors", tc.label, w.v.Name(), err) + t.Fatalf("%v: RetrieveData(%v) = %v; want no errors", tc.label, w.v.Name, err) } for _, got := range gotRows { if !containsRow(w.rows, got) { @@ -401,13 +279,13 @@ func Test_Worker_RecordFloat64(t *testing.T) { // cleaning up for _, v := range tc.subscriptions { if err := v.Unsubscribe(); err != nil { - t.Fatalf("%v: Unsubscribing from view %v errored with %v; want no error", tc.label, v.Name(), err) + t.Fatalf("%v: Unsubscribing from view %v errored with %v; want no error", tc.label, v.Name, err) } } for _, v := range tc.registrations { if 
err := Unregister(v); err != nil { - t.Fatalf("%v: Unregistering view %v errrored with %v; want no error", tc.label, v.Name(), err) + t.Fatalf("%v: Unregistering view %v errored with %v; want no error", tc.label, v.Name, err) } } } } @@ -421,9 +299,6 @@ func TestReportUsage(t *testing.T) { t.Fatalf("stats.Int64() = %v", err) } - cum1, _ := New("cum1", "", nil, m, CountAggregation{}) - cum2, _ := New("cum1", "", nil, m, CountAggregation{}) - tests := []struct { name string view *View }{ { name: "cum", - view: cum1, + view: &View{Name: "cum1", Measure: m, Aggregation: CountAggregation{}}, wantMaxCount: 8, }, { name: "cum2", - view: cum2, + view: &View{Name: "cum1", Measure: m, Aggregation: CountAggregation{}}, wantMaxCount: 8, }, } @@ -445,7 +320,8 @@ restart() SetReportingPeriod(25 * time.Millisecond) - if err := tt.view.Subscribe(); err != nil { + err = Subscribe(tt.view) + if err != nil { t.Fatalf("%v: cannot subscribe: %v", tt.name, err) } @@ -502,14 +378,11 @@ func TestWorkerStarttime(t *testing.T) { if err != nil { t.Fatalf("stats.Int64() = %v", err) } - v, err := New("testview", "", nil, m, CountAggregation{}) - if err != nil { - t.Fatalf("New() = %v", err) - } + v, _ := New("testview", "", nil, m, CountAggregation{}) SetReportingPeriod(25 * time.Millisecond) if err := v.Subscribe(); err != nil { - t.Fatalf("cannot subscribe to %v: %v", v.Name(), err) + t.Fatalf("cannot subscribe to %v: %v", v.Name, err) } e := &vdExporter{} diff --git a/vendor/go.opencensus.io/trace/basetypes.go b/vendor/go.opencensus.io/trace/basetypes.go index be62c7fbb..e57a05340 100644 --- a/vendor/go.opencensus.io/trace/basetypes.go +++ b/vendor/go.opencensus.io/trace/basetypes.go @@ -47,29 +47,41 @@ type Attribute interface { isAttribute() } -// BoolAttribute represents a bool-valued attribute. -type BoolAttribute struct { - Key string - Value bool +// BoolAttribute returns a bool-valued attribute. +func BoolAttribute(key string, value bool) Attribute { + return boolAttribute{key: key, value: value} } -func (b BoolAttribute) isAttribute() {} - -// Int64Attribute represents an int64-valued attribute. -type Int64Attribute struct { - Key string - Value int64 +type boolAttribute struct { + key string + value bool } -func (i Int64Attribute) isAttribute() {} +func (b boolAttribute) isAttribute() {} -// StringAttribute represents a string-valued attribute. -type StringAttribute struct { - Key string - Value string +type int64Attribute struct { + key string + value int64 } -func (s StringAttribute) isAttribute() {} +func (i int64Attribute) isAttribute() {} + +// Int64Attribute returns an int64-valued attribute. +func Int64Attribute(key string, value int64) Attribute { + return int64Attribute{key: key, value: value} +} + +type stringAttribute struct { + key string + value string +} + +func (s stringAttribute) isAttribute() {} + +// StringAttribute returns a string-valued attribute. +func StringAttribute(key string, value string) Attribute { + return stringAttribute{key: key, value: value} +} // LinkType specifies the relationship between the span that had the link // added, and the linked span.
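The basetypes.go hunk above turns the exported attribute structs into constructor functions over unexported types, so call sites move from composite literals to calls; the benchmark and test hunks below show the same migration. A sketch (span stands for any *trace.Span):

```go
// Before: span.SetAttributes(trace.StringAttribute{Key: "user", Value: "alice"})
// After the change, attributes are built by constructors:
span.SetAttributes(
	trace.BoolAttribute("cache_hit", true),
	trace.StringAttribute("user", "alice"),
	trace.Int64Attribute("items", 42),
)
```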
diff --git a/vendor/go.opencensus.io/trace/benchmark_test.go b/vendor/go.opencensus.io/trace/benchmark_test.go index 4c9ce6b1e..3d6945789 100644 --- a/vendor/go.opencensus.io/trace/benchmark_test.go +++ b/vendor/go.opencensus.io/trace/benchmark_test.go @@ -36,9 +36,9 @@ func BenchmarkSpanWithAnnotations_3(b *testing.B) { for i := 0; i < b.N; i++ { _, span := StartSpan(ctx, "/foo") span.SetAttributes( - BoolAttribute{Key: "key1", Value: false}, - StringAttribute{Key: "key2", Value: "hello"}, - Int64Attribute{Key: "key3", Value: 123}, + BoolAttribute("key1", false), + StringAttribute("key2", "hello"), + Int64Attribute("key3", 123), ) span.End() } @@ -51,12 +51,12 @@ func BenchmarkSpanWithAnnotations_6(b *testing.B) { for i := 0; i < b.N; i++ { _, span := StartSpan(ctx, "/foo") span.SetAttributes( - BoolAttribute{Key: "key1", Value: false}, - BoolAttribute{Key: "key2", Value: true}, - StringAttribute{Key: "key3", Value: "hello"}, - StringAttribute{Key: "key4", Value: "hello"}, - Int64Attribute{Key: "key5", Value: 123}, - Int64Attribute{Key: "key6", Value: 456}, + BoolAttribute("key1", false), + BoolAttribute("key2", true), + StringAttribute("key3", "hello"), + StringAttribute("key4", "hello"), + Int64Attribute("key5", 123), + Int64Attribute("key6", 456), ) span.End() } diff --git a/vendor/go.opencensus.io/trace/sampling.go b/vendor/go.opencensus.io/trace/sampling.go index a5847e3d4..8ff8db967 100644 --- a/vendor/go.opencensus.io/trace/sampling.go +++ b/vendor/go.opencensus.io/trace/sampling.go @@ -20,10 +20,8 @@ import ( const defaultSamplingProbability = 1e-4 -var defaultSampler Sampler - func init() { - defaultSampler = newDefaultSampler() + defaultSampler = ProbabilitySampler(defaultSamplingProbability) } func newDefaultSampler() Sampler { @@ -40,11 +38,8 @@ func SetDefaultSampler(sampler Sampler) { mu.Unlock() } -// Sampler is an interface for values that have a method that the trace library -// can call to determine whether to export a trace's spans. -type Sampler interface { - Sample(p SamplingParameters) SamplingDecision -} +// Sampler decides whether a trace should be sampled and exported. +type Sampler func(SamplingParameters) SamplingDecision // SamplingParameters contains the values passed to a Sampler. type SamplingParameters struct { @@ -69,47 +64,27 @@ func ProbabilitySampler(fraction float64) Sampler { } else if fraction >= 1 { return AlwaysSample() } - return probabilitySampler{ - traceIDUpperBound: uint64(fraction * (1 << 63)), - } -} -type probabilitySampler struct { - traceIDUpperBound uint64 -} - -var _ Sampler = (*probabilitySampler)(nil) - -func (s probabilitySampler) Sample(p SamplingParameters) (d SamplingDecision) { - if p.ParentContext.IsSampled() { - return SamplingDecision{Sample: true} - } - x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1 - return SamplingDecision{Sample: x < s.traceIDUpperBound} + traceIDUpperBound := uint64(fraction * (1 << 63)) + return Sampler(func(p SamplingParameters) SamplingDecision { + if p.ParentContext.IsSampled() { + return SamplingDecision{Sample: true} + } + x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1 + return SamplingDecision{Sample: x < traceIDUpperBound} + }) } // AlwaysSample returns a Sampler that samples every trace. 
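With Sampler redefined as a function type in the sampling.go hunk above, a custom sampler no longer needs a named type with a Sample method; any closure of the right signature will do. A sketch using only calls visible in this diff (the sampling policy itself is illustrative):

```go
// respectParent samples a span exactly when its parent was sampled.
var respectParent trace.Sampler = func(p trace.SamplingParameters) trace.SamplingDecision {
	return trace.SamplingDecision{Sample: p.ParentContext.IsSampled()}
}

func init() {
	// Install it process-wide, as before.
	trace.SetDefaultSampler(respectParent)
}
```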
func AlwaysSample() Sampler { - return always{} -} - -type always struct{} - -var _ Sampler = always{} - -func (a always) Sample(p SamplingParameters) SamplingDecision { - return SamplingDecision{Sample: true} + return func(p SamplingParameters) SamplingDecision { + return SamplingDecision{Sample: true} + } } // NeverSample returns a Sampler that samples no traces. func NeverSample() Sampler { - return never{} -} - -type never struct{} - -var _ Sampler = never{} - -func (n never) Sample(p SamplingParameters) SamplingDecision { - return SamplingDecision{Sample: false} + return func(p SamplingParameters) SamplingDecision { + return SamplingDecision{Sample: false} + } } diff --git a/vendor/go.opencensus.io/trace/sampling_test.go b/vendor/go.opencensus.io/trace/sampling_test.go deleted file mode 100644 index 5953a8fad..000000000 --- a/vendor/go.opencensus.io/trace/sampling_test.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import "testing" -import "reflect" - -func TestSetDefaultSampler(t *testing.T) { - tests := []struct { - name string - sampler Sampler - want Sampler - }{ - { - name: "when the sampler is set to nil, the default sampler should be used", - sampler: nil, - want: ProbabilitySampler(defaultSamplingProbability), - }, - { - name: "setting a NeverSample updates the sampler", - sampler: NeverSample(), - want: NeverSample(), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - SetDefaultSampler(tt.sampler) - if !reflect.DeepEqual(defaultSampler, tt.want) { - t.Errorf("%q. SetDefaultSampler() = %v, want %v", tt.name, defaultSampler, tt.want) - } - SetDefaultSampler(nil) // Need to reset the sampler between each test - }) - } -} diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go index 07735b629..b6ad0c60e 100644 --- a/vendor/go.opencensus.io/trace/trace.go +++ b/vendor/go.opencensus.io/trace/trace.go @@ -127,28 +127,6 @@ func StartSpan(ctx context.Context, name string) (context.Context, *Span) { return WithSpan(ctx, span), span } -// StartSpanWithOptions starts a new child span of the current span in the context. -// -// If there is no span in the context, creates a new trace and span. -// -// Deprecated: Use StartSpan(...), or WithSpan(ctx, NewSpan(...)). -func StartSpanWithOptions(ctx context.Context, name string, o StartOptions) (context.Context, *Span) { - parentSpan, _ := ctx.Value(contextKey{}).(*Span) - span := NewSpan(name, parentSpan, o) - return WithSpan(ctx, span), span -} - -// StartSpanWithRemoteParent starts a new child span with the given parent SpanContext. -// -// If there is an existing span in ctx, it is ignored -- the returned Span is a -// child of the span specified by parent. -// -// Deprecated: Use WithSpan(ctx, NewSpanWithRemoteParent(...)). 
-func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o StartOptions) (context.Context, *Span) { - span := NewSpanWithRemoteParent(name, parent, o) - return WithSpan(ctx, span), span -} - // NewSpan returns a new span. // // If parent is not nil, created span will be a child of the parent. @@ -187,7 +165,7 @@ func startSpanInternal(name string, hasParent bool, parent SpanContext, remotePa if o.Sampler != nil { sampler = o.Sampler } - span.spanContext.setIsSampled(sampler.Sample(SamplingParameters{ + span.spanContext.setIsSampled(sampler(SamplingParameters{ ParentContext: parent, TraceID: span.spanContext.TraceID, SpanID: span.spanContext.SpanID, @@ -228,7 +206,7 @@ func (s *Span) End() { s.exportOnce.Do(func() { // TODO: optimize to avoid this call if sd won't be used. sd := s.makeSpanData() - sd.EndTime = sd.StartTime.Add(time.Since(sd.StartTime)) + sd.EndTime = internal.MonotonicEndTime(sd.StartTime) if s.spanStore != nil { s.spanStore.finished(s, sd) } @@ -296,12 +274,12 @@ func (s *Span) SetAttributes(attributes ...Attribute) { func copyAttributes(m map[string]interface{}, attributes []Attribute) { for _, a := range attributes { switch a := a.(type) { - case BoolAttribute: - m[a.Key] = a.Value - case Int64Attribute: - m[a.Key] = a.Value - case StringAttribute: - m[a.Key] = a.Value + case boolAttribute: + m[a.key] = a.value + case int64Attribute: + m[a.key] = a.value + case stringAttribute: + m[a.key] = a.value } } } @@ -424,11 +402,12 @@ func (s *Span) String() string { } var ( - mu sync.Mutex // protects the variables below - traceIDRand *rand.Rand - traceIDAdd [2]uint64 - nextSpanID uint64 - spanIDInc uint64 + mu sync.Mutex // protects the variables below + traceIDRand *rand.Rand + traceIDAdd [2]uint64 + nextSpanID uint64 + spanIDInc uint64 + defaultSampler Sampler ) func init() { diff --git a/vendor/go.opencensus.io/trace/trace_test.go b/vendor/go.opencensus.io/trace/trace_test.go index 0399e705b..957ab7acb 100644 --- a/vendor/go.opencensus.io/trace/trace_test.go +++ b/vendor/go.opencensus.io/trace/trace_test.go @@ -109,7 +109,7 @@ func TestSampling(t *testing.T) { SpanID: sid, TraceOptions: test.parentTraceOptions, } - ctx, _ = StartSpanWithRemoteParent(context.Background(), "foo", sc, StartOptions{ + ctx, _ = startSpanWithRemoteParent(context.Background(), "foo", sc, StartOptions{ Sampler: test.sampler, }) } else if test.localParent { @@ -117,12 +117,12 @@ func TestSampling(t *testing.T) { if test.parentTraceOptions == 1 { sampler = AlwaysSample() } - ctx2, _ := StartSpanWithOptions(context.Background(), "foo", StartOptions{Sampler: sampler}) - ctx, _ = StartSpanWithOptions(ctx2, "foo", StartOptions{ + ctx2, _ := startSpanWithOptions(context.Background(), "foo", StartOptions{Sampler: sampler}) + ctx, _ = startSpanWithOptions(ctx2, "foo", StartOptions{ Sampler: test.sampler, }) } else { - ctx, _ = StartSpanWithOptions(context.Background(), "foo", StartOptions{ + ctx, _ = startSpanWithOptions(context.Background(), "foo", StartOptions{ Sampler: test.sampler, }) } @@ -159,7 +159,7 @@ func TestSampling(t *testing.T) { if test.parentTraceOptions == 1 { sampler = AlwaysSample() } - ctx2, _ := StartSpanWithOptions(context.Background(), "foo", StartOptions{Sampler: sampler}) + ctx2, _ := startSpanWithOptions(context.Background(), "foo", StartOptions{Sampler: sampler}) ctx, _ := StartSpan(ctx2, "foo") sc := FromContext(ctx).SpanContext() if (sc == SpanContext{}) { @@ -180,7 +180,7 @@ func TestSampling(t *testing.T) { func TestProbabilitySampler(t *testing.T) 
{ exported := 0 for i := 0; i < 1000; i++ { - _, span := StartSpanWithOptions(context.Background(), "foo", StartOptions{ + _, span := startSpanWithOptions(context.Background(), "foo", StartOptions{ Sampler: ProbabilitySampler(0.3), }) if span.SpanContext().IsSampled() { @@ -192,18 +192,23 @@ func TestProbabilitySampler(t *testing.T) { } } +func startSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o StartOptions) (context.Context, *Span) { + span := NewSpanWithRemoteParent(name, parent, o) + return WithSpan(ctx, span), span +} + func TestStartSpanWithRemoteParent(t *testing.T) { sc := SpanContext{ TraceID: tid, SpanID: sid, TraceOptions: 0x0, } - ctx, _ := StartSpanWithRemoteParent(context.Background(), "StartSpanWithRemoteParent", sc, StartOptions{}) + ctx, _ := startSpanWithRemoteParent(context.Background(), "startSpanWithRemoteParent", sc, StartOptions{}) if err := checkChild(sc, FromContext(ctx)); err != nil { t.Error(err) } - ctx, _ = StartSpanWithRemoteParent(context.Background(), "StartSpanWithRemoteParent", sc, StartOptions{}) + ctx, _ = startSpanWithRemoteParent(context.Background(), "startSpanWithRemoteParent", sc, StartOptions{}) if err := checkChild(sc, FromContext(ctx)); err != nil { t.Error(err) } @@ -213,12 +218,12 @@ func TestStartSpanWithRemoteParent(t *testing.T) { SpanID: sid, TraceOptions: 0x1, } - ctx, _ = StartSpanWithRemoteParent(context.Background(), "StartSpanWithRemoteParent", sc, StartOptions{}) + ctx, _ = startSpanWithRemoteParent(context.Background(), "startSpanWithRemoteParent", sc, StartOptions{}) if err := checkChild(sc, FromContext(ctx)); err != nil { t.Error(err) } - ctx, _ = StartSpanWithRemoteParent(context.Background(), "StartSpanWithRemoteParent", sc, StartOptions{}) + ctx, _ = startSpanWithRemoteParent(context.Background(), "startSpanWithRemoteParent", sc, StartOptions{}) if err := checkChild(sc, FromContext(ctx)); err != nil { t.Error(err) } @@ -292,7 +297,7 @@ func checkTime(x *time.Time) bool { func TestSetSpanAttributes(t *testing.T) { span := startSpan() - span.SetAttributes(StringAttribute{"key1", "value1"}) + span.SetAttributes(StringAttribute("key1", "value1")) got, err := endSpan(span) if err != nil { t.Fatal(err) @@ -316,8 +321,8 @@ func TestSetSpanAttributes(t *testing.T) { func TestAnnotations(t *testing.T) { span := startSpan() - span.Annotatef([]Attribute{StringAttribute{"key1", "value1"}}, "%f", 1.5) - span.Annotate([]Attribute{StringAttribute{"key2", "value2"}}, "Annotate") + span.Annotatef([]Attribute{StringAttribute("key1", "value1")}, "%f", 1.5) + span.Annotate([]Attribute{StringAttribute("key2", "value2")}, "Annotate") got, err := endSpan(span) if err != nil { t.Fatal(err) @@ -510,12 +515,18 @@ func (e exporter) ExportSpan(s *SpanData) { e[s.Name] = s } +func startSpanWithOptions(ctx context.Context, name string, o StartOptions) (context.Context, *Span) { + parentSpan, _ := ctx.Value(contextKey{}).(*Span) + span := NewSpan(name, parentSpan, o) + return WithSpan(ctx, span), span +} + func Test_Issue328_EndSpanTwice(t *testing.T) { spans := make(exporter) RegisterExporter(&spans) defer UnregisterExporter(&spans) ctx := context.Background() - ctx, span := StartSpanWithOptions(ctx, "span-1", StartOptions{Sampler: AlwaysSample()}) + ctx, span := startSpanWithOptions(ctx, "span-1", StartOptions{Sampler: AlwaysSample()}) span.End() span.End() UnregisterExporter(&spans) @@ -528,12 +539,12 @@ func TestStartSpanAfterEnd(t *testing.T) { spans := make(exporter) RegisterExporter(&spans) defer UnregisterExporter(&spans) 
- ctx, span0 := StartSpanWithOptions(context.Background(), "parent", StartOptions{Sampler: AlwaysSample()}) - ctx1, span1 := StartSpanWithOptions(ctx, "span-1", StartOptions{Sampler: AlwaysSample()}) + ctx, span0 := startSpanWithOptions(context.Background(), "parent", StartOptions{Sampler: AlwaysSample()}) + ctx1, span1 := startSpanWithOptions(ctx, "span-1", StartOptions{Sampler: AlwaysSample()}) span1.End() // Start a new span with the context containing span-1 // even though span-1 is ended, we still add this as a new child of span-1 - _, span2 := StartSpanWithOptions(ctx1, "span-2", StartOptions{Sampler: AlwaysSample()}) + _, span2 := startSpanWithOptions(ctx1, "span-2", StartOptions{Sampler: AlwaysSample()}) span2.End() span0.End() UnregisterExporter(&spans) diff --git a/vendor/go.opencensus.io/zpages/code_string.go b/vendor/go.opencensus.io/zpages/code_string.go deleted file mode 100644 index c3107c3e5..000000000 --- a/vendor/go.opencensus.io/zpages/code_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type=code"; DO NOT EDIT. - -package zpages - -import "strconv" - -const _code_name = "OKCancelledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated" - -var _code_index = [...]uint8{0, 2, 11, 18, 33, 49, 57, 70, 86, 103, 121, 128, 138, 151, 159, 170, 178, 193} - -func (i code) String() string { - if i < 0 || i >= code(len(_code_index)-1) { - return "code(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _code_name[_code_index[i]:_code_index[i+1]] -} diff --git a/vendor/go.opencensus.io/zpages/codes.go b/vendor/go.opencensus.io/zpages/codes.go deleted file mode 100644 index 83b7a097a..000000000 --- a/vendor/go.opencensus.io/zpages/codes.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zpages - -type code int - -//go:generate stringer -type=code -const ( - OK code = iota - Cancelled - Unknown - InvalidArgument - DeadlineExceeded - NotFound - AlreadyExists - PermissionDenied - ResourceExhausted - FailedPrecondition - Aborted - OutOfRange - Unimplemented - Internal - Unavailable - DataLoss - Unauthenticated -) diff --git a/vendor/go.opencensus.io/plugin/ochttp/default_format.go b/vendor/go.opencensus.io/zpages/example_test.go similarity index 70% rename from vendor/go.opencensus.io/plugin/ochttp/default_format.go rename to vendor/go.opencensus.io/zpages/example_test.go index 992dd2dab..1af712b35 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/default_format.go +++ b/vendor/go.opencensus.io/zpages/example_test.go @@ -12,11 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package ochttp +package zpages_test import ( - "go.opencensus.io/plugin/ochttp/propagation/b3" - "go.opencensus.io/trace/propagation" + "log" + "net/http" + + "go.opencensus.io/zpages" ) -var defaultFormat propagation.HTTPFormat = &b3.HTTPFormat{} +func Example() { + // Both /debug/tracez and /debug/rpcz will be served. + http.Handle("/debug/", http.StripPrefix("/debug", zpages.Handler)) + log.Fatal(http.ListenAndServe(":9999", nil)) +} diff --git a/vendor/go.opencensus.io/zpages/rpcz.go b/vendor/go.opencensus.io/zpages/rpcz.go index c0d70dd00..d3b3af795 100644 --- a/vendor/go.opencensus.io/zpages/rpcz.go +++ b/vendor/go.opencensus.io/zpages/rpcz.go @@ -58,16 +58,18 @@ var ( ) func init() { - for view := range viewType { - if err := view.Subscribe(); err != nil { - log.Printf("error subscribing to view %q: %v", view.Name(), err) - } + views := make([]*view.View, 0, len(viewType)) + for v := range viewType { + views = append(views, v) + } + err := view.Subscribe(views...) + if err != nil { + log.Printf("error subscribing to views: %v", err) } view.RegisterExporter(snapExporter{}) } -// RpczHandler is a handler for /rpcz. -func RpczHandler(w http.ResponseWriter, r *http.Request) { +func rpczHandler(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/html; charset=utf-8") WriteHTMLRpczPage(w) } diff --git a/vendor/go.opencensus.io/zpages/tracez.go b/vendor/go.opencensus.io/zpages/tracez.go index 6a8dbd5a9..6469632f1 100644 --- a/vendor/go.opencensus.io/zpages/tracez.go +++ b/vendor/go.opencensus.io/zpages/tracez.go @@ -86,8 +86,7 @@ func canonicalCodeString(code int32) string { return canonicalCodes[code] } -// TracezHandler is a handler for /tracez. -func TracezHandler(w http.ResponseWriter, r *http.Request) { +func tracezHandler(w http.ResponseWriter, r *http.Request) { r.ParseForm() w.Header().Set("Content-Type", "text/html; charset=utf-8") name := r.Form.Get(spanNameQueryField) diff --git a/vendor/go.opencensus.io/zpages/zpages.go b/vendor/go.opencensus.io/zpages/zpages.go index d38461fea..fc47d6f9a 100644 --- a/vendor/go.opencensus.io/zpages/zpages.go +++ b/vendor/go.opencensus.io/zpages/zpages.go @@ -19,12 +19,6 @@ // // Users can also embed the HTML for stats and traces in custom status pages. // -// To add the handlers to the default HTTP request multiplexer with the patterns -// /rpcz and /tracez, call: -// zpages.AddDefaultHTTPHandlers() -// If your program does not already start an HTTP server, you can use: -// go func() { log.Fatal(http.ListenAndServe(":8080", nil)) }() -// // zpages are currently work in progress and cannot display minutely and // hourly stats correctly. // @@ -38,27 +32,15 @@ package zpages // import "go.opencensus.io/zpages" import ( "net/http" - "sync" ) -var once sync.Once - -// AddDefaultHTTPHandlers adds handlers for /rpcz and /tracez to the default HTTP request multiplexer. -// Deprecated: Use Handler. -func AddDefaultHTTPHandlers() { - once.Do(func() { - http.HandleFunc("/rpcz", RpczHandler) - http.HandleFunc("/tracez", TracezHandler) - }) -} - // Handler is an http.Handler that serves the zpages.
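With RpczHandler and TracezHandler unexported, the zpages.Handler declared below is the single entry point. Besides the StripPrefix mounting shown in the new example test above, it can be served at the root of a dedicated mux (a sketch; the port is illustrative):

```go
mux := http.NewServeMux()
mux.Handle("/", zpages.Handler) // serves /rpcz and /tracez
log.Fatal(http.ListenAndServe(":8081", mux))
```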
var Handler http.Handler func init() { zpagesMux := http.NewServeMux() - zpagesMux.HandleFunc("/rpcz", RpczHandler) - zpagesMux.HandleFunc("/tracez", TracezHandler) + zpagesMux.HandleFunc("/rpcz", rpczHandler) + zpagesMux.HandleFunc("/tracez", tracezHandler) zpagesMux.Handle("/public/", http.FileServer(fs)) Handler = zpagesMux } diff --git a/vendor/golang.org/x/crypto/acme/acme.go b/vendor/golang.org/x/crypto/acme/acme.go index fa9c4b39e..1f4fb69ed 100644 --- a/vendor/golang.org/x/crypto/acme/acme.go +++ b/vendor/golang.org/x/crypto/acme/acme.go @@ -400,7 +400,7 @@ func (c *Client) RevokeAuthorization(ctx context.Context, url string) error { // WaitAuthorization polls an authorization at the given URL // until it is in one of the final states, StatusValid or StatusInvalid, -// or the context is done. +// the ACME CA responded with a 4xx error code, or the context is done. // // It returns a non-nil Authorization only if its Status is StatusValid. // In all other cases WaitAuthorization returns an error. @@ -412,6 +412,13 @@ func (c *Client) WaitAuthorization(ctx context.Context, url string) (*Authorizat if err != nil { return nil, err } + if res.StatusCode >= 400 && res.StatusCode <= 499 { + // Non-retriable error. For instance, Let's Encrypt may return 404 Not Found + // when requesting an expired authorization. + defer res.Body.Close() + return nil, responseError(res) + } + retry := res.Header.Get("Retry-After") if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusAccepted { res.Body.Close() diff --git a/vendor/golang.org/x/crypto/acme/acme_test.go b/vendor/golang.org/x/crypto/acme/acme_test.go index 89f2efaa5..63cb79b98 100644 --- a/vendor/golang.org/x/crypto/acme/acme_test.go +++ b/vendor/golang.org/x/crypto/acme/acme_test.go @@ -549,6 +549,34 @@ func TestWaitAuthorizationInvalid(t *testing.T) { } } +func TestWaitAuthorizationClientError(t *testing.T) { + const code = http.StatusBadRequest + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(code) + })) + defer ts.Close() + + ch := make(chan error, 1) + go func() { + var client Client + _, err := client.WaitAuthorization(context.Background(), ts.URL) + ch <- err + }() + + select { + case <-time.After(3 * time.Second): + t.Fatal("WaitAuthz took too long to return") + case err := <-ch: + res, ok := err.(*Error) + if !ok { + t.Fatalf("err is %v (%T); want a non-nil *Error", err, err) + } + if res.StatusCode != code { + t.Errorf("res.StatusCode = %d; want %d", res.StatusCode, code) + } + } +} + func TestWaitAuthorizationCancel(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Retry-After", "60") diff --git a/vendor/golang.org/x/crypto/acme/autocert/renewal.go b/vendor/golang.org/x/crypto/acme/autocert/renewal.go index 6c5da2bc8..2a3a0a706 100644 --- a/vendor/golang.org/x/crypto/acme/autocert/renewal.go +++ b/vendor/golang.org/x/crypto/acme/autocert/renewal.go @@ -102,7 +102,9 @@ func (dr *domainRenewal) do(ctx context.Context) (time.Duration, error) { if err != nil { return 0, err } - dr.m.cachePut(ctx, dr.domain, tlscert) + if err := dr.m.cachePut(ctx, dr.domain, tlscert); err != nil { + return 0, err + } dr.m.stateMu.Lock() defer dr.m.stateMu.Unlock() // m.state is guaranteed to be non-nil at this point diff --git a/vendor/golang.org/x/crypto/argon2/argon2.go b/vendor/golang.org/x/crypto/argon2/argon2.go index 798f5cbda..b423feaea 100644 --- a/vendor/golang.org/x/crypto/argon2/argon2.go +++ 
b/vendor/golang.org/x/crypto/argon2/argon2.go @@ -54,11 +54,12 @@ const ( // Key derives a key from the password, salt, and cost parameters using Argon2i // returning a byte slice of length keyLen that can be used as cryptographic -// key. The CPU cost and parallism degree must be greater than zero. +// key. The CPU cost and parallelism degree must be greater than zero. // // For example, you can get a derived key for e.g. AES-256 (which needs a -// 32-byte key) by doing: `key := argon2.Key([]byte("some password"), salt, 3, -// 32*1024, 4, 32)` +// 32-byte key) by doing: +// +// key := argon2.Key([]byte("some password"), salt, 3, 32*1024, 4, 32) // // The draft RFC recommends[2] time=3, and memory=32*1024 is a sensible number. // If using that amount of memory (32 MB) is not possible in some contexts then @@ -76,12 +77,13 @@ func Key(password, salt []byte, time, memory uint32, threads uint8, keyLen uint3 // IDKey derives a key from the password, salt, and cost parameters using // Argon2id returning a byte slice of length keyLen that can be used as -// cryptographic key. The CPU cost and parallism degree must be greater than +// cryptographic key. The CPU cost and parallelism degree must be greater than // zero. // // For example, you can get a derived key for e.g. AES-256 (which needs a -// 32-byte key) by doing: `key := argon2.IDKey([]byte("some password"), salt, 1, -// 64*1024, 4, 32)` +// 32-byte key) by doing: +// +// key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32) // // The draft RFC recommends[2] time=1, and memory=64*1024 is a sensible number. // If using that amount of memory (64 MB) is not possible in some contexts then diff --git a/vendor/golang.org/x/crypto/ripemd160/ripemd160_test.go b/vendor/golang.org/x/crypto/ripemd160/ripemd160_test.go index 5df1b2593..a1fbffdd5 100644 --- a/vendor/golang.org/x/crypto/ripemd160/ripemd160_test.go +++ b/vendor/golang.org/x/crypto/ripemd160/ripemd160_test.go @@ -50,15 +50,23 @@ func TestVectors(t *testing.T) { } } -func TestMillionA(t *testing.T) { +func millionA() string { md := New() for i := 0; i < 100000; i++ { io.WriteString(md, "aaaaaaaaaa") } - out := "52783243c1697bdbe16d37f97f68f08325dc1528" - s := fmt.Sprintf("%x", md.Sum(nil)) - if s != out { + return fmt.Sprintf("%x", md.Sum(nil)) +} + +func TestMillionA(t *testing.T) { + const out = "52783243c1697bdbe16d37f97f68f08325dc1528" + if s := millionA(); s != out { t.Fatalf("RIPEMD-160 (1 million 'a') = %s, expected %s", s, out) } - md.Reset() +} + +func BenchmarkMillionA(b *testing.B) { + for i := 0; i < b.N; i++ { + millionA() + } } diff --git a/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go b/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go index 7bc8e6c48..e0edc02f0 100644 --- a/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go +++ b/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go @@ -8,6 +8,10 @@ package ripemd160 +import ( + "math/bits" +) + // work buffer indices and roll amounts for one line var _n = [80]uint{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, @@ -59,16 +63,16 @@ func _Block(md *digest, p []byte) int { i := 0 for i < 16 { alpha = a + (b ^ c ^ d) + x[_n[i]] - s := _r[i] - alpha = (alpha<<s | alpha>>(32-s)) + e - beta = c<<10 | c>>22 + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) a, b, c, d, e = e, alpha, b, beta, d // parallel line alpha = aa + (bb ^ (cc | ^dd)) + x[n_[i]] + 0x50a28be6 - s = r_[i] - alpha = (alpha<<s | alpha>>(32-s)) + ee - beta = cc<<10 | cc>>22 + s = int(r_[i]) + alpha = 
bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd i++ @@ -77,16 +81,16 @@ // round 2 for i < 32 { alpha = a + (b&c | ^b&d) + x[_n[i]] + 0x5a827999 - s := _r[i] - alpha = (alpha<<s | alpha>>(32-s)) + e - beta = c<<10 | c>>22 + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) a, b, c, d, e = e, alpha, b, beta, d // parallel line alpha = aa + (bb&dd | cc&^dd) + x[n_[i]] + 0x5c4dd124 - s = r_[i] - alpha = (alpha<<s | alpha>>(32-s)) + ee - beta = cc<<10 | cc>>22 + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd i++ @@ -95,16 +99,16 @@ // round 3 for i < 48 { alpha = a + (b | ^c ^ d) + x[_n[i]] + 0x6ed9eba1 - s := _r[i] - alpha = (alpha<<s | alpha>>(32-s)) + e - beta = c<<10 | c>>22 + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) a, b, c, d, e = e, alpha, b, beta, d // parallel line alpha = aa + (bb | ^cc ^ dd) + x[n_[i]] + 0x6d703ef3 - s = r_[i] - alpha = (alpha<<s | alpha>>(32-s)) + ee - beta = cc<<10 | cc>>22 + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd i++ @@ -113,16 +117,16 @@ // round 4 for i < 64 { alpha = a + (b&d | c&^d) + x[_n[i]] + 0x8f1bbcdc - s := _r[i] - alpha = (alpha<<s | alpha>>(32-s)) + e - beta = c<<10 | c>>22 + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) a, b, c, d, e = e, alpha, b, beta, d // parallel line alpha = aa + (bb&cc | ^bb&dd) + x[n_[i]] + 0x7a6d76e9 - s = r_[i] - alpha = (alpha<<s | alpha>>(32-s)) + ee - beta = cc<<10 | cc>>22 + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd i++ @@ -131,16 +135,16 @@ // round 5 for i < 80 { alpha = a + (b ^ (c | ^d)) + x[_n[i]] + 0xa953fd4e - s := _r[i] - alpha = (alpha<<s | alpha>>(32-s)) + e - beta = c<<10 | c>>22 + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) a, b, c, d, e = e, alpha, b, beta, d // parallel line alpha = aa + (bb ^ cc ^ dd) + x[n_[i]] - s = r_[i] - alpha = (alpha<<s | alpha>>(32-s)) + ee - beta = cc<<10 | cc>>22 + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd i++ diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go index 841f9860f..5a027d2da 100644 --- a/vendor/golang.org/x/crypto/sha3/shake.go +++ b/vendor/golang.org/x/crypto/sha3/shake.go @@ -40,7 +40,7 @@ func (d *state) Clone() ShakeHash { // least 32 bytes of its output are used. func NewShake128() ShakeHash { return &state{rate: 168, dsbyte: 0x1f} } -// NewShake256 creates a new SHAKE128 variable-output-length ShakeHash. +// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash. // Its generic security strength is 256 bits against all attacks if // at least 64 bytes of its output are used.
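The ripemd160block.go hunks above are a mechanical rewrite of hand-rolled rotates into math/bits. The equivalence being relied on, as a self-contained check (values are illustrative):

```go
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	var x uint32 = 0xdeadbeef
	s := 10
	manual := x<<uint(s) | x>>uint(32-s) // old form: (alpha<<s | alpha>>(32-s))
	rotated := bits.RotateLeft32(x, s)   // new form
	fmt.Println(manual == rotated)       // true for any x and 0 <= s < 32
}
```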
func NewShake256() ShakeHash { return &state{rate: 136, dsbyte: 0x1f} } diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util.go b/vendor/golang.org/x/crypto/ssh/terminal/util.go index 02dad484e..731c89a28 100644 --- a/vendor/golang.org/x/crypto/ssh/terminal/util.go +++ b/vendor/golang.org/x/crypto/ssh/terminal/util.go @@ -108,9 +108,7 @@ func ReadPassword(fd int) ([]byte, error) { return nil, err } - defer func() { - unix.IoctlSetTermios(fd, ioctlWriteTermios, termios) - }() + defer unix.IoctlSetTermios(fd, ioctlWriteTermios, termios) return readPasswordLine(passwordReader(fd)) } diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go index a2e1b57dc..9e41b9f43 100644 --- a/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go @@ -14,7 +14,7 @@ import ( // State contains the state of a terminal. type State struct { - state *unix.Termios + termios unix.Termios } // IsTerminal returns true if the given file descriptor is a terminal. @@ -75,47 +75,43 @@ func ReadPassword(fd int) ([]byte, error) { // restored. // see http://cr.illumos.org/~webrev/andy_js/1060/ func MakeRaw(fd int) (*State, error) { - oldTermiosPtr, err := unix.IoctlGetTermios(fd, unix.TCGETS) + termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) if err != nil { return nil, err } - oldTermios := *oldTermiosPtr - newTermios := oldTermios - newTermios.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON - newTermios.Oflag &^= syscall.OPOST - newTermios.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN - newTermios.Cflag &^= syscall.CSIZE | syscall.PARENB - newTermios.Cflag |= syscall.CS8 - newTermios.Cc[unix.VMIN] = 1 - newTermios.Cc[unix.VTIME] = 0 + oldState := State{termios: *termios} - if err := unix.IoctlSetTermios(fd, unix.TCSETS, &newTermios); err != nil { + termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON + termios.Oflag &^= unix.OPOST + termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN + termios.Cflag &^= unix.CSIZE | unix.PARENB + termios.Cflag |= unix.CS8 + termios.Cc[unix.VMIN] = 1 + termios.Cc[unix.VTIME] = 0 + + if err := unix.IoctlSetTermios(fd, unix.TCSETS, termios); err != nil { return nil, err } - return &State{ - state: oldTermiosPtr, - }, nil + return &oldState, nil } // Restore restores the terminal connected to the given file descriptor to a // previous state. func Restore(fd int, oldState *State) error { - return unix.IoctlSetTermios(fd, unix.TCSETS, oldState.state) + return unix.IoctlSetTermios(fd, unix.TCSETS, &oldState.termios) } // GetState returns the current state of a terminal which may be useful to // restore the terminal after a signal. func GetState(fd int) (*State, error) { - oldTermiosPtr, err := unix.IoctlGetTermios(fd, unix.TCGETS) + termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) if err != nil { return nil, err } - return &State{ - state: oldTermiosPtr, - }, nil + return &State{termios: *termios}, nil } // GetSize returns the dimensions of the given terminal. 
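The util_solaris.go rewrite stores the termios by value inside State, so a saved state can no longer be mutated through a shared pointer. Caller code is unchanged; the usual raw-mode pattern still applies (a sketch, assuming an interactive stdin):

```go
fd := int(os.Stdin.Fd())
oldState, err := terminal.MakeRaw(fd)
if err != nil {
	log.Fatal(err)
}
defer terminal.Restore(fd, oldState)
```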
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go index 4933ac361..8618955df 100644 --- a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go @@ -89,9 +89,7 @@ func ReadPassword(fd int) ([]byte, error) { return nil, err } - defer func() { - windows.SetConsoleMode(windows.Handle(fd), old) - }() + defer windows.SetConsoleMode(windows.Handle(fd), old) var h windows.Handle p, _ := windows.GetCurrentProcess() diff --git a/vendor/golang.org/x/net/dns/dnsmessage/example_test.go b/vendor/golang.org/x/net/dns/dnsmessage/example_test.go index 5415c2d3a..8600a6bc4 100644 --- a/vendor/golang.org/x/net/dns/dnsmessage/example_test.go +++ b/vendor/golang.org/x/net/dns/dnsmessage/example_test.go @@ -37,20 +37,20 @@ func ExampleParser() { }, Answers: []dnsmessage.Resource{ { - dnsmessage.ResourceHeader{ + Header: dnsmessage.ResourceHeader{ Name: mustNewName("foo.bar.example.com."), Type: dnsmessage.TypeA, Class: dnsmessage.ClassINET, }, - &dnsmessage.AResource{[4]byte{127, 0, 0, 1}}, + Body: &dnsmessage.AResource{A: [4]byte{127, 0, 0, 1}}, }, { - dnsmessage.ResourceHeader{ + Header: dnsmessage.ResourceHeader{ Name: mustNewName("bar.example.com."), Type: dnsmessage.TypeA, Class: dnsmessage.ClassINET, }, - &dnsmessage.AResource{[4]byte{127, 0, 0, 2}}, + Body: &dnsmessage.AResource{A: [4]byte{127, 0, 0, 2}}, }, }, } diff --git a/vendor/golang.org/x/net/dns/dnsmessage/message.go b/vendor/golang.org/x/net/dns/dnsmessage/message.go index e98fda66e..624f9b69f 100644 --- a/vendor/golang.org/x/net/dns/dnsmessage/message.go +++ b/vendor/golang.org/x/net/dns/dnsmessage/message.go @@ -273,25 +273,25 @@ type Resource struct { // A ResourceBody is a DNS resource record minus the header. type ResourceBody interface { // pack packs a Resource except for its header. - pack(msg []byte, compression map[string]int) ([]byte, error) + pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) // realType returns the actual type of the Resource. This is used to // fill in the header Type field. realType() Type } -func (r *Resource) pack(msg []byte, compression map[string]int) ([]byte, error) { +func (r *Resource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { if r.Body == nil { return msg, errNilResouceBody } oldMsg := msg r.Header.Type = r.Body.realType() - msg, length, err := r.Header.pack(msg, compression) + msg, length, err := r.Header.pack(msg, compression, compressionOff) if err != nil { return msg, &nestedError{"ResourceHeader", err} } preLen := len(msg) - msg, err = r.Body.pack(msg, compression) + msg, err = r.Body.pack(msg, compression, compressionOff) if err != nil { return msg, &nestedError{"content", err} } @@ -852,6 +852,7 @@ func (m *Message) AppendPack(b []byte) ([]byte, error) { h.authorities = uint16(len(m.Authorities)) h.additionals = uint16(len(m.Additionals)) + compressionOff := len(b) msg := h.pack(b) // RFC 1035 allows (but does not require) compression for packing. 
RFC @@ -866,25 +867,25 @@ func (m *Message) AppendPack(b []byte) ([]byte, error) { for i := range m.Questions { var err error - if msg, err = m.Questions[i].pack(msg, compression); err != nil { + if msg, err = m.Questions[i].pack(msg, compression, compressionOff); err != nil { return nil, &nestedError{"packing Question", err} } } for i := range m.Answers { var err error - if msg, err = m.Answers[i].pack(msg, compression); err != nil { + if msg, err = m.Answers[i].pack(msg, compression, compressionOff); err != nil { return nil, &nestedError{"packing Answer", err} } } for i := range m.Authorities { var err error - if msg, err = m.Authorities[i].pack(msg, compression); err != nil { + if msg, err = m.Authorities[i].pack(msg, compression, compressionOff); err != nil { return nil, &nestedError{"packing Authority", err} } } for i := range m.Additionals { var err error - if msg, err = m.Additionals[i].pack(msg, compression); err != nil { + if msg, err = m.Additionals[i].pack(msg, compression, compressionOff); err != nil { return nil, &nestedError{"packing Additional", err} } } @@ -893,36 +894,69 @@ func (m *Message) AppendPack(b []byte) ([]byte, error) { } // A Builder allows incrementally packing a DNS message. +// +// Example usage: +// buf := make([]byte, 2, 514) +// b := NewBuilder(buf, Header{...}) +// b.EnableCompression() +// // Optionally start a section and add things to that section. +// // Repeat adding sections as necessary. +// buf, err := b.Finish() +// // If err is nil, buf[2:] will contain the built bytes. type Builder struct { - msg []byte - header header - section section + // msg is the storage for the message being built. + msg []byte + + // section keeps track of the current section being built. + section section + + // header keeps track of what should go in the header when Finish is + // called. + header header + + // start is the starting index of the bytes allocated in msg for header. + start int + + // compression is a mapping from name suffixes to their starting index + // in msg. compression map[string]int } -// Start initializes the builder. +// NewBuilder creates a new builder with compression disabled. // -// buf is optional (nil is fine), but if provided, Start takes ownership of buf. -func (b *Builder) Start(buf []byte, h Header) { - b.StartWithoutCompression(buf, h) - b.compression = map[string]int{} +// Note: Most users will want to immediately enable compression with the +// EnableCompression method. See that method's comment for why you may or may +// not want to enable compression. +// +// The DNS message is appended to the provided initial buffer buf (which may be +// nil) as it is built. The final message is returned by the (*Builder).Finish +// method, which may return the same underlying array if there was sufficient +// capacity in the slice. +func NewBuilder(buf []byte, h Header) Builder { + if buf == nil { + buf = make([]byte, 0, packStartingCap) + } + b := Builder{msg: buf, start: len(buf)} + b.header.id, b.header.bits = h.pack() + var hb [headerLen]byte + b.msg = append(b.msg, hb[:]...) + b.section = sectionHeader + return b } -// StartWithoutCompression initializes the builder with compression disabled. +// EnableCompression enables compression in the Builder. // -// This avoids compression related allocations, but can result in larger message -// sizes. Be careful with this mode as it can cause messages to exceed the UDP -// size limit. 
+// Leaving compression disabled avoids compression related allocations, but can +// result in larger message sizes. Be careful with this mode as it can cause +// messages to exceed the UDP size limit. // -// buf is optional (nil is fine), but if provided, Start takes ownership of buf. -func (b *Builder) StartWithoutCompression(buf []byte, h Header) { - *b = Builder{msg: buf} - b.header.id, b.header.bits = h.pack() - if cap(b.msg) < headerLen { - b.msg = make([]byte, 0, packStartingCap) - } - b.msg = b.msg[:headerLen] - b.section = sectionHeader +// According to RFC 1035, section 4.1.4, the use of compression is optional, but +// all implementations must accept both compressed and uncompressed DNS +// messages. +// +// Compression should be enabled before any sections are added for best results. +func (b *Builder) EnableCompression() { + b.compression = map[string]int{} } func (b *Builder) startCheck(s section) error { @@ -1003,7 +1037,7 @@ func (b *Builder) Question(q Question) error { if b.section > sectionQuestions { return ErrSectionDone } - msg, err := q.pack(b.msg, b.compression) + msg, err := q.pack(b.msg, b.compression, b.start) if err != nil { return err } @@ -1030,12 +1064,12 @@ func (b *Builder) CNAMEResource(h ResourceHeader, r CNAMEResource) error { return err } h.Type = r.realType() - msg, length, err := h.pack(b.msg, b.compression) + msg, length, err := h.pack(b.msg, b.compression, b.start) if err != nil { return &nestedError{"ResourceHeader", err} } preLen := len(msg) - if msg, err = r.pack(msg, b.compression); err != nil { + if msg, err = r.pack(msg, b.compression, b.start); err != nil { return &nestedError{"CNAMEResource body", err} } if err := h.fixLen(msg, length, preLen); err != nil { @@ -1054,12 +1088,12 @@ func (b *Builder) MXResource(h ResourceHeader, r MXResource) error { return err } h.Type = r.realType() - msg, length, err := h.pack(b.msg, b.compression) + msg, length, err := h.pack(b.msg, b.compression, b.start) if err != nil { return &nestedError{"ResourceHeader", err} } preLen := len(msg) - if msg, err = r.pack(msg, b.compression); err != nil { + if msg, err = r.pack(msg, b.compression, b.start); err != nil { return &nestedError{"MXResource body", err} } if err := h.fixLen(msg, length, preLen); err != nil { @@ -1078,12 +1112,12 @@ func (b *Builder) NSResource(h ResourceHeader, r NSResource) error { return err } h.Type = r.realType() - msg, length, err := h.pack(b.msg, b.compression) + msg, length, err := h.pack(b.msg, b.compression, b.start) if err != nil { return &nestedError{"ResourceHeader", err} } preLen := len(msg) - if msg, err = r.pack(msg, b.compression); err != nil { + if msg, err = r.pack(msg, b.compression, b.start); err != nil { return &nestedError{"NSResource body", err} } if err := h.fixLen(msg, length, preLen); err != nil { @@ -1102,12 +1136,12 @@ func (b *Builder) PTRResource(h ResourceHeader, r PTRResource) error { return err } h.Type = r.realType() - msg, length, err := h.pack(b.msg, b.compression) + msg, length, err := h.pack(b.msg, b.compression, b.start) if err != nil { return &nestedError{"ResourceHeader", err} } preLen := len(msg) - if msg, err = r.pack(msg, b.compression); err != nil { + if msg, err = r.pack(msg, b.compression, b.start); err != nil { return &nestedError{"PTRResource body", err} } if err := h.fixLen(msg, length, preLen); err != nil { @@ -1126,12 +1160,12 @@ func (b *Builder) SOAResource(h ResourceHeader, r SOAResource) error { return err } h.Type = r.realType() - msg, length, err := h.pack(b.msg, b.compression) + msg, 
length, err := h.pack(b.msg, b.compression, b.start) if err != nil { return &nestedError{"ResourceHeader", err} } preLen := len(msg) - if msg, err = r.pack(msg, b.compression); err != nil { + if msg, err = r.pack(msg, b.compression, b.start); err != nil { return &nestedError{"SOAResource body", err} } if err := h.fixLen(msg, length, preLen); err != nil { @@ -1150,12 +1184,12 @@ func (b *Builder) TXTResource(h ResourceHeader, r TXTResource) error { return err } h.Type = r.realType() - msg, length, err := h.pack(b.msg, b.compression) + msg, length, err := h.pack(b.msg, b.compression, b.start) if err != nil { return &nestedError{"ResourceHeader", err} } preLen := len(msg) - if msg, err = r.pack(msg, b.compression); err != nil { + if msg, err = r.pack(msg, b.compression, b.start); err != nil { return &nestedError{"TXTResource body", err} } if err := h.fixLen(msg, length, preLen); err != nil { @@ -1174,12 +1208,12 @@ func (b *Builder) SRVResource(h ResourceHeader, r SRVResource) error { return err } h.Type = r.realType() - msg, length, err := h.pack(b.msg, b.compression) + msg, length, err := h.pack(b.msg, b.compression, b.start) if err != nil { return &nestedError{"ResourceHeader", err} } preLen := len(msg) - if msg, err = r.pack(msg, b.compression); err != nil { + if msg, err = r.pack(msg, b.compression, b.start); err != nil { return &nestedError{"SRVResource body", err} } if err := h.fixLen(msg, length, preLen); err != nil { @@ -1198,12 +1232,12 @@ func (b *Builder) AResource(h ResourceHeader, r AResource) error { return err } h.Type = r.realType() - msg, length, err := h.pack(b.msg, b.compression) + msg, length, err := h.pack(b.msg, b.compression, b.start) if err != nil { return &nestedError{"ResourceHeader", err} } preLen := len(msg) - if msg, err = r.pack(msg, b.compression); err != nil { + if msg, err = r.pack(msg, b.compression, b.start); err != nil { return &nestedError{"AResource body", err} } if err := h.fixLen(msg, length, preLen); err != nil { @@ -1222,12 +1256,12 @@ func (b *Builder) AAAAResource(h ResourceHeader, r AAAAResource) error { return err } h.Type = r.realType() - msg, length, err := h.pack(b.msg, b.compression) + msg, length, err := h.pack(b.msg, b.compression, b.start) if err != nil { return &nestedError{"ResourceHeader", err} } preLen := len(msg) - if msg, err = r.pack(msg, b.compression); err != nil { + if msg, err = r.pack(msg, b.compression, b.start); err != nil { return &nestedError{"AAAAResource body", err} } if err := h.fixLen(msg, length, preLen); err != nil { @@ -1246,7 +1280,8 @@ func (b *Builder) Finish() ([]byte, error) { return nil, ErrNotStarted } b.section = sectionDone - b.header.pack(b.msg[:0]) + // Space for the header was allocated in NewBuilder. + b.header.pack(b.msg[b.start:b.start]) return b.msg, nil } @@ -1279,9 +1314,9 @@ type ResourceHeader struct { // pack packs all of the fields in a ResourceHeader except for the length. The // length bytes are returned as a slice so they can be filled in after the rest // of the Resource has been packed. 
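The Builder methods in this file all gain the `compressionOff`/`b.start` plumbing so that compression pointers remain correct when the message is appended to a non-empty buffer. A sketch of the `NewBuilder`/`EnableCompression` flow described in the new doc comment (standalone; the 2-byte prefix is a stand-in for, say, a TCP length header):

    package main

    import (
        "fmt"

        "golang.org/x/net/dns/dnsmessage"
    )

    func main() {
        name, err := dnsmessage.NewName("example.com.")
        if err != nil {
            panic(err)
        }

        buf := make([]byte, 2, 514) // 2 reserved bytes; the builder appends after them
        b := dnsmessage.NewBuilder(buf, dnsmessage.Header{ID: 1, RecursionDesired: true})
        b.EnableCompression() // must happen before any sections are added

        if err := b.StartQuestions(); err != nil {
            panic(err)
        }
        q := dnsmessage.Question{Name: name, Type: dnsmessage.TypeA, Class: dnsmessage.ClassINET}
        if err := b.Question(q); err != nil {
            panic(err)
        }

        msg, err := b.Finish()
        if err != nil {
            panic(err)
        }
        fmt.Printf("built %d bytes after the 2-byte prefix\n", len(msg[2:]))
    }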
-func (h *ResourceHeader) pack(oldMsg []byte, compression map[string]int) (msg []byte, length []byte, err error) { +func (h *ResourceHeader) pack(oldMsg []byte, compression map[string]int, compressionOff int) (msg []byte, length []byte, err error) { msg = oldMsg - if msg, err = h.Name.pack(msg, compression); err != nil { + if msg, err = h.Name.pack(msg, compression, compressionOff); err != nil { return oldMsg, nil, &nestedError{"Name", err} } msg = packType(msg, h.Type) @@ -1506,7 +1541,7 @@ func (n Name) String() string { // // The compression map will be updated with new domain suffixes. If compression // is nil, compression will not be used. -func (n *Name) pack(msg []byte, compression map[string]int) ([]byte, error) { +func (n *Name) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { oldMsg := msg // Add a trailing dot to canonicalize name. @@ -1558,7 +1593,7 @@ func (n *Name) pack(msg []byte, compression map[string]int) ([]byte, error) { // Miss. Add the suffix to the compression table if the // offset can be stored in the available 14 bytes. if len(msg) <= int(^uint16(0)>>2) { - compression[string(n.Data[i:])] = len(msg) + compression[string(n.Data[i:])] = len(msg) - compressionOff } } } @@ -1681,8 +1716,8 @@ type Question struct { Class Class } -func (q *Question) pack(msg []byte, compression map[string]int) ([]byte, error) { - msg, err := q.Name.pack(msg, compression) +func (q *Question) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + msg, err := q.Name.pack(msg, compression, compressionOff) if err != nil { return msg, &nestedError{"Name", err} } @@ -1761,8 +1796,8 @@ func (r *CNAMEResource) realType() Type { return TypeCNAME } -func (r *CNAMEResource) pack(msg []byte, compression map[string]int) ([]byte, error) { - return r.CNAME.pack(msg, compression) +func (r *CNAMEResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + return r.CNAME.pack(msg, compression, compressionOff) } func unpackCNAMEResource(msg []byte, off int) (CNAMEResource, error) { @@ -1783,10 +1818,10 @@ func (r *MXResource) realType() Type { return TypeMX } -func (r *MXResource) pack(msg []byte, compression map[string]int) ([]byte, error) { +func (r *MXResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { oldMsg := msg msg = packUint16(msg, r.Pref) - msg, err := r.MX.pack(msg, compression) + msg, err := r.MX.pack(msg, compression, compressionOff) if err != nil { return oldMsg, &nestedError{"MXResource.MX", err} } @@ -1814,8 +1849,8 @@ func (r *NSResource) realType() Type { return TypeNS } -func (r *NSResource) pack(msg []byte, compression map[string]int) ([]byte, error) { - return r.NS.pack(msg, compression) +func (r *NSResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + return r.NS.pack(msg, compression, compressionOff) } func unpackNSResource(msg []byte, off int) (NSResource, error) { @@ -1835,8 +1870,8 @@ func (r *PTRResource) realType() Type { return TypePTR } -func (r *PTRResource) pack(msg []byte, compression map[string]int) ([]byte, error) { - return r.PTR.pack(msg, compression) +func (r *PTRResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + return r.PTR.pack(msg, compression, compressionOff) } func unpackPTRResource(msg []byte, off int) (PTRResource, error) { @@ -1866,13 +1901,13 @@ func (r *SOAResource) realType() Type { return TypeSOA } -func (r *SOAResource) pack(msg 
[]byte, compression map[string]int) ([]byte, error) { +func (r *SOAResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { oldMsg := msg - msg, err := r.NS.pack(msg, compression) + msg, err := r.NS.pack(msg, compression, compressionOff) if err != nil { return oldMsg, &nestedError{"SOAResource.NS", err} } - msg, err = r.MBox.pack(msg, compression) + msg, err = r.MBox.pack(msg, compression, compressionOff) if err != nil { return oldMsg, &nestedError{"SOAResource.MBox", err} } @@ -1925,7 +1960,7 @@ func (r *TXTResource) realType() Type { return TypeTXT } -func (r *TXTResource) pack(msg []byte, compression map[string]int) ([]byte, error) { +func (r *TXTResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { return packText(msg, r.Txt), nil } @@ -1959,12 +1994,12 @@ func (r *SRVResource) realType() Type { return TypeSRV } -func (r *SRVResource) pack(msg []byte, compression map[string]int) ([]byte, error) { +func (r *SRVResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { oldMsg := msg msg = packUint16(msg, r.Priority) msg = packUint16(msg, r.Weight) msg = packUint16(msg, r.Port) - msg, err := r.Target.pack(msg, nil) + msg, err := r.Target.pack(msg, nil, compressionOff) if err != nil { return oldMsg, &nestedError{"SRVResource.Target", err} } @@ -2000,7 +2035,7 @@ func (r *AResource) realType() Type { return TypeA } -func (r *AResource) pack(msg []byte, compression map[string]int) ([]byte, error) { +func (r *AResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { return packBytes(msg, r.A[:]), nil } @@ -2021,7 +2056,7 @@ func (r *AAAAResource) realType() Type { return TypeAAAA } -func (r *AAAAResource) pack(msg []byte, compression map[string]int) ([]byte, error) { +func (r *AAAAResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { return packBytes(msg, r.AAAA[:]), nil } diff --git a/vendor/golang.org/x/net/dns/dnsmessage/message_test.go b/vendor/golang.org/x/net/dns/dnsmessage/message_test.go index 2bb763420..d4eca26f1 100644 --- a/vendor/golang.org/x/net/dns/dnsmessage/message_test.go +++ b/vendor/golang.org/x/net/dns/dnsmessage/message_test.go @@ -62,7 +62,7 @@ func TestQuestionPackUnpack(t *testing.T) { Type: TypeA, Class: ClassINET, } - buf, err := want.pack(make([]byte, 1, 50), map[string]int{}) + buf, err := want.pack(make([]byte, 1, 50), map[string]int{}, 1) if err != nil { t.Fatal("Packing failed:", err) } @@ -129,7 +129,7 @@ func TestNamePackUnpack(t *testing.T) { for _, test := range tests { in := mustNewName(test.in) want := mustNewName(test.want) - buf, err := in.pack(make([]byte, 0, 30), map[string]int{}) + buf, err := in.pack(make([]byte, 0, 30), map[string]int{}, 0) if err != test.err { t.Errorf("Packing of %q: got err = %v, want err = %v", test.in, err, test.err) continue @@ -248,6 +248,40 @@ func TestDNSPackUnpack(t *testing.T) { } } +func TestDNSAppendPackUnpack(t *testing.T) { + wants := []Message{ + { + Questions: []Question{ + { + Name: mustNewName("."), + Type: TypeAAAA, + Class: ClassINET, + }, + }, + Answers: []Resource{}, + Authorities: []Resource{}, + Additionals: []Resource{}, + }, + largeTestMsg(), + } + for i, want := range wants { + b := make([]byte, 2, 514) + b, err := want.AppendPack(b) + if err != nil { + t.Fatalf("%d: packing failed: %v", i, err) + } + b = b[2:] + var got Message + err = got.Unpack(b) + if err != nil { + t.Fatalf("%d: unpacking failed: %v", i, err) + } + if 
!reflect.DeepEqual(got, want) { + t.Errorf("%d: got = %+v, want = %+v", i, &got, &want) + } + } +} + func TestSkipAll(t *testing.T) { msg := largeTestMsg() buf, err := msg.Pack() @@ -412,7 +446,7 @@ func TestVeryLongTxt(t *testing.T) { }, &TXTResource{loremIpsum}, } - buf, err := want.pack(make([]byte, 0, 8000), map[string]int{}) + buf, err := want.pack(make([]byte, 0, 8000), map[string]int{}, 0) if err != nil { t.Fatal("Packing failed:", err) } @@ -434,6 +468,26 @@ func TestVeryLongTxt(t *testing.T) { } } +func TestStartAppends(t *testing.T) { + buf := make([]byte, 2, 514) + wantBuf := []byte{4, 44} + copy(buf, wantBuf) + + b := NewBuilder(buf, Header{}) + b.EnableCompression() + + buf, err := b.Finish() + if err != nil { + t.Fatal("Building failed:", err) + } + if got, want := len(buf), headerLen+2; got != want { + t.Errorf("Got len(buf} = %d, want = %d", got, want) + } + if string(buf[:2]) != string(wantBuf) { + t.Errorf("Original data not preserved, got = %v, want = %v", buf[:2], wantBuf) + } +} + func TestStartError(t *testing.T) { tests := []struct { name string @@ -514,8 +568,8 @@ func TestBuilder(t *testing.T) { t.Fatal("Packing without builder:", err) } - var b Builder - b.Start(nil, msg.Header) + b := NewBuilder(nil, msg.Header) + b.EnableCompression() if err := b.StartQuestions(); err != nil { t.Fatal("b.StartQuestions():", err) @@ -653,9 +707,7 @@ func TestResourcePack(t *testing.T) { } } -func BenchmarkParsing(b *testing.B) { - b.ReportAllocs() - +func benchmarkParsingSetup() ([]byte, error) { name := mustNewName("foo.bar.example.com.") msg := Message{ Header: Header{Response: true, Authoritative: true}, @@ -700,111 +752,148 @@ func BenchmarkParsing(b *testing.B) { buf, err := msg.Pack() if err != nil { - b.Fatal("msg.Pack():", err) + return nil, fmt.Errorf("msg.Pack(): %v", err) + } + return buf, nil +} + +func benchmarkParsing(tb testing.TB, buf []byte) { + var p Parser + if _, err := p.Start(buf); err != nil { + tb.Fatal("p.Start(buf):", err) } - for i := 0; i < b.N; i++ { - var p Parser - if _, err := p.Start(buf); err != nil { - b.Fatal("p.Start(buf):", err) + for { + _, err := p.Question() + if err == ErrSectionDone { + break + } + if err != nil { + tb.Fatal("p.Question():", err) + } + } + + for { + h, err := p.AnswerHeader() + if err == ErrSectionDone { + break + } + if err != nil { + panic(err) } - for { - _, err := p.Question() - if err == ErrSectionDone { - break + switch h.Type { + case TypeA: + if _, err := p.AResource(); err != nil { + tb.Fatal("p.AResource():", err) } - if err != nil { - b.Fatal("p.Question():", err) + case TypeAAAA: + if _, err := p.AAAAResource(); err != nil { + tb.Fatal("p.AAAAResource():", err) } - } - - for { - h, err := p.AnswerHeader() - if err == ErrSectionDone { - break + case TypeCNAME: + if _, err := p.CNAMEResource(); err != nil { + tb.Fatal("p.CNAMEResource():", err) } - if err != nil { - panic(err) - } - - switch h.Type { - case TypeA: - if _, err := p.AResource(); err != nil { - b.Fatal("p.AResource():", err) - } - case TypeAAAA: - if _, err := p.AAAAResource(); err != nil { - b.Fatal("p.AAAAResource():", err) - } - case TypeCNAME: - if _, err := p.CNAMEResource(); err != nil { - b.Fatal("p.CNAMEResource():", err) - } - case TypeNS: - if _, err := p.NSResource(); err != nil { - b.Fatal("p.NSResource():", err) - } - default: - b.Fatalf("unknown type: %T", h) + case TypeNS: + if _, err := p.NSResource(); err != nil { + tb.Fatal("p.NSResource():", err) } + default: + tb.Fatalf("unknown type: %T", h) } } } -func BenchmarkBuilding(b 
*testing.B) { - b.ReportAllocs() +func BenchmarkParsing(b *testing.B) { + buf, err := benchmarkParsingSetup() + if err != nil { + b.Fatal(err) + } + b.ReportAllocs() + for i := 0; i < b.N; i++ { + benchmarkParsing(b, buf) + } +} + +func TestParsingAllocs(t *testing.T) { + buf, err := benchmarkParsingSetup() + if err != nil { + t.Fatal(err) + } + + if allocs := testing.AllocsPerRun(100, func() { benchmarkParsing(t, buf) }); allocs > 0.5 { + t.Errorf("Allocations during parsing: got = %f, want ~0", allocs) + } +} + +func benchmarkBuildingSetup() (Name, []byte) { name := mustNewName("foo.bar.example.com.") buf := make([]byte, 0, packStartingCap) + return name, buf +} +func benchmarkBuilding(tb testing.TB, name Name, buf []byte) { + bld := NewBuilder(buf, Header{Response: true, Authoritative: true}) + + if err := bld.StartQuestions(); err != nil { + tb.Fatal("bld.StartQuestions():", err) + } + q := Question{ + Name: name, + Type: TypeA, + Class: ClassINET, + } + if err := bld.Question(q); err != nil { + tb.Fatalf("bld.Question(%+v): %v", q, err) + } + + hdr := ResourceHeader{ + Name: name, + Class: ClassINET, + } + if err := bld.StartAnswers(); err != nil { + tb.Fatal("bld.StartQuestions():", err) + } + + ar := AResource{[4]byte{}} + if err := bld.AResource(hdr, ar); err != nil { + tb.Fatalf("bld.AResource(%+v, %+v): %v", hdr, ar, err) + } + + aaar := AAAAResource{[16]byte{}} + if err := bld.AAAAResource(hdr, aaar); err != nil { + tb.Fatalf("bld.AAAAResource(%+v, %+v): %v", hdr, aaar, err) + } + + cnr := CNAMEResource{name} + if err := bld.CNAMEResource(hdr, cnr); err != nil { + tb.Fatalf("bld.CNAMEResource(%+v, %+v): %v", hdr, cnr, err) + } + + nsr := NSResource{name} + if err := bld.NSResource(hdr, nsr); err != nil { + tb.Fatalf("bld.NSResource(%+v, %+v): %v", hdr, nsr, err) + } + + if _, err := bld.Finish(); err != nil { + tb.Fatal("bld.Finish():", err) + } +} + +func BenchmarkBuilding(b *testing.B) { + name, buf := benchmarkBuildingSetup() + b.ReportAllocs() for i := 0; i < b.N; i++ { - var bld Builder - bld.StartWithoutCompression(buf, Header{Response: true, Authoritative: true}) + benchmarkBuilding(b, name, buf) + } +} - if err := bld.StartQuestions(); err != nil { - b.Fatal("bld.StartQuestions():", err) - } - q := Question{ - Name: name, - Type: TypeA, - Class: ClassINET, - } - if err := bld.Question(q); err != nil { - b.Fatalf("bld.Question(%+v): %v", q, err) - } - - hdr := ResourceHeader{ - Name: name, - Class: ClassINET, - } - if err := bld.StartAnswers(); err != nil { - b.Fatal("bld.StartQuestions():", err) - } - - ar := AResource{[4]byte{}} - if err := bld.AResource(hdr, ar); err != nil { - b.Fatalf("bld.AResource(%+v, %+v): %v", hdr, ar, err) - } - - aaar := AAAAResource{[16]byte{}} - if err := bld.AAAAResource(hdr, aaar); err != nil { - b.Fatalf("bld.AAAAResource(%+v, %+v): %v", hdr, aaar, err) - } - - cnr := CNAMEResource{name} - if err := bld.CNAMEResource(hdr, cnr); err != nil { - b.Fatalf("bld.CNAMEResource(%+v, %+v): %v", hdr, cnr, err) - } - - nsr := NSResource{name} - if err := bld.NSResource(hdr, nsr); err != nil { - b.Fatalf("bld.NSResource(%+v, %+v): %v", hdr, nsr, err) - } - - if _, err := bld.Finish(); err != nil { - b.Fatal("bld.Finish():", err) - } +func TestBuildingAllocs(t *testing.T) { + name, buf := benchmarkBuildingSetup() + if allocs := testing.AllocsPerRun(100, func() { benchmarkBuilding(t, name, buf) }); allocs > 0.5 { + t.Errorf("Allocations during building: got = %f, want ~0", allocs) } } diff --git a/vendor/golang.org/x/net/http2/h2demo/service.yaml 
b/vendor/golang.org/x/net/http2/h2demo/service.yaml index 8e7671e4a..2b7d54119 100644 --- a/vendor/golang.org/x/net/http2/h2demo/service.yaml +++ b/vendor/golang.org/x/net/http2/h2demo/service.yaml @@ -3,6 +3,7 @@ kind: Service metadata: name: h2demo spec: + externalTrafficPolicy: Local ports: - port: 80 targetPort: 80 diff --git a/vendor/golang.org/x/net/route/syscall.go b/vendor/golang.org/x/net/route/syscall.go index c211188b1..5f69ea63d 100644 --- a/vendor/golang.org/x/net/route/syscall.go +++ b/vendor/golang.org/x/net/route/syscall.go @@ -20,7 +20,7 @@ func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { p = unsafe.Pointer(&zero) } - _, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), newlen) if errno != 0 { return error(errno) } diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go index bb72a527e..a46ee0eaa 100644 --- a/vendor/golang.org/x/net/trace/trace.go +++ b/vendor/golang.org/x/net/trace/trace.go @@ -368,7 +368,11 @@ func New(family, title string) Trace { } func (tr *trace) Finish() { - tr.Elapsed = time.Now().Sub(tr.Start) + elapsed := time.Now().Sub(tr.Start) + tr.mu.Lock() + tr.Elapsed = elapsed + tr.mu.Unlock() + if DebugUseAfterFinish { buf := make([]byte, 4<<10) // 4 KB should be enough n := runtime.Stack(buf, false) @@ -381,14 +385,17 @@ func (tr *trace) Finish() { m.Remove(tr) f := getFamily(tr.Family, true) + tr.mu.RLock() // protects tr fields in Cond.match calls for _, b := range f.Buckets { if b.Cond.match(tr) { b.Add(tr) } } + tr.mu.RUnlock() + // Add a sample of elapsed time as microseconds to the family's timeseries h := new(histogram) - h.addMeasurement(tr.Elapsed.Nanoseconds() / 1e3) + h.addMeasurement(elapsed.Nanoseconds() / 1e3) f.LatencyMu.Lock() f.Latency.Add(h) f.LatencyMu.Unlock() @@ -684,25 +691,20 @@ type trace struct { // Title is the title of this trace. Title string - // Timing information. - Start time.Time - Elapsed time.Duration // zero while active + // Start time of the this trace. + Start time.Time - // Trace information if non-zero. - traceID uint64 - spanID uint64 - - // Whether this trace resulted in an error. - IsError bool - - // Append-only sequence of events (modulo discards). mu sync.RWMutex - events []event + events []event // Append-only sequence of events (modulo discards). maxEvents int + recycler func(interface{}) + IsError bool // Whether this trace resulted in an error. + Elapsed time.Duration // Elapsed time for this trace, zero while active. + traceID uint64 // Trace information if non-zero. 
+ spanID uint64 - refs int32 // how many buckets this is in - recycler func(interface{}) - disc discarded // scratch space to avoid allocation + refs int32 // how many buckets this is in + disc discarded // scratch space to avoid allocation finishStack []byte // where finish was called, if DebugUseAfterFinish is set @@ -714,14 +716,18 @@ func (tr *trace) reset() { tr.Family = "" tr.Title = "" tr.Start = time.Time{} + + tr.mu.Lock() tr.Elapsed = 0 tr.traceID = 0 tr.spanID = 0 tr.IsError = false tr.maxEvents = 0 tr.events = nil - tr.refs = 0 tr.recycler = nil + tr.mu.Unlock() + + tr.refs = 0 tr.disc = 0 tr.finishStack = nil for i := range tr.eventsBuf { @@ -801,21 +807,31 @@ func (tr *trace) LazyPrintf(format string, a ...interface{}) { tr.addEvent(&lazySprintf{format, a}, false, false) } -func (tr *trace) SetError() { tr.IsError = true } +func (tr *trace) SetError() { + tr.mu.Lock() + tr.IsError = true + tr.mu.Unlock() +} func (tr *trace) SetRecycler(f func(interface{})) { + tr.mu.Lock() tr.recycler = f + tr.mu.Unlock() } func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.mu.Lock() tr.traceID, tr.spanID = traceID, spanID + tr.mu.Unlock() } func (tr *trace) SetMaxEvents(m int) { + tr.mu.Lock() // Always keep at least three events: first, discarded count, last. if len(tr.events) == 0 && m > 3 { tr.maxEvents = m } + tr.mu.Unlock() } func (tr *trace) ref() { @@ -824,6 +840,7 @@ func (tr *trace) ref() { func (tr *trace) unref() { if atomic.AddInt32(&tr.refs, -1) == 0 { + tr.mu.RLock() if tr.recycler != nil { // freeTrace clears tr, so we hold tr.recycler and tr.events here. go func(f func(interface{}), es []event) { @@ -834,6 +851,7 @@ func (tr *trace) unref() { } }(tr.recycler, tr.events) } + tr.mu.RUnlock() freeTrace(tr) } @@ -844,7 +862,10 @@ func (tr *trace) When() string { } func (tr *trace) ElapsedTime() string { + tr.mu.RLock() t := tr.Elapsed + tr.mu.RUnlock() + if t == 0 { // Active trace. 
t = time.Since(tr.Start) diff --git a/vendor/golang.org/x/sys/plan9/syscall_plan9.go b/vendor/golang.org/x/sys/plan9/syscall_plan9.go index d39d07de1..84e147148 100644 --- a/vendor/golang.org/x/sys/plan9/syscall_plan9.go +++ b/vendor/golang.org/x/sys/plan9/syscall_plan9.go @@ -12,6 +12,7 @@ package plan9 import ( + "bytes" "syscall" "unsafe" ) @@ -50,12 +51,11 @@ func atoi(b []byte) (n uint) { } func cstring(s []byte) string { - for i := range s { - if s[i] == 0 { - return string(s[0:i]) - } + i := bytes.IndexByte(s, 0) + if i == -1 { + i = len(s) } - return string(s) + return string(s[:i]) } func errstr() string { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index 9a8e6e411..c464783d8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -23,8 +23,11 @@ package unix //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - ts := Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} - return Pselect(nfd, r, w, e, &ts, nil) + var ts *Timespec + if timeout != nil { + ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} + } + return Pselect(nfd, r, w, e, ts, nil) } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo.go new file mode 100644 index 000000000..df9c12371 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo.go @@ -0,0 +1,21 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build linux +// +build gccgo +// +build 386 arm + +package unix + +import ( + "syscall" + "unsafe" +) + +func seek(fd int, offset int64, whence int) (newoffset int64, err syscall.Errno) { + offsetLow := uint32(offset & 0xffffffff) + offsetHigh := uint32((offset >> 32) & 0xffffffff) + _, _, err = Syscall6(SYS__LLSEEK, uintptr(fd), uintptr(offsetHigh), uintptr(offsetLow), uintptr(unsafe.Pointer(&newoffset)), uintptr(whence), 0) + return newoffset, err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index 46aa4ff9c..15a69cbdd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -26,8 +26,11 @@ package unix //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - ts := Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} - return Pselect(nfd, r, w, e, &ts, nil) + var ts *Timespec + if timeout != nil { + ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} + } + return Pselect(nfd, r, w, e, ts, nil) } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_test.go b/vendor/golang.org/x/sys/unix/syscall_linux_test.go index 78d28792d..ff7ad82b1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_test.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_test.go @@ -292,6 +292,10 @@ func TestSchedSetaffinity(t *testing.T) { t.Errorf("CpuClr: didn't clear CPU %d in set: %v", cpu, newMask) } + if runtime.NumCPU() < 2 { + t.Skip("skipping setaffinity tests on single CPU system") + } + err = unix.SchedSetaffinity(0, &newMask) if err != nil { t.Fatalf("SchedSetaffinity: %v", err) diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index cd8f3a9c2..80b05a406 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -7,6 +7,7 @@ package unix import ( + "bytes" "runtime" "sync" "syscall" @@ -52,12 +53,11 @@ func errnoErr(e syscall.Errno) error { // clen returns the index of the first NULL byte in n or len(n) if n contains no NULL byte. func clen(n []byte) int { - for i := 0; i < len(n); i++ { - if n[i] == 0 { - return i - } + i := bytes.IndexByte(n, 0) + if i == -1 { + i = len(n) } - return len(n) + return i } // Mmap manager, for use by operating system-specific implementations. diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go index 2eba7f16b..2c32e1e9c 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go @@ -33,6 +33,7 @@ It has these top-level messages: ListAppProfilesResponse UpdateAppProfileRequest DeleteAppProfileRequest + UpdateAppProfileMetadata CreateTableRequest CreateTableFromSnapshotRequest DropRowRangeRequest @@ -794,6 +795,20 @@ func (m *DeleteAppProfileRequest) GetIgnoreWarnings() bool { return false } +// This is a private alpha release of Cloud Bigtable replication. This feature +// is not currently available to most Cloud Bigtable customers. 
This feature +// might be changed in backward-incompatible ways and is not recommended for +// production use. It is not subject to any SLA or deprecation policy. +// +// The metadata for the Operation returned by UpdateAppProfile. +type UpdateAppProfileMetadata struct { +} + +func (m *UpdateAppProfileMetadata) Reset() { *m = UpdateAppProfileMetadata{} } +func (m *UpdateAppProfileMetadata) String() string { return proto.CompactTextString(m) } +func (*UpdateAppProfileMetadata) ProtoMessage() {} +func (*UpdateAppProfileMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + func init() { proto.RegisterType((*CreateInstanceRequest)(nil), "google.bigtable.admin.v2.CreateInstanceRequest") proto.RegisterType((*GetInstanceRequest)(nil), "google.bigtable.admin.v2.GetInstanceRequest") @@ -816,6 +831,7 @@ func init() { proto.RegisterType((*ListAppProfilesResponse)(nil), "google.bigtable.admin.v2.ListAppProfilesResponse") proto.RegisterType((*UpdateAppProfileRequest)(nil), "google.bigtable.admin.v2.UpdateAppProfileRequest") proto.RegisterType((*DeleteAppProfileRequest)(nil), "google.bigtable.admin.v2.DeleteAppProfileRequest") + proto.RegisterType((*UpdateAppProfileMetadata)(nil), "google.bigtable.admin.v2.UpdateAppProfileMetadata") } // Reference imports to suppress errors if they are not otherwise used. @@ -1617,103 +1633,103 @@ func init() { } var fileDescriptor0 = []byte{ - // 1559 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0xcd, 0x6f, 0xdc, 0x44, - 0x1b, 0xd7, 0x6c, 0xfa, 0xf6, 0x6d, 0x9e, 0xcd, 0xd7, 0x3b, 0x6f, 0xf3, 0x21, 0xd3, 0x8f, 0xd4, - 0xad, 0xda, 0x74, 0x1b, 0x6c, 0xb2, 0xa0, 0xb6, 0x4a, 0x48, 0x45, 0x9b, 0x96, 0x28, 0x28, 0x55, - 0xa3, 0x6d, 0x29, 0x6a, 0x15, 0xb1, 0x9a, 0x64, 0x27, 0x8b, 0x89, 0xd7, 0x36, 0xb6, 0x37, 0x50, - 0xa1, 0x5e, 0x10, 0x42, 0xa8, 0x12, 0x1c, 0x40, 0xe2, 0x52, 0xc1, 0x85, 0x0b, 0xaa, 0x10, 0x88, - 0x0b, 0x37, 0xae, 0x20, 0xc1, 0x91, 0xbf, 0x00, 0x89, 0x33, 0xe2, 0xc6, 0x15, 0xcd, 0x97, 0xd7, - 0xf6, 0xda, 0x6b, 0xa7, 0x55, 0xa5, 0x9e, 0xba, 0x9e, 0x79, 0xe6, 0x99, 0xdf, 0xf3, 0x3c, 0xbf, - 0x67, 0xe6, 0x37, 0x29, 0x9c, 0x6f, 0xbb, 0x6e, 0xdb, 0xa6, 0xe6, 0x96, 0xd5, 0x0e, 0xc9, 0x96, - 0x4d, 0x4d, 0xd2, 0xea, 0x58, 0x8e, 0xb9, 0x57, 0x8f, 0x46, 0x9a, 0x96, 0x13, 0x84, 0xc4, 0xd9, - 0xa6, 0x4d, 0x3e, 0x65, 0x78, 0xbe, 0x1b, 0xba, 0x78, 0x46, 0xac, 0x33, 0x94, 0x95, 0x21, 0x26, - 0xf7, 0xea, 0xda, 0x11, 0xe9, 0x91, 0x78, 0x96, 0x49, 0x1c, 0xc7, 0x0d, 0x49, 0x68, 0xb9, 0x4e, - 0x20, 0xd6, 0x69, 0x67, 0x72, 0xf7, 0x53, 0xdb, 0x48, 0xc3, 0x63, 0xd2, 0xd0, 0x22, 0x1d, 0x73, - 0x6f, 0x81, 0xfd, 0xd3, 0xf4, 0x5c, 0xdb, 0xda, 0xbe, 0x27, 0xe7, 0xb5, 0xe4, 0x7c, 0x62, 0xee, - 0xa4, 0x9c, 0xb3, 0x5d, 0xa7, 0xed, 0x77, 0x1d, 0xc7, 0x72, 0xda, 0xa6, 0xeb, 0x51, 0x3f, 0x81, - 0xe4, 0x39, 0x69, 0xc4, 0xbf, 0xb6, 0xba, 0x3b, 0x26, 0xed, 0x78, 0xa1, 0xf2, 0x30, 0x9b, 0x9e, - 0xdc, 0xb1, 0xa8, 0xdd, 0x6a, 0x76, 0x48, 0xb0, 0x2b, 0x2d, 0x8e, 0xa7, 0x2d, 0x42, 0xab, 0x43, - 0x83, 0x90, 0x74, 0x3c, 0x61, 0xa0, 0xff, 0x56, 0x81, 0xc9, 0x15, 0x9f, 0x92, 0x90, 0xae, 0xc9, - 0xc8, 0x1a, 0xf4, 0x9d, 0x2e, 0x0d, 0x42, 0x3c, 0x05, 0x07, 0x3d, 0xe2, 0x53, 0x27, 0x9c, 0x41, - 0xb3, 0x68, 0x6e, 0xb8, 0x21, 0xbf, 0xf0, 0x71, 0xa8, 0x46, 0xb9, 0xb6, 0x5a, 0x33, 0x15, 0x3e, - 0x09, 0x6a, 0x68, 0xad, 0x85, 0x2f, 0xc1, 0x21, 0xf5, 0x35, 0x33, 0x34, 0x8b, 0xe6, 0xaa, 0x75, - 0xdd, 0xc8, 0xab, 0x83, 0x11, 0xed, 0x1a, 0xad, 0xc1, 0x77, 0xe0, 0xd0, 0xb6, 0xdd, 0x0d, 0x42, - 0xea, 0x07, 0x33, 0x07, 0x66, 0x87, 
0xe6, 0xaa, 0xf5, 0xe5, 0xfc, 0xf5, 0x99, 0xd8, 0x8d, 0x15, - 0xb9, 0xfe, 0x9a, 0x13, 0xfa, 0xf7, 0x1a, 0x91, 0x3b, 0xed, 0x4d, 0x18, 0x4d, 0x4c, 0xe1, 0x09, - 0x18, 0xda, 0xa5, 0xf7, 0x64, 0x84, 0xec, 0x27, 0xbe, 0x00, 0xff, 0xd9, 0x23, 0x76, 0x97, 0xf2, - 0xc0, 0xaa, 0xf5, 0x13, 0x03, 0xb6, 0x16, 0x9e, 0x1a, 0xc2, 0x7e, 0xb1, 0x72, 0x11, 0xe9, 0x73, - 0x80, 0x57, 0x69, 0x98, 0xce, 0x24, 0x86, 0x03, 0x0e, 0xe9, 0x50, 0xb9, 0x0b, 0xff, 0xad, 0x5f, - 0x87, 0xc3, 0xeb, 0x56, 0x10, 0x99, 0x06, 0x45, 0x59, 0x3f, 0x0a, 0xe0, 0x91, 0x36, 0x6d, 0x86, - 0xee, 0x2e, 0x75, 0x64, 0xd2, 0x87, 0xd9, 0xc8, 0x2d, 0x36, 0xa0, 0x7f, 0x8b, 0x60, 0x32, 0xe5, - 0x2f, 0xf0, 0x5c, 0x27, 0xa0, 0xf8, 0x15, 0x18, 0x56, 0x99, 0x0d, 0x66, 0x10, 0x4f, 0x67, 0x99, - 0x72, 0xf4, 0x16, 0xe1, 0xb3, 0x30, 0xb1, 0x43, 0x2c, 0x9b, 0xb6, 0x9a, 0xb6, 0xbb, 0x2d, 0xc8, - 0x39, 0x53, 0x99, 0x1d, 0x9a, 0x1b, 0x6e, 0x8c, 0x8b, 0xf1, 0x75, 0x35, 0x8c, 0x4f, 0xc3, 0xb8, - 0x43, 0xdf, 0x0b, 0x9b, 0x31, 0xa8, 0x43, 0x1c, 0xea, 0x28, 0x1b, 0xde, 0x88, 0xe0, 0x3e, 0x44, - 0x70, 0x64, 0x83, 0xf8, 0xa1, 0x45, 0xec, 0xd7, 0xbd, 0x56, 0x06, 0xf9, 0xe2, 0x1c, 0x42, 0x8f, - 0xc1, 0xa1, 0x25, 0xa8, 0x76, 0xb9, 0x63, 0xde, 0x0c, 0xb2, 0x96, 0x9a, 0x72, 0xa1, 0xba, 0xc1, - 0x78, 0x95, 0xf5, 0xcb, 0x75, 0x12, 0xec, 0x36, 0x40, 0x98, 0xb3, 0xdf, 0xfa, 0x39, 0x98, 0xbc, - 0x4a, 0x6d, 0xda, 0x8f, 0x2a, 0xab, 0x90, 0x0f, 0x10, 0x1c, 0x16, 0x24, 0x54, 0x7c, 0x28, 0xae, - 0xa4, 0xe4, 0x63, 0xaf, 0x7d, 0x86, 0xe5, 0xc8, 0x5a, 0x0b, 0x2f, 0xc1, 0x7f, 0xe5, 0x87, 0x6c, - 0x9e, 0x12, 0x0c, 0x54, 0x2b, 0xf4, 0x33, 0xf0, 0xbf, 0x55, 0x1a, 0xa6, 0x80, 0x64, 0xa1, 0x5e, - 0x87, 0xff, 0x33, 0xba, 0xa8, 0x66, 0x78, 0x42, 0xf6, 0x7d, 0x83, 0x04, 0x9b, 0x7b, 0xee, 0x24, - 0xf9, 0x96, 0x63, 0xad, 0x2c, 0xb8, 0x57, 0x22, 0x9a, 0x68, 0xc9, 0xd3, 0x60, 0x5e, 0x0d, 0x0e, - 0x8b, 0xda, 0x96, 0x48, 0xd2, 0xdf, 0x08, 0xa6, 0x92, 0xe7, 0xcb, 0x75, 0x1a, 0x92, 0x16, 0x09, - 0x09, 0xbe, 0x0b, 0x13, 0xae, 0x6f, 0xb5, 0x2d, 0x87, 0xd8, 0x4d, 0x5f, 0xb8, 0x90, 0x3c, 0x35, - 0xf7, 0x79, 0x56, 0x35, 0xc6, 0x95, 0x23, 0x05, 0x65, 0x19, 0x46, 0xa4, 0xcb, 0x26, 0x3b, 0xad, - 0x73, 0xc9, 0x7b, 0x4b, 0x1d, 0xe5, 0x8d, 0xaa, 0xb4, 0x67, 0x23, 0x8c, 0xfa, 0x3b, 0x96, 0x63, - 0x05, 0x6f, 0x89, 0xd5, 0x43, 0x85, 0xab, 0x41, 0x98, 0xb3, 0x01, 0xfd, 0x1f, 0x04, 0x53, 0xc9, - 0x8e, 0x8c, 0x42, 0x26, 0xb9, 0x21, 0x9f, 0xcf, 0x0f, 0x79, 0x50, 0x93, 0x3f, 0x5b, 0x91, 0xff, - 0x85, 0xd4, 0x45, 0x28, 0x99, 0x11, 0x05, 0x7e, 0x27, 0x37, 0x70, 0xa3, 0xa8, 0xd6, 0x49, 0x92, - 0x3d, 0x5b, 0x01, 0xff, 0x81, 0x60, 0x52, 0xd4, 0x25, 0x1d, 0xf0, 0x7a, 0x6e, 0xc0, 0x25, 0xba, - 0xf7, 0x99, 0x8a, 0xf1, 0x17, 0x04, 0xd3, 0xa2, 0x12, 0x97, 0x3d, 0x6f, 0xc3, 0x77, 0x77, 0x2c, - 0xbb, 0x50, 0xdf, 0x9c, 0x82, 0x31, 0xe2, 0x79, 0x4d, 0x4f, 0x58, 0xf7, 0xce, 0xe8, 0x11, 0x12, - 0xb9, 0x58, 0x6b, 0xe1, 0x6b, 0x50, 0x8d, 0x59, 0x49, 0x58, 0xa7, 0xf2, 0xd3, 0x13, 0xdb, 0x1f, - 0x7a, 0x8e, 0xf0, 0x19, 0x18, 0xb7, 0xda, 0x8e, 0xeb, 0xd3, 0xe6, 0xbb, 0xc4, 0x67, 0x0a, 0x90, - 0x49, 0x1e, 0x34, 0x77, 0xa8, 0x31, 0x26, 0x86, 0xdf, 0x90, 0xa3, 0xec, 0xdc, 0x5a, 0xa5, 0x61, - 0x7f, 0x14, 0x59, 0xe7, 0xd6, 0x0d, 0x98, 0x62, 0xa7, 0x71, 0xcf, 0xf8, 0x49, 0xcf, 0xf7, 0x07, - 0x08, 0xa6, 0xfb, 0x3c, 0xca, 0x23, 0x7e, 0x15, 0x46, 0x62, 0x89, 0x50, 0xc7, 0x7c, 0xb9, 0x4c, - 0x54, 0x7b, 0x99, 0xc8, 0x3c, 0xc1, 0x2b, 0x59, 0x27, 0xf8, 0xcf, 0x08, 0xa6, 0x05, 0x6f, 0xfb, - 0xb3, 0x91, 0xaa, 0x0a, 0x7a, 0xcc, 0xaa, 0x3c, 0x89, 0x7a, 0xc8, 0x2a, 0xe9, 0x50, 0x66, 0x49, - 0x6f, 0xc3, 0xb4, 0xb8, 0x8a, 0x4a, 0x55, 0x35, 0xcb, 0x6f, 
0x25, 0xcb, 0x6f, 0xfd, 0xd1, 0x34, - 0x4c, 0x5e, 0x91, 0xa1, 0xaa, 0x23, 0xf7, 0x32, 0x8b, 0x18, 0x7f, 0x8a, 0x60, 0x2c, 0x79, 0x09, - 0xe1, 0xfd, 0x5e, 0x57, 0xda, 0x51, 0xb5, 0x20, 0xf6, 0x6c, 0x31, 0x6e, 0xa8, 0x67, 0x8b, 0x3e, - 0xff, 0xc1, 0xef, 0x7f, 0x7e, 0x5e, 0x39, 0xad, 0x9f, 0x60, 0x8f, 0xa5, 0xf7, 0x05, 0xbd, 0x96, - 0x3d, 0xdf, 0x7d, 0x9b, 0x6e, 0x87, 0x81, 0x59, 0xbb, 0x1f, 0x3d, 0xa0, 0x82, 0x45, 0x54, 0xc3, - 0x0f, 0x10, 0x54, 0x63, 0x82, 0x19, 0xcf, 0xe7, 0xa3, 0xe9, 0xd7, 0xd5, 0x5a, 0x09, 0x49, 0xa8, - 0x9f, 0xe5, 0x78, 0x4e, 0x62, 0x81, 0x87, 0x25, 0x32, 0x86, 0xa6, 0x07, 0xc6, 0xac, 0xdd, 0xc7, - 0x0f, 0x11, 0x8c, 0x26, 0x34, 0x34, 0x1e, 0x70, 0xbe, 0x67, 0x89, 0x77, 0xcd, 0x2c, 0x6d, 0x2f, - 0x9a, 0x27, 0x85, 0x6e, 0x50, 0xb6, 0xf0, 0x47, 0x08, 0xc6, 0x92, 0xd7, 0x28, 0x2e, 0x11, 0x7f, - 0xa9, 0x1c, 0xc9, 0x9a, 0x69, 0xc5, 0x39, 0x62, 0x35, 0x63, 0x4f, 0x8d, 0xcc, 0x6b, 0x1d, 0x3f, - 0xa6, 0x0e, 0x28, 0xa2, 0xd4, 0xcb, 0x1c, 0xde, 0xf9, 0x7a, 0x8d, 0xc3, 0x8b, 0x1e, 0xe0, 0x03, - 0x71, 0xf6, 0x5e, 0x02, 0x1f, 0x22, 0x18, 0x4b, 0xaa, 0xf9, 0x41, 0x9c, 0xcf, 0xd4, 0xfd, 0xda, - 0x54, 0x5f, 0xeb, 0x5f, 0x63, 0xaf, 0x70, 0x55, 0xbe, 0x5a, 0x09, 0x72, 0x7d, 0x89, 0x60, 0x34, - 0xa1, 0x09, 0xf0, 0x3e, 0xc5, 0x43, 0x51, 0x96, 0x96, 0x39, 0x96, 0x0b, 0xfa, 0x7c, 0x36, 0x95, - 0x12, 0x68, 0x4c, 0xa5, 0xb0, 0x17, 0xd5, 0xcb, 0x01, 0x7f, 0x86, 0x00, 0x7a, 0x4f, 0x07, 0x7c, - 0x6e, 0x60, 0x23, 0xa6, 0x90, 0x15, 0xab, 0x02, 0xfd, 0x25, 0x8e, 0xce, 0xc0, 0xf3, 0x45, 0x99, - 0x8a, 0xa0, 0xb1, 0xa4, 0x7d, 0x8d, 0x60, 0x24, 0xfe, 0xae, 0xc0, 0xcf, 0x0f, 0x6e, 0xb0, 0xd4, - 0x73, 0x46, 0x33, 0xca, 0x9a, 0xcb, 0x76, 0x4c, 0xa2, 0x2c, 0x99, 0x43, 0x76, 0x88, 0x8d, 0x26, - 0x84, 0x14, 0x2e, 0x4e, 0x48, 0x51, 0x35, 0x2f, 0x70, 0x24, 0x0b, 0xda, 0xbe, 0xf2, 0xc5, 0xba, - 0xf3, 0x13, 0x04, 0xa3, 0x89, 0x07, 0xce, 0x20, 0x9e, 0x65, 0xbd, 0x84, 0x72, 0xc9, 0x2e, 0x93, - 0x53, 0xdb, 0x5f, 0x09, 0x7f, 0x40, 0x30, 0x91, 0x56, 0x60, 0x78, 0xa1, 0x88, 0xfa, 0x7d, 0x37, - 0xa2, 0x56, 0xea, 0x12, 0xd7, 0xaf, 0x72, 0x8c, 0x97, 0x74, 0xb3, 0x4c, 0x01, 0x63, 0xe2, 0x63, - 0x31, 0x2e, 0x1b, 0xf0, 0x57, 0x08, 0x46, 0x13, 0x62, 0x6b, 0x50, 0x0e, 0xb3, 0x54, 0x59, 0x49, - 0xb4, 0xb2, 0xc8, 0xd8, 0x2c, 0xcc, 0x68, 0x0c, 0x2a, 0x4b, 0xea, 0xf7, 0x08, 0xc6, 0x53, 0x7a, - 0x0c, 0xbf, 0x30, 0x98, 0xeb, 0xfd, 0x62, 0x50, 0x5b, 0xd8, 0xc7, 0x0a, 0xd9, 0x20, 0x49, 0xc4, - 0xe5, 0xf3, 0x8b, 0x7f, 0x44, 0x30, 0x91, 0x16, 0x6d, 0x83, 0x68, 0x90, 0x23, 0xf0, 0x8a, 0xda, - 0x66, 0x83, 0xe3, 0x7b, 0xad, 0xbe, 0xc4, 0xf1, 0xc5, 0x6a, 0x6a, 0x94, 0xcf, 0x6e, 0x92, 0x0b, - 0x5f, 0x20, 0x98, 0x48, 0xab, 0xb4, 0x41, 0xc0, 0x73, 0x14, 0x5d, 0x6e, 0x57, 0xc9, 0x8c, 0xd6, - 0xf6, 0xcd, 0x81, 0x8f, 0x11, 0x8c, 0x30, 0x4d, 0x44, 0x3a, 0x1b, 0xfc, 0x8f, 0xca, 0x3d, 0x35, - 0x60, 0x91, 0x8e, 0xb1, 0xb7, 0x60, 0xc4, 0x27, 0x15, 0x8a, 0xc9, 0x94, 0x8d, 0x98, 0x8d, 0xee, - 0x8e, 0x3a, 0x07, 0xe1, 0xd3, 0xc0, 0xed, 0xfa, 0xdb, 0xf9, 0x97, 0x6b, 0x3b, 0xe6, 0x99, 0x9d, - 0x39, 0x0c, 0xca, 0xcd, 0x41, 0x50, 0x6e, 0x3e, 0x35, 0x28, 0x41, 0x0a, 0xca, 0x77, 0x08, 0xf0, - 0x2d, 0x1a, 0xf0, 0x41, 0xea, 0x77, 0xac, 0x20, 0xe0, 0x7f, 0x1d, 0x9a, 0x4b, 0x6d, 0xd6, 0x6f, - 0xa2, 0x60, 0x9d, 0x2d, 0x61, 0x29, 0x9b, 0x61, 0x85, 0x43, 0x5d, 0xd6, 0x2f, 0x96, 0x83, 0x1a, - 0xf6, 0x79, 0x5a, 0x44, 0xb5, 0x2b, 0x3f, 0x21, 0x38, 0xb2, 0xed, 0x76, 0x72, 0x09, 0x75, 0x45, - 0xcb, 0x94, 0xf2, 0x1b, 0x8c, 0x45, 0x1b, 0xe8, 0xee, 0xb2, 0x5c, 0xd7, 0x76, 0x6d, 0xe2, 0xb4, - 0x0d, 0xd7, 0x6f, 0x9b, 0x6d, 0xea, 0x70, 0x8e, 0x99, 0x62, 0x8a, 0x78, 0x56, 0xd0, 
0xff, 0xff, - 0x18, 0x4b, 0xfc, 0xc7, 0xa3, 0xca, 0xb1, 0x55, 0xb1, 0x7e, 0xc5, 0x76, 0xbb, 0x2d, 0x43, 0x6d, - 0x65, 0xf0, 0x3d, 0x8c, 0xdb, 0xf5, 0x5f, 0x95, 0xc1, 0x26, 0x37, 0xd8, 0x54, 0x06, 0x9b, 0xdc, - 0x60, 0xf3, 0x76, 0x7d, 0xeb, 0x20, 0xdf, 0xeb, 0xc5, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x2f, - 0xdb, 0x53, 0xda, 0xa2, 0x19, 0x00, 0x00, + // 1566 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0xcf, 0x6f, 0xdc, 0xc4, + 0x17, 0xd7, 0x6c, 0xfa, 0xed, 0xb7, 0x79, 0x9b, 0x5f, 0xdf, 0xf9, 0x36, 0xc9, 0xca, 0xf4, 0x47, + 0xea, 0x56, 0x6d, 0xba, 0x0d, 0x36, 0x59, 0x50, 0x5b, 0x25, 0xa4, 0xa2, 0x4d, 0x4b, 0x14, 0x94, + 0xaa, 0xd1, 0xb6, 0x14, 0xb5, 0x8a, 0x58, 0x4d, 0xb2, 0x93, 0xc5, 0xc4, 0x6b, 0x1b, 0xdb, 0x1b, + 0xa8, 0x50, 0x2f, 0x08, 0x21, 0x54, 0x09, 0x0e, 0x20, 0x71, 0xa9, 0xe0, 0xc2, 0x05, 0x55, 0x08, + 0xc4, 0x85, 0x1b, 0x57, 0x90, 0xe0, 0xc8, 0x5f, 0x80, 0xc4, 0x19, 0x71, 0xe3, 0x8a, 0x66, 0x3c, + 0xe3, 0xb5, 0xbd, 0xfe, 0xb5, 0xad, 0x2a, 0xf5, 0xd4, 0xf5, 0xcc, 0x9b, 0x37, 0x9f, 0xf9, 0xbc, + 0xcf, 0x7b, 0xf3, 0x26, 0x85, 0xf3, 0x1d, 0xdb, 0xee, 0x98, 0x54, 0xdf, 0x36, 0x3a, 0x3e, 0xd9, + 0x36, 0xa9, 0x4e, 0xda, 0x5d, 0xc3, 0xd2, 0xf7, 0x1b, 0xe1, 0x48, 0xcb, 0xb0, 0x3c, 0x9f, 0x58, + 0x3b, 0xb4, 0xc5, 0xa7, 0x34, 0xc7, 0xb5, 0x7d, 0x1b, 0xd7, 0x82, 0x75, 0x9a, 0xb4, 0xd2, 0x82, + 0xc9, 0xfd, 0x86, 0x72, 0x44, 0x78, 0x24, 0x8e, 0xa1, 0x13, 0xcb, 0xb2, 0x7d, 0xe2, 0x1b, 0xb6, + 0xe5, 0x05, 0xeb, 0x94, 0x33, 0x99, 0xfb, 0xc9, 0x6d, 0x84, 0xe1, 0x31, 0x61, 0x68, 0x90, 0xae, + 0xbe, 0xbf, 0xc8, 0xfe, 0x69, 0x39, 0xb6, 0x69, 0xec, 0xdc, 0x13, 0xf3, 0x4a, 0x7c, 0x3e, 0x36, + 0x77, 0x52, 0xcc, 0x99, 0xb6, 0xd5, 0x71, 0x7b, 0x96, 0x65, 0x58, 0x1d, 0xdd, 0x76, 0xa8, 0x1b, + 0x43, 0xf2, 0x9c, 0x30, 0xe2, 0x5f, 0xdb, 0xbd, 0x5d, 0x9d, 0x76, 0x1d, 0x5f, 0x7a, 0x98, 0x4b, + 0x4e, 0xee, 0x1a, 0xd4, 0x6c, 0xb7, 0xba, 0xc4, 0xdb, 0x13, 0x16, 0xc7, 0x93, 0x16, 0xbe, 0xd1, + 0xa5, 0x9e, 0x4f, 0xba, 0x4e, 0x60, 0xa0, 0xfe, 0x56, 0x81, 0xe9, 0x55, 0x97, 0x12, 0x9f, 0xae, + 0x8b, 0x93, 0x35, 0xe9, 0x3b, 0x3d, 0xea, 0xf9, 0x78, 0x06, 0x0e, 0x3a, 0xc4, 0xa5, 0x96, 0x5f, + 0x43, 0x73, 0x68, 0x7e, 0xb4, 0x29, 0xbe, 0xf0, 0x71, 0xa8, 0x86, 0x5c, 0x1b, 0xed, 0x5a, 0x85, + 0x4f, 0x82, 0x1c, 0x5a, 0x6f, 0xe3, 0x4b, 0x70, 0x48, 0x7e, 0xd5, 0x46, 0xe6, 0xd0, 0x7c, 0xb5, + 0xa1, 0x6a, 0x59, 0x71, 0xd0, 0xc2, 0x5d, 0xc3, 0x35, 0xf8, 0x0e, 0x1c, 0xda, 0x31, 0x7b, 0x9e, + 0x4f, 0x5d, 0xaf, 0x76, 0x60, 0x6e, 0x64, 0xbe, 0xda, 0x58, 0xc9, 0x5e, 0x9f, 0x8a, 0x5d, 0x5b, + 0x15, 0xeb, 0xaf, 0x59, 0xbe, 0x7b, 0xaf, 0x19, 0xba, 0x53, 0xde, 0x84, 0xf1, 0xd8, 0x14, 0x9e, + 0x82, 0x91, 0x3d, 0x7a, 0x4f, 0x9c, 0x90, 0xfd, 0xc4, 0x17, 0xe0, 0x3f, 0xfb, 0xc4, 0xec, 0x51, + 0x7e, 0xb0, 0x6a, 0xe3, 0x44, 0xce, 0xd6, 0x81, 0xa7, 0x66, 0x60, 0xbf, 0x54, 0xb9, 0x88, 0xd4, + 0x79, 0xc0, 0x6b, 0xd4, 0x4f, 0x32, 0x89, 0xe1, 0x80, 0x45, 0xba, 0x54, 0xec, 0xc2, 0x7f, 0xab, + 0xd7, 0xe1, 0xf0, 0x86, 0xe1, 0x85, 0xa6, 0x5e, 0x11, 0xeb, 0x47, 0x01, 0x1c, 0xd2, 0xa1, 0x2d, + 0xdf, 0xde, 0xa3, 0x96, 0x20, 0x7d, 0x94, 0x8d, 0xdc, 0x62, 0x03, 0xea, 0xb7, 0x08, 0xa6, 0x13, + 0xfe, 0x3c, 0xc7, 0xb6, 0x3c, 0x8a, 0x5f, 0x81, 0x51, 0xc9, 0xac, 0x57, 0x43, 0x9c, 0xce, 0x32, + 0xe1, 0xe8, 0x2f, 0xc2, 0x67, 0x61, 0x6a, 0x97, 0x18, 0x26, 0x6d, 0xb7, 0x4c, 0x7b, 0x27, 0x10, + 0x67, 0xad, 0x32, 0x37, 0x32, 0x3f, 0xda, 0x9c, 0x0c, 0xc6, 0x37, 0xe4, 0x30, 0x3e, 0x0d, 0x93, + 0x16, 0x7d, 0xcf, 0x6f, 0x45, 0xa0, 0x8e, 0x70, 0xa8, 0xe3, 0x6c, 0x78, 0x33, 0x84, 0xfb, 0x10, + 0xc1, 0x91, 0x4d, 
0xe2, 0xfa, 0x06, 0x31, 0x5f, 0x77, 0xda, 0x29, 0xe2, 0x8b, 0x6a, 0x08, 0x3d, + 0x86, 0x86, 0x96, 0xa1, 0xda, 0xe3, 0x8e, 0x79, 0x32, 0x88, 0x58, 0x2a, 0xd2, 0x85, 0xcc, 0x06, + 0xed, 0x55, 0x96, 0x2f, 0xd7, 0x89, 0xb7, 0xd7, 0x84, 0xc0, 0x9c, 0xfd, 0x56, 0xcf, 0xc1, 0xf4, + 0x55, 0x6a, 0xd2, 0x41, 0x54, 0x69, 0x81, 0x7c, 0x80, 0xe0, 0x70, 0x20, 0x42, 0xa9, 0x87, 0xe2, + 0x48, 0x0a, 0x3d, 0xf6, 0xd3, 0x67, 0x54, 0x8c, 0xac, 0xb7, 0xf1, 0x32, 0xfc, 0x57, 0x7c, 0x88, + 0xe4, 0x29, 0xa1, 0x40, 0xb9, 0x42, 0x3d, 0x03, 0xff, 0x5b, 0xa3, 0x7e, 0x02, 0x48, 0x1a, 0xea, + 0x0d, 0xf8, 0x3f, 0x93, 0x8b, 0x4c, 0x86, 0x27, 0x54, 0xdf, 0x37, 0x28, 0x50, 0x73, 0xdf, 0x9d, + 0x10, 0xdf, 0x4a, 0x24, 0x95, 0x03, 0xed, 0x95, 0x38, 0x4d, 0xb8, 0xe4, 0x69, 0x28, 0xaf, 0x0e, + 0x87, 0x83, 0xd8, 0x96, 0x20, 0xe9, 0x6f, 0x04, 0x33, 0xf1, 0xfa, 0x72, 0x9d, 0xfa, 0xa4, 0x4d, + 0x7c, 0x82, 0xef, 0xc2, 0x94, 0xed, 0x1a, 0x1d, 0xc3, 0x22, 0x66, 0xcb, 0x0d, 0x5c, 0x08, 0x9d, + 0xea, 0x43, 0xd6, 0xaa, 0xe6, 0xa4, 0x74, 0x24, 0xa1, 0xac, 0xc0, 0x98, 0x70, 0xd9, 0x62, 0xd5, + 0x3a, 0x53, 0xbc, 0xb7, 0x64, 0x29, 0x6f, 0x56, 0x85, 0x3d, 0x1b, 0x61, 0xd2, 0xdf, 0x35, 0x2c, + 0xc3, 0x7b, 0x2b, 0x58, 0x3d, 0x52, 0xb8, 0x1a, 0x02, 0x73, 0x36, 0xa0, 0xfe, 0x83, 0x60, 0x26, + 0x9e, 0x91, 0xe1, 0x91, 0x49, 0xe6, 0x91, 0xcf, 0x67, 0x1f, 0x39, 0x2f, 0xc9, 0x9f, 0xad, 0x93, + 0xff, 0x85, 0xe4, 0x45, 0x28, 0x94, 0x11, 0x1e, 0xfc, 0x4e, 0xe6, 0xc1, 0xb5, 0xa2, 0x58, 0xc7, + 0x45, 0xf6, 0x6c, 0x1d, 0xf8, 0x0f, 0x04, 0xd3, 0x41, 0x5c, 0x92, 0x07, 0xde, 0xc8, 0x3c, 0x70, + 0x89, 0xec, 0x7d, 0xa6, 0xce, 0xf8, 0x0b, 0x82, 0xd9, 0x20, 0x12, 0x97, 0x1d, 0x67, 0xd3, 0xb5, + 0x77, 0x0d, 0xb3, 0xb0, 0xbf, 0x39, 0x05, 0x13, 0xc4, 0x71, 0x5a, 0x4e, 0x60, 0xdd, 0xaf, 0xd1, + 0x63, 0x24, 0x74, 0xb1, 0xde, 0xc6, 0xd7, 0xa0, 0x1a, 0xb1, 0x12, 0xb0, 0x4e, 0x65, 0xd3, 0x13, + 0xd9, 0x1f, 0xfa, 0x8e, 0xf0, 0x19, 0x98, 0x34, 0x3a, 0x96, 0xed, 0xd2, 0xd6, 0xbb, 0xc4, 0x65, + 0x1d, 0x20, 0x6b, 0x79, 0xd0, 0xfc, 0xa1, 0xe6, 0x44, 0x30, 0xfc, 0x86, 0x18, 0x65, 0x75, 0x6b, + 0x8d, 0xfa, 0x83, 0xa7, 0x48, 0xab, 0x5b, 0x37, 0x60, 0x86, 0x55, 0xe3, 0xbe, 0xf1, 0x93, 0xd6, + 0xf7, 0x07, 0x08, 0x66, 0x07, 0x3c, 0x8a, 0x12, 0xbf, 0x06, 0x63, 0x11, 0x22, 0x64, 0x99, 0x2f, + 0xc7, 0x44, 0xb5, 0xcf, 0x44, 0x6a, 0x05, 0xaf, 0xa4, 0x55, 0xf0, 0x9f, 0x11, 0xcc, 0x06, 0xba, + 0x1d, 0x64, 0x23, 0x11, 0x15, 0xf4, 0x98, 0x51, 0x79, 0x92, 0xee, 0x21, 0x2d, 0xa4, 0x23, 0xa9, + 0x21, 0xbd, 0x0d, 0xb3, 0xc1, 0x55, 0x54, 0x2a, 0xaa, 0x69, 0x7e, 0x2b, 0xa9, 0x7e, 0x15, 0xa8, + 0x25, 0xf9, 0x91, 0xa9, 0xdd, 0x78, 0x34, 0x0b, 0xd3, 0x57, 0x04, 0x0d, 0xb2, 0x1c, 0x5f, 0x66, + 0x6c, 0xe0, 0x4f, 0x11, 0x4c, 0xc4, 0x2f, 0x28, 0x3c, 0xec, 0x55, 0xa6, 0x1c, 0x95, 0x0b, 0x22, + 0x4f, 0x1a, 0xed, 0x86, 0x7c, 0xd2, 0xa8, 0x0b, 0x1f, 0xfc, 0xfe, 0xe7, 0xe7, 0x95, 0xd3, 0xea, + 0x09, 0xf6, 0x90, 0x7a, 0x3f, 0x90, 0xde, 0x8a, 0xe3, 0xda, 0x6f, 0xd3, 0x1d, 0xdf, 0xd3, 0xeb, + 0xf7, 0xc3, 0xc7, 0x95, 0xb7, 0x84, 0xea, 0xf8, 0x01, 0x82, 0x6a, 0xa4, 0x99, 0xc6, 0x0b, 0xd9, + 0x68, 0x06, 0x7b, 0x6e, 0xa5, 0x44, 0xbb, 0xa8, 0x9e, 0xe5, 0x78, 0x4e, 0xe2, 0x00, 0x0f, 0x23, + 0x39, 0x82, 0xa6, 0x0f, 0x46, 0xaf, 0xdf, 0xc7, 0x0f, 0x11, 0x8c, 0xc7, 0xfa, 0x6b, 0x9c, 0x53, + 0xfb, 0xd3, 0x1a, 0x7b, 0x45, 0x2f, 0x6d, 0x1f, 0x24, 0x56, 0x02, 0x5d, 0x1e, 0x5b, 0xf8, 0x23, + 0x04, 0x13, 0xf1, 0x2b, 0x16, 0x97, 0x38, 0x7f, 0x29, 0x8e, 0x44, 0xcc, 0x94, 0x62, 0x8e, 0x58, + 0xcc, 0xd8, 0x33, 0x24, 0xf5, 0xca, 0xc7, 0x8f, 0xd9, 0x23, 0x14, 0x49, 0xea, 0x65, 0x0e, 0xef, + 0x7c, 0xa3, 0xce, 0xe1, 0x85, 0x8f, 0xf3, 
0x5c, 0x9c, 0xfd, 0x57, 0xc2, 0x87, 0x08, 0x26, 0xe2, + 0x9d, 0x7e, 0x9e, 0xe6, 0x53, 0xdf, 0x04, 0xca, 0xcc, 0x40, 0x59, 0xb8, 0xc6, 0x5e, 0xe8, 0x32, + 0x7c, 0xf5, 0x12, 0xe2, 0xfa, 0x12, 0xc1, 0x78, 0xac, 0x5f, 0xc0, 0x43, 0x36, 0x16, 0x45, 0x2c, + 0xad, 0x70, 0x2c, 0x17, 0xd4, 0x85, 0x74, 0x29, 0xc5, 0xd0, 0xe8, 0xb2, 0xfb, 0x5e, 0x92, 0xaf, + 0x0a, 0xfc, 0x19, 0x02, 0xe8, 0x3f, 0x2b, 0xf0, 0xb9, 0xdc, 0x44, 0x4c, 0x20, 0x2b, 0xee, 0x18, + 0xd4, 0x97, 0x38, 0x3a, 0x0d, 0x2f, 0x14, 0x31, 0x15, 0x42, 0x63, 0xa4, 0x7d, 0x8d, 0x60, 0x2c, + 0xfa, 0xe6, 0xc0, 0xcf, 0xe7, 0x27, 0x58, 0xe2, 0xa9, 0xa3, 0x68, 0x65, 0xcd, 0x45, 0x3a, 0xc6, + 0x51, 0x96, 0xe4, 0x90, 0x15, 0xb1, 0xf1, 0x58, 0x93, 0x85, 0x8b, 0x09, 0x29, 0x8a, 0xe6, 0x05, + 0x8e, 0x64, 0x51, 0x19, 0x8a, 0x2f, 0x96, 0x9d, 0x9f, 0x20, 0x18, 0x8f, 0x3d, 0x7e, 0xf2, 0x74, + 0x96, 0xf6, 0x4a, 0xca, 0x14, 0xbb, 0x20, 0xa7, 0x3e, 0x5c, 0x08, 0x7f, 0x40, 0x30, 0x95, 0xec, + 0xce, 0xf0, 0x62, 0x91, 0xf4, 0x07, 0x6e, 0x4b, 0xa5, 0xd4, 0x05, 0xaf, 0x5e, 0xe5, 0x18, 0x2f, + 0xa9, 0x7a, 0x99, 0x00, 0x46, 0x1a, 0x93, 0xa5, 0x68, 0x4b, 0x81, 0xbf, 0x42, 0x30, 0x1e, 0x6b, + 0xc4, 0xf2, 0x38, 0x4c, 0xeb, 0xd8, 0x4a, 0xa2, 0x15, 0x41, 0xc6, 0x7a, 0x21, 0xa3, 0x11, 0xa8, + 0x8c, 0xd4, 0xef, 0x11, 0x4c, 0x26, 0x7a, 0x35, 0xfc, 0x42, 0xbe, 0xd6, 0x07, 0x1b, 0x45, 0x65, + 0x71, 0x88, 0x15, 0x22, 0x41, 0xe2, 0x88, 0xcb, 0xf3, 0x8b, 0x7f, 0x44, 0x30, 0x95, 0x6c, 0x58, + 0xf2, 0x64, 0x90, 0xd1, 0xfc, 0x15, 0xa5, 0xcd, 0x26, 0xc7, 0xf7, 0x5a, 0x63, 0x99, 0xe3, 0x8b, + 0xc4, 0x54, 0x2b, 0xcf, 0x6e, 0x5c, 0x0b, 0x5f, 0x20, 0x98, 0x4a, 0x76, 0x70, 0x79, 0xc0, 0x33, + 0xba, 0xbd, 0xcc, 0xac, 0x12, 0x8c, 0xd6, 0x87, 0xd6, 0xc0, 0xc7, 0x08, 0xc6, 0x58, 0x4f, 0x44, + 0xba, 0x9b, 0xfc, 0x0f, 0xce, 0xfd, 0x6e, 0xc0, 0x20, 0x5d, 0x6d, 0x7f, 0x51, 0x8b, 0x4e, 0x4a, + 0x14, 0xd3, 0x09, 0x9b, 0x60, 0x36, 0xbc, 0x3b, 0x1a, 0x1c, 0x84, 0x4b, 0x3d, 0xbb, 0xe7, 0xee, + 0x64, 0x5f, 0xae, 0x9d, 0x88, 0x67, 0x56, 0x73, 0x18, 0x94, 0x9b, 0x79, 0x50, 0x6e, 0x3e, 0x35, + 0x28, 0x5e, 0x02, 0xca, 0x77, 0x08, 0xf0, 0x2d, 0xea, 0xf1, 0x41, 0xea, 0x76, 0x0d, 0xcf, 0xe3, + 0x7f, 0x39, 0x9a, 0x4f, 0x6c, 0x36, 0x68, 0x22, 0x61, 0x9d, 0x2d, 0x61, 0x29, 0x92, 0x61, 0x95, + 0x43, 0x5d, 0x51, 0x2f, 0x96, 0x83, 0xea, 0x0f, 0x78, 0x5a, 0x42, 0xf5, 0x2b, 0x3f, 0x21, 0x38, + 0xb2, 0x63, 0x77, 0x33, 0x05, 0x75, 0x45, 0x49, 0x6d, 0xe5, 0x37, 0x99, 0x8a, 0x36, 0xd1, 0xdd, + 0x15, 0xb1, 0xae, 0x63, 0x9b, 0xc4, 0xea, 0x68, 0xb6, 0xdb, 0xd1, 0x3b, 0xd4, 0xe2, 0x1a, 0xd3, + 0x83, 0x29, 0xe2, 0x18, 0xde, 0xe0, 0xff, 0x71, 0x2c, 0xf3, 0x1f, 0x8f, 0x2a, 0xc7, 0xd6, 0x82, + 0xf5, 0xab, 0xa6, 0xdd, 0x6b, 0x6b, 0x72, 0x2b, 0x8d, 0xef, 0xa1, 0xdd, 0x6e, 0xfc, 0x2a, 0x0d, + 0xb6, 0xb8, 0xc1, 0x96, 0x34, 0xd8, 0xe2, 0x06, 0x5b, 0xb7, 0x1b, 0xdb, 0x07, 0xf9, 0x5e, 0x2f, + 0xfe, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x1c, 0xa0, 0x3f, 0x27, 0xbe, 0x19, 0x00, 0x00, } diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1/agent.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1/agent.pb.go index 204423db3..de8066895 100644 --- a/vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1/agent.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1/agent.pb.go @@ -250,8 +250,7 @@ func (m *Agent) GetClassificationThreshold() float32 { return 0 } -// The request message for -// [Agents.GetAgent][google.cloud.dialogflow.v2beta1.Agents.GetAgent]. 
+// The request message for [Agents.GetAgent][google.cloud.dialogflow.v2beta1.Agents.GetAgent]. type GetAgentRequest struct { // Required. The project that the agent to fetch is associated with. // Format: `projects/<Project ID>`. Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` } @@ -270,8 +269,7 @@ func (m *GetAgentRequest) GetParent() string { return "" } -// The request message for -// [Agents.SearchAgents][google.cloud.dialogflow.v2beta1.Agents.SearchAgents]. +// The request message for [Agents.SearchAgents][google.cloud.dialogflow.v2beta1.Agents.SearchAgents]. type SearchAgentsRequest struct { // Required. The project to list agents from. // Format: `projects/<Project ID>`. Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` @@ -309,8 +307,7 @@ func (m *SearchAgentsRequest) GetPageToken() string { return "" } -// The response message for -// [Agents.SearchAgents][google.cloud.dialogflow.v2beta1.Agents.SearchAgents]. +// The response message for [Agents.SearchAgents][google.cloud.dialogflow.v2beta1.Agents.SearchAgents]. type SearchAgentsResponse struct { // The list of agents. There will be a maximum number of items returned based // on the page_size field in the request. @@ -339,8 +336,7 @@ func (m *SearchAgentsResponse) GetNextPageToken() string { return "" } -// The request message for -// [Agents.TrainAgent][google.cloud.dialogflow.v2beta1.Agents.TrainAgent]. +// The request message for [Agents.TrainAgent][google.cloud.dialogflow.v2beta1.Agents.TrainAgent]. type TrainAgentRequest struct { // Required. The project that the agent to train is associated with. // Format: `projects/<Project ID>`. Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` @@ -359,15 +355,11 @@ func (m *TrainAgentRequest) GetParent() string { return "" } -// The request message for -// [Agents.ExportAgent][google.cloud.dialogflow.v2beta1.Agents.ExportAgent]. +// The request message for [Agents.ExportAgent][google.cloud.dialogflow.v2beta1.Agents.ExportAgent]. type ExportAgentRequest struct { // Required. The project that the agent to export is associated with. // Format: `projects/<Project ID>`. Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` - // Warning: Exporting agents to a URI is not implemented yet. - // This feature is coming soon. - // // Optional. The Google Cloud Storage URI to export the agent to. // Note: The URI must start with // "gs://". If left unspecified, the serialized agent is returned inline. @@ -393,8 +385,7 @@ func (m *ExportAgentRequest) GetAgentUri() string { return "" } -// The response message for -// [Agents.ExportAgent][google.cloud.dialogflow.v2beta1.Agents.ExportAgent]. +// The response message for [Agents.ExportAgent][google.cloud.dialogflow.v2beta1.Agents.ExportAgent]. type ExportAgentResponse struct { // Required. The exported agent. // @@ -510,8 +501,7 @@ func _ExportAgentResponse_OneofSizer(msg proto.Message) (n int) { return n } -// The request message for -// [Agents.ImportAgent][google.cloud.dialogflow.v2beta1.Agents.ImportAgent]. +// The request message for [Agents.ImportAgent][google.cloud.dialogflow.v2beta1.Agents.ImportAgent]. type ImportAgentRequest struct { // Required. The project that the agent to import is associated with. // Format: `projects/<Project ID>`. @@ -637,8 +627,7 @@ func _ImportAgentRequest_OneofSizer(msg proto.Message) (n int) { return n } -// The request message for -// [Agents.RestoreAgent][google.cloud.dialogflow.v2beta1.Agents.RestoreAgent]. +// The request message for [Agents.RestoreAgent][google.cloud.dialogflow.v2beta1.Agents.RestoreAgent]. type RestoreAgentRequest struct { // Required. The project that the agent to restore is associated with. // Format: `projects/<Project ID>`.
@@ -807,8 +796,7 @@ type AgentsClient interface { // Exports the specified agent to a ZIP file. // // - // Operation ExportAgent(ctx context.Context, in *ExportAgentRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) // Imports the specified agent from a ZIP file. @@ -916,8 +904,7 @@ type AgentsServer interface { // Exports the specified agent to a ZIP file. // // - // Operation ExportAgent(context.Context, *ExportAgentRequest) (*google_longrunning.Operation, error) // Imports the specified agent from a ZIP file. diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1/context.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1/context.pb.go index dafb43531..63066cbbb 100644 --- a/vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1/context.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1/context.pb.go @@ -24,8 +24,13 @@ var _ = math.Inf // Represents a context. type Context struct { // Required. The unique identifier of the context. Format: - // `projects/<Project ID>/agent/sessions/<Session ID>/contexts/<Context ID>`. - // Note: The Context ID is always converted to lowercase. + // `projects/<Project ID>/agent/sessions/<Session ID>/contexts/<Context ID>`, + // or + // `projects/<Project ID>/agent/runtimes/<Runtime ID>/sessions/<Session ID>/contexts/<Context ID>`. + // Note: Runtimes are under construction and will be available soon. + // The Context ID is always converted to lowercase. + // If <Runtime ID> is not specified, we assume default 'sandbox' runtime. Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // Optional. The number of conversational query requests after which the // context expires. If set to `0` (the default) the context expires @@ -64,11 +69,13 @@ func (m *Context) GetParameters() *google_protobuf4.Struct { return nil } -// The request message for -// [Contexts.ListContexts][google.cloud.dialogflow.v2beta1.Contexts.ListContexts]. +// The request message for [Contexts.ListContexts][google.cloud.dialogflow.v2beta1.Contexts.ListContexts]. type ListContextsRequest struct { // Required. The session to list all contexts from. - // Format: `projects/<Project ID>/agent/sessions/<Session ID>`. + // Format: `projects/<Project ID>/agent/sessions/<Session ID>` or + // `projects/<Project ID>/agent/runtimes/<Runtime ID>/sessions/<Session ID>`. + // Note: Runtimes are under construction and will be available soon. + // If <Runtime ID> is not specified, we assume default 'sandbox' runtime. Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` // Optional. The maximum number of items to return in a single page. By // default 100 and at most 1000. @@ -103,8 +110,7 @@ func (m *ListContextsRequest) GetPageToken() string { return "" } -// The response message for -// [Contexts.ListContexts][google.cloud.dialogflow.v2beta1.Contexts.ListContexts]. +// The response message for [Contexts.ListContexts][google.cloud.dialogflow.v2beta1.Contexts.ListContexts]. type ListContextsResponse struct { // The list of contexts. There will be a maximum number of items // returned based on the page_size field in the request. @@ -133,11 +139,14 @@ func (m *ListContextsResponse) GetNextPageToken() string { return "" } -// The request message for -// [Contexts.GetContext][google.cloud.dialogflow.v2beta1.Contexts.GetContext]. +// The request message for [Contexts.GetContext][google.cloud.dialogflow.v2beta1.Contexts.GetContext]. type GetContextRequest struct { // Required. The name of the context. Format: - // `projects/<Project ID>/agent/sessions/<Session ID>/contexts/<Context ID>`. + // `projects/<Project ID>/agent/sessions/<Session ID>/contexts/<Context ID>` + // or `projects/<Project ID>/agent/runtimes/<Runtime ID>/sessions/<Session ID>/contexts/<Context ID>`.
Note: Runtimes are under construction and will + be available soon. If <Runtime ID> is not specified, we assume default + 'sandbox' runtime. Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` } @@ -153,11 +162,13 @@ func (m *GetContextRequest) GetName() string { return "" } -// The request message for -// [Contexts.CreateContext][google.cloud.dialogflow.v2beta1.Contexts.CreateContext]. +// The request message for [Contexts.CreateContext][google.cloud.dialogflow.v2beta1.Contexts.CreateContext]. type CreateContextRequest struct { // Required. The session to create a context for. - // Format: `projects/<Project ID>/agent/sessions/<Session ID>`. + // Format: `projects/<Project ID>/agent/sessions/<Session ID>` or + // `projects/<Project ID>/agent/runtimes/<Runtime ID>/sessions/<Session ID>`. + // Note: Runtimes are under construction and will be available soon. + // If <Runtime ID> is not specified, we assume default 'sandbox' runtime. Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` // Required. The context to create. Context *Context `protobuf:"bytes,2,opt,name=context" json:"context,omitempty"` @@ -182,8 +193,7 @@ func (m *CreateContextRequest) GetContext() *Context { return nil } -// The request message for -// [Contexts.UpdateContext][google.cloud.dialogflow.v2beta1.Contexts.UpdateContext]. +// The request message for [Contexts.UpdateContext][google.cloud.dialogflow.v2beta1.Contexts.UpdateContext]. type UpdateContextRequest struct { // Required. The context to update. Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` @@ -210,11 +220,14 @@ func (m *UpdateContextRequest) GetUpdateMask() *google_protobuf3.FieldMask { return nil } -// The request message for -// [Contexts.DeleteContext][google.cloud.dialogflow.v2beta1.Contexts.DeleteContext]. +// The request message for [Contexts.DeleteContext][google.cloud.dialogflow.v2beta1.Contexts.DeleteContext]. type DeleteContextRequest struct { // Required. The name of the context to delete. Format: - // `projects/<Project ID>/agent/sessions/<Session ID>/contexts/<Context ID>`. + // `projects/<Project ID>/agent/sessions/<Session ID>/contexts/<Context ID>` + // or `projects/<Project ID>/agent/runtimes/<Runtime ID>/sessions/<Session ID>/contexts/<Context ID>`. Note: Runtimes are under construction and will + // be available soon. If <Runtime ID> is not specified, we assume default + // 'sandbox' runtime. Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` } @@ -230,11 +243,13 @@ func (m *DeleteContextRequest) GetName() string { return "" } -// The request message for -// [Contexts.DeleteAllContexts][google.cloud.dialogflow.v2beta1.Contexts.DeleteAllContexts]. +// The request message for [Contexts.DeleteAllContexts][google.cloud.dialogflow.v2beta1.Contexts.DeleteAllContexts]. type DeleteAllContextsRequest struct { // Required. The name of the session to delete all contexts from. Format: - // `projects/<Project ID>/agent/sessions/<Session ID>`. + // `projects/<Project ID>/agent/sessions/<Session ID>` or `projects/<Project ID>/agent/runtimes/<Runtime ID>/sessions/<Session ID>`. Note: Runtimes are + // under construction and will be available soon. If <Runtime ID> is not + // specified we assume default 'sandbox' runtime.
Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` } @@ -513,51 +528,55 @@ var _Contexts_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("google/cloud/dialogflow/v2beta1/context.proto", fileDescriptor1) } var fileDescriptor1 = []byte{ - // 731 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x96, 0xcd, 0x6e, 0xd3, 0x4a, - 0x14, 0xc7, 0x35, 0xe9, 0xbd, 0x6d, 0x7a, 0xda, 0xdc, 0xab, 0x0e, 0x51, 0x89, 0xd2, 0xa2, 0x46, - 0x46, 0x40, 0x14, 0x09, 0x5b, 0x84, 0x6f, 0x2a, 0x2a, 0xb5, 0x09, 0xad, 0x2a, 0x81, 0xa8, 0x52, - 0x60, 0xc1, 0x26, 0x9a, 0x26, 0x27, 0x96, 0xa9, 0xe3, 0x31, 0x9e, 0x09, 0x94, 0xa2, 0x6e, 0x10, - 0x6f, 0xc0, 0x82, 0x1d, 0x0b, 0x16, 0x2c, 0xfa, 0x06, 0x88, 0x05, 0xe2, 0x19, 0x78, 0x05, 0x1e, - 0x82, 0x25, 0xf2, 0x78, 0x9c, 0x8f, 0x26, 0x25, 0x71, 0x77, 0xf1, 0xcc, 0x7f, 0xce, 0xf9, 0x9d, - 0x33, 0xff, 0x63, 0x07, 0xae, 0xda, 0x9c, 0xdb, 0x2e, 0x5a, 0x0d, 0x97, 0x77, 0x9a, 0x56, 0xd3, - 0x61, 0x2e, 0xb7, 0x5b, 0x2e, 0x7f, 0x6d, 0xbd, 0x2a, 0xef, 0xa1, 0x64, 0xd7, 0xac, 0x06, 0xf7, - 0x24, 0x1e, 0x48, 0xd3, 0x0f, 0xb8, 0xe4, 0x74, 0x25, 0x92, 0x9b, 0x4a, 0x6e, 0xf6, 0xe4, 0xa6, - 0x96, 0xe7, 0x97, 0x75, 0x3c, 0xe6, 0x3b, 0x16, 0xf3, 0x3c, 0x2e, 0x99, 0x74, 0xb8, 0x27, 0xa2, - 0xe3, 0xf9, 0x25, 0xbd, 0xab, 0x9e, 0xf6, 0x3a, 0x2d, 0x0b, 0xdb, 0xbe, 0x7c, 0xa3, 0x37, 0x0b, - 0x27, 0x37, 0x5b, 0x0e, 0xba, 0xcd, 0x7a, 0x9b, 0x89, 0x7d, 0xad, 0x58, 0x3e, 0xa9, 0x10, 0x32, - 0xe8, 0x34, 0x34, 0x9b, 0x71, 0x04, 0x33, 0x95, 0x08, 0x96, 0x52, 0xf8, 0xc7, 0x63, 0x6d, 0xcc, - 0x91, 0x02, 0x29, 0xce, 0xd6, 0xd4, 0x6f, 0x7a, 0x09, 0xfe, 0x73, 0x9d, 0x16, 0x0a, 0x9f, 0x79, - 0xf5, 0x06, 0xef, 0x78, 0x32, 0x97, 0x2a, 0x90, 0xe2, 0xbf, 0xb5, 0x4c, 0xbc, 0x5a, 0x09, 0x17, - 0xe9, 0x6d, 0x00, 0x9f, 0x05, 0xac, 0x8d, 0x12, 0x03, 0x91, 0x9b, 0x2a, 0x90, 0xe2, 0x5c, 0xf9, - 0xbc, 0xa9, 0xcb, 0x8e, 0x13, 0x9b, 0xbb, 0x2a, 0x71, 0xad, 0x4f, 0x6a, 0x38, 0x70, 0xee, 0xa1, - 0x23, 0xa4, 0x46, 0x10, 0x35, 0x7c, 0xd9, 0x41, 0x21, 0xe9, 0x22, 0x4c, 0xfb, 0x2c, 0x40, 0x4f, - 0x6a, 0x18, 0xfd, 0x44, 0x97, 0x60, 0xd6, 0x67, 0x36, 0xd6, 0x85, 0x73, 0x88, 0x9a, 0x24, 0x1d, - 0x2e, 0xec, 0x3a, 0x87, 0x48, 0x2f, 0x84, 0x10, 0x36, 0xd6, 0x25, 0xdf, 0x47, 0x4f, 0x41, 0xcc, - 0xd6, 0x94, 0xfc, 0x49, 0xb8, 0x60, 0xbc, 0x27, 0x90, 0x1d, 0xcc, 0x25, 0x7c, 0xee, 0x09, 0xa4, - 0x55, 0x48, 0xeb, 0xfb, 0x12, 0x39, 0x52, 0x98, 0x2a, 0xce, 0x95, 0x8b, 0xe6, 0x98, 0x1b, 0x33, - 0x75, 0x90, 0x5a, 0xf7, 0x24, 0xbd, 0x0c, 0xff, 0x7b, 0x78, 0x20, 0xeb, 0x7d, 0x08, 0x29, 0x85, - 0x90, 0x09, 0x97, 0x77, 0xba, 0x18, 0x57, 0x60, 0x61, 0x0b, 0x63, 0x88, 0xb8, 0xde, 0x11, 0xad, - 0x37, 0x02, 0xc8, 0x56, 0x02, 0x64, 0x12, 0x4f, 0x68, 0x4f, 0xeb, 0xcd, 0x06, 0xcc, 0x68, 0x18, - 0x95, 0x38, 0x49, 0x15, 0xf1, 0x41, 0xe3, 0x23, 0x81, 0xec, 0x53, 0xbf, 0x39, 0x9c, 0xb4, 0x2f, - 0x38, 0x39, 0x63, 0x70, 0xba, 0x0a, 0x73, 0x1d, 0x15, 0x5b, 0xb9, 0x53, 0x43, 0xe6, 0x87, 0x5c, - 0xb2, 0x19, 0x1a, 0xf8, 0x11, 0x13, 0xfb, 0x35, 0x88, 0xe4, 0xe1, 0x6f, 0xa3, 0x04, 0xd9, 0x2a, - 0xba, 0x38, 0x04, 0x36, 0xaa, 0x73, 0x65, 0xc8, 0x45, 0xda, 0x75, 0xd7, 0x9d, 0xd0, 0x59, 0xe5, - 0xef, 0x69, 0x48, 0xc7, 0x5a, 0xfa, 0x8d, 0xc0, 0x7c, 0xbf, 0x55, 0xe8, 0x8d, 0xb1, 0xd5, 0x8e, - 0x70, 0x71, 0xfe, 0x66, 0xc2, 0x53, 0x91, 0x1f, 0x8d, 0xb5, 0x77, 0x3f, 0x7f, 0x7d, 0x48, 0xdd, - 0xa1, 0xb7, 0xba, 0xaf, 0x93, 0xb7, 0x11, 0xe4, 0x7d, 0x3f, 0xe0, 0x2f, 0xb0, 0x21, 0x85, 0x55, - 0xb2, 0x98, 0x8d, 0x9e, 0xb4, 0x04, 0x0a, 0x11, 0xbe, 0x29, 0xac, 0xd2, 0x91, 0xd5, 0x75, 
0xe2, - 0x31, 0x01, 0xe8, 0x59, 0x8c, 0x96, 0xc7, 0x52, 0x0c, 0xf9, 0x31, 0x3f, 0xf1, 0xed, 0x8e, 0x82, - 0x0d, 0xef, 0xe0, 0x6f, 0xa8, 0x5d, 0x52, 0xab, 0x74, 0x44, 0xbf, 0x12, 0xc8, 0x0c, 0xd8, 0x9c, - 0x8e, 0xef, 0xda, 0xa8, 0xb1, 0x48, 0x80, 0xbc, 0xa5, 0x90, 0xd7, 0x8d, 0x33, 0xf6, 0xf7, 0x5e, - 0xd7, 0xd0, 0x3f, 0x08, 0x64, 0x06, 0xa6, 0x65, 0x02, 0xf6, 0x51, 0xd3, 0x95, 0x80, 0xfd, 0xb1, - 0x62, 0xdf, 0x2e, 0xaf, 0xf5, 0xd8, 0xe3, 0x6f, 0x4d, 0x92, 0xb6, 0xf7, 0x6a, 0xf8, 0x44, 0x20, - 0x33, 0x30, 0x58, 0x13, 0xd4, 0x30, 0x6a, 0x10, 0xf3, 0x8b, 0x43, 0x83, 0xfc, 0x20, 0xfc, 0x4c, - 0xc5, 0x06, 0x29, 0x9d, 0xd5, 0x20, 0x5f, 0x08, 0x2c, 0x0c, 0x4d, 0x33, 0xbd, 0x3b, 0x21, 0xe4, - 0xf0, 0x1b, 0x20, 0x09, 0x68, 0x12, 0x5b, 0x6c, 0x1c, 0x13, 0xb8, 0xd8, 0xe0, 0xed, 0x71, 0x60, - 0x1b, 0xf3, 0x1a, 0x68, 0x27, 0x4c, 0xbf, 0x43, 0x9e, 0x6f, 0xeb, 0x03, 0x36, 0x77, 0x99, 0x67, - 0x9b, 0x3c, 0xb0, 0x2d, 0x1b, 0x3d, 0x05, 0x67, 0x45, 0x5b, 0xcc, 0x77, 0xc4, 0xa9, 0xff, 0x35, - 0x56, 0x7b, 0x4b, 0xbf, 0x09, 0xf9, 0x9c, 0x4a, 0x55, 0x37, 0x8f, 0x53, 0x2b, 0x5b, 0x51, 0xcc, - 0x8a, 0x82, 0xa8, 0xf6, 0x20, 0x9e, 0x45, 0x87, 0xf6, 0xa6, 0x55, 0xfc, 0xeb, 0x7f, 0x02, 0x00, - 0x00, 0xff, 0xff, 0x69, 0x4e, 0x3f, 0xa2, 0xca, 0x08, 0x00, 0x00, + // 793 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xcd, 0x6f, 0xd3, 0x48, + 0x14, 0xd7, 0xb8, 0xbb, 0xfd, 0x98, 0x34, 0xbb, 0xea, 0x6c, 0xd4, 0x8d, 0xd2, 0xae, 0x1a, 0x79, + 0xb5, 0x4b, 0x14, 0x09, 0x5b, 0x98, 0x2f, 0x41, 0x05, 0x52, 0x9b, 0xd0, 0xaa, 0x52, 0x91, 0xaa, + 0xb4, 0x70, 0xe8, 0x25, 0x9a, 0x26, 0x2f, 0x96, 0xa9, 0x33, 0x63, 0x3c, 0x13, 0x28, 0x45, 0x39, + 0xf0, 0x71, 0xe1, 0xc4, 0x01, 0x09, 0xc4, 0x09, 0x89, 0x03, 0x87, 0xfe, 0x3b, 0xfc, 0x0b, 0x3d, + 0x72, 0xe0, 0xc8, 0x0d, 0x64, 0x7b, 0x9c, 0x8f, 0xc6, 0x25, 0x49, 0xcb, 0xcd, 0x7e, 0xf3, 0x7b, + 0x6f, 0x7e, 0xbf, 0x37, 0xbf, 0x79, 0x36, 0xbe, 0x68, 0x73, 0x6e, 0xbb, 0x60, 0xd6, 0x5c, 0xde, + 0xaa, 0x9b, 0x75, 0x87, 0xba, 0xdc, 0x6e, 0xb8, 0xfc, 0xb1, 0xf9, 0xc8, 0xda, 0x03, 0x49, 0x2f, + 0x99, 0x35, 0xce, 0x24, 0x1c, 0x48, 0xc3, 0xf3, 0xb9, 0xe4, 0x64, 0x29, 0x82, 0x1b, 0x21, 0xdc, + 0xe8, 0xc2, 0x0d, 0x05, 0xcf, 0x2d, 0xaa, 0x7a, 0xd4, 0x73, 0x4c, 0xca, 0x18, 0x97, 0x54, 0x3a, + 0x9c, 0x89, 0x28, 0x3d, 0xb7, 0xa0, 0x56, 0xc3, 0xb7, 0xbd, 0x56, 0xc3, 0x84, 0xa6, 0x27, 0x9f, + 0xa8, 0xc5, 0xfc, 0xc9, 0xc5, 0x86, 0x03, 0x6e, 0xbd, 0xda, 0xa4, 0x62, 0x5f, 0x21, 0x16, 0x4f, + 0x22, 0x84, 0xf4, 0x5b, 0x35, 0xc5, 0x4d, 0x6f, 0xe3, 0xa9, 0x52, 0x44, 0x96, 0x10, 0xfc, 0x1b, + 0xa3, 0x4d, 0xc8, 0xa2, 0x3c, 0x2a, 0xcc, 0x54, 0xc2, 0x67, 0xf2, 0x1f, 0xfe, 0xc3, 0x75, 0x1a, + 0x20, 0x3c, 0xca, 0xaa, 0x35, 0xde, 0x62, 0x32, 0xab, 0xe5, 0x51, 0xe1, 0xf7, 0x4a, 0x3a, 0x8e, + 0x96, 0x82, 0x20, 0xb9, 0x8e, 0xb1, 0x47, 0x7d, 0xda, 0x04, 0x09, 0xbe, 0xc8, 0x4e, 0xe4, 0x51, + 0x21, 0x65, 0xfd, 0x6d, 0x28, 0xd9, 0xf1, 0xc6, 0xc6, 0x76, 0xb8, 0x71, 0xa5, 0x07, 0xaa, 0x3b, + 0xf8, 0xaf, 0x4d, 0x47, 0x48, 0x45, 0x41, 0x54, 0xe0, 0x61, 0x0b, 0x84, 0x24, 0xf3, 0x78, 0xd2, + 0xa3, 0x3e, 0x30, 0xa9, 0xc8, 0xa8, 0x37, 0xb2, 0x80, 0x67, 0x3c, 0x6a, 0x43, 0x55, 0x38, 0x87, + 0xa0, 0x98, 0x4c, 0x07, 0x81, 0x6d, 0xe7, 0x10, 0xc8, 0x3f, 0x01, 0x09, 0x1b, 0xaa, 0x92, 0xef, + 0x03, 0x0b, 0x49, 0xcc, 0x54, 0x42, 0xf8, 0x4e, 0x10, 0xd0, 0x5f, 0x22, 0x9c, 0xe9, 0xdf, 0x4b, + 0x78, 0x9c, 0x09, 0x20, 0x65, 0x3c, 0xad, 0xce, 0x4b, 0x64, 0x51, 0x7e, 0xa2, 0x90, 0xb2, 0x0a, + 0xc6, 0x90, 0x13, 0x33, 0x54, 0x91, 0x4a, 0x27, 0x93, 0xfc, 0x8f, 0xff, 0x64, 0x70, 0x20, 0xab, + 
0x3d, 0x14, 0xb4, 0x90, 0x42, 0x3a, 0x08, 0x6f, 0x75, 0x68, 0x5c, 0xc0, 0x73, 0xeb, 0x10, 0x93, + 0x88, 0xf5, 0x26, 0xb4, 0x5e, 0xf7, 0x71, 0xa6, 0xe4, 0x03, 0x95, 0x70, 0x02, 0x7b, 0x5a, 0x6f, + 0x56, 0xf1, 0x94, 0x22, 0x13, 0x6e, 0x3c, 0x8e, 0x8a, 0x38, 0x51, 0x7f, 0x87, 0x70, 0xe6, 0x9e, + 0x57, 0x1f, 0xdc, 0xb4, 0xa7, 0x38, 0x3a, 0x63, 0x71, 0xb2, 0x8c, 0x53, 0xad, 0xb0, 0x76, 0xe8, + 0x4e, 0x45, 0x32, 0x37, 0xe0, 0x92, 0xb5, 0xc0, 0xc0, 0x77, 0xa9, 0xd8, 0xaf, 0xe0, 0x08, 0x1e, + 0x3c, 0xeb, 0x45, 0x9c, 0x29, 0x83, 0x0b, 0x03, 0xc4, 0x92, 0x3a, 0x67, 0xe1, 0x6c, 0x84, 0x5d, + 0x71, 0xdd, 0x11, 0x9d, 0x65, 0x7d, 0x4f, 0xe1, 0xe9, 0x18, 0x4b, 0x9e, 0x69, 0x78, 0xb6, 0xd7, + 0x2a, 0xe4, 0xca, 0x50, 0xb5, 0x09, 0x2e, 0xce, 0x5d, 0x1d, 0x33, 0x2b, 0xf2, 0xa3, 0xfe, 0x02, + 0x3d, 0xff, 0x7c, 0xfc, 0x46, 0x6b, 0x93, 0x6b, 0x9d, 0x79, 0xf2, 0x34, 0x62, 0x79, 0xcb, 0xf3, + 0xf9, 0x03, 0xa8, 0x49, 0x61, 0x16, 0x4d, 0x6a, 0x03, 0x93, 0xa6, 0x00, 0x21, 0x82, 0x51, 0x61, + 0x16, 0xdb, 0xf1, 0xd0, 0x11, 0xbb, 0x25, 0xb2, 0x32, 0x3c, 0xd3, 0x6f, 0x31, 0xe9, 0x34, 0x21, + 0x08, 0x24, 0x15, 0x21, 0x5f, 0x11, 0xc6, 0x5d, 0xa3, 0x12, 0x6b, 0xa8, 0x96, 0x01, 0x57, 0xe7, + 0x46, 0xf6, 0x48, 0xa2, 0xe4, 0xe0, 0x28, 0x7f, 0x26, 0xb8, 0x43, 0xd5, 0x2c, 0xb6, 0xfb, 0x25, + 0x27, 0x67, 0x26, 0x0a, 0xee, 0x2d, 0x42, 0x5e, 0x69, 0x38, 0xdd, 0x77, 0xe5, 0xc8, 0xf0, 0x13, + 0x4c, 0xba, 0xa2, 0x63, 0x08, 0x7f, 0x1b, 0x09, 0x7f, 0x8d, 0xf4, 0x33, 0x1e, 0xf6, 0xcd, 0xf8, + 0x7a, 0xed, 0x6e, 0xea, 0xe7, 0x3f, 0xf5, 0x4e, 0x35, 0xf2, 0x5e, 0xc3, 0xe9, 0xbe, 0x49, 0x30, + 0x42, 0x2f, 0x92, 0x26, 0xc7, 0x18, 0xbd, 0xf8, 0x14, 0xf5, 0xe2, 0x03, 0xb2, 0x6e, 0x77, 0x85, + 0xc4, 0x5f, 0xd2, 0x71, 0xdc, 0xd0, 0xed, 0xc9, 0x8e, 0xb5, 0x31, 0x6a, 0xa9, 0xa1, 0xf6, 0xe8, + 0xf6, 0xe6, 0x18, 0xe1, 0x74, 0xdf, 0x30, 0x1a, 0xa1, 0x37, 0x49, 0xc3, 0x2b, 0x37, 0x3f, 0x30, + 0xfc, 0xee, 0x04, 0x9f, 0xf6, 0xce, 0x75, 0x28, 0x9e, 0xf9, 0x3a, 0x14, 0x7f, 0xc1, 0x75, 0xf8, + 0x82, 0xf0, 0xdc, 0xc0, 0x1c, 0x25, 0x37, 0x46, 0x94, 0x3a, 0x38, 0x7b, 0xc7, 0x92, 0x3b, 0xde, + 0xc0, 0x2b, 0x9e, 0xdf, 0xfa, 0xab, 0x47, 0x08, 0xff, 0x5b, 0xe3, 0xcd, 0x61, 0xf2, 0x56, 0x67, + 0x95, 0xac, 0xad, 0x40, 0xc4, 0x16, 0xda, 0xdd, 0x50, 0x09, 0x36, 0x77, 0x29, 0xb3, 0x0d, 0xee, + 0xdb, 0xa6, 0x0d, 0x2c, 0x94, 0x68, 0x46, 0x4b, 0xd4, 0x73, 0xc4, 0xa9, 0xff, 0x8a, 0xcb, 0xdd, + 0xd0, 0x37, 0x84, 0x3e, 0x6a, 0x5a, 0x79, 0xed, 0x48, 0x5b, 0x5a, 0x8f, 0x6a, 0x96, 0x42, 0x12, + 0xe5, 0x2e, 0x89, 0xfb, 0x51, 0xd2, 0xde, 0x64, 0x58, 0xff, 0xf2, 0x8f, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x61, 0xec, 0x01, 0x66, 0x8a, 0x0a, 0x00, 0x00, } diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1/entity_type.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1/entity_type.pb.go index 31d44cbf8..6364ec6d7 100644 --- a/vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1/entity_type.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1/entity_type.pb.go @@ -175,8 +175,7 @@ func (m *EntityType_Entity) GetSynonyms() []string { return nil } -// The request message for -// [EntityTypes.ListEntityTypes][google.cloud.dialogflow.v2beta1.EntityTypes.ListEntityTypes]. +// The request message for [EntityTypes.ListEntityTypes][google.cloud.dialogflow.v2beta1.EntityTypes.ListEntityTypes]. type ListEntityTypesRequest struct { // Required. The agent to list all entity types from. // Format: `projects//agent`. 
@@ -227,8 +226,7 @@ func (m *ListEntityTypesRequest) GetPageToken() string { return "" } -// The response message for -// [EntityTypes.ListEntityTypes][google.cloud.dialogflow.v2beta1.EntityTypes.ListEntityTypes]. +// The response message for [EntityTypes.ListEntityTypes][google.cloud.dialogflow.v2beta1.EntityTypes.ListEntityTypes]. type ListEntityTypesResponse struct { // The list of agent entity types. There will be a maximum number of items // returned based on the page_size field in the request. @@ -257,8 +255,7 @@ func (m *ListEntityTypesResponse) GetNextPageToken() string { return "" } -// The request message for -// [EntityTypes.GetEntityType][google.cloud.dialogflow.v2beta1.EntityTypes.GetEntityType]. +// The request message for [EntityTypes.GetEntityType][google.cloud.dialogflow.v2beta1.EntityTypes.GetEntityType]. type GetEntityTypeRequest struct { // Required. The name of the entity type. // Format: `projects/<Project ID>/agent/entityTypes/<EntityType ID>`. @@ -290,8 +287,7 @@ func (m *GetEntityTypeRequest) GetLanguageCode() string { return "" } -// The request message for -// [EntityTypes.CreateEntityType][google.cloud.dialogflow.v2beta1.EntityTypes.CreateEntityType]. +// The request message for [EntityTypes.CreateEntityType][google.cloud.dialogflow.v2beta1.EntityTypes.CreateEntityType]. type CreateEntityTypeRequest struct { // Required. The agent to create a entity type for. // Format: `projects/<Project ID>/agent`. @@ -332,8 +328,7 @@ func (m *CreateEntityTypeRequest) GetLanguageCode() string { return "" } -// The request message for -// [EntityTypes.UpdateEntityType][google.cloud.dialogflow.v2beta1.EntityTypes.UpdateEntityType]. +// The request message for [EntityTypes.UpdateEntityType][google.cloud.dialogflow.v2beta1.EntityTypes.UpdateEntityType]. type UpdateEntityTypeRequest struct { // Required. The entity type to update. // Format: `projects/<Project ID>/agent/entityTypes/<EntityType ID>`. @@ -374,8 +369,7 @@ func (m *UpdateEntityTypeRequest) GetUpdateMask() *google_protobuf3.FieldMask { return nil } -// The request message for -// [EntityTypes.DeleteEntityType][google.cloud.dialogflow.v2beta1.EntityTypes.DeleteEntityType]. +// The request message for [EntityTypes.DeleteEntityType][google.cloud.dialogflow.v2beta1.EntityTypes.DeleteEntityType]. type DeleteEntityTypeRequest struct { // Required. The name of the entity type to delete. // Format: `projects/<Project ID>/agent/entityTypes/<EntityType ID>`. @@ -394,8 +388,7 @@ func (m *DeleteEntityTypeRequest) GetName() string { return "" } -// The request message for -// [EntityTypes.BatchUpdateEntityTypes][google.cloud.dialogflow.v2beta1.EntityTypes.BatchUpdateEntityTypes]. +// The request message for [EntityTypes.BatchUpdateEntityTypes][google.cloud.dialogflow.v2beta1.EntityTypes.BatchUpdateEntityTypes]. type BatchUpdateEntityTypesRequest struct { // Required. The name of the agent to update or create entity types in. // Format: `projects/<Project ID>/agent`. @@ -553,8 +546,7 @@ func _BatchUpdateEntityTypesRequest_OneofSizer(msg proto.Message) (n int) { return n } -// The response message for -// [EntityTypes.BatchUpdateEntityTypes][google.cloud.dialogflow.v2beta1.EntityTypes.BatchUpdateEntityTypes]. +// The response message for [EntityTypes.BatchUpdateEntityTypes][google.cloud.dialogflow.v2beta1.EntityTypes.BatchUpdateEntityTypes]. type BatchUpdateEntityTypesResponse struct { // The collection of updated or created entity types.
EntityTypes []*EntityType `protobuf:"bytes,1,rep,name=entity_types,json=entityTypes" json:"entity_types,omitempty"` @@ -572,8 +564,7 @@ func (m *BatchUpdateEntityTypesResponse) GetEntityTypes() []*EntityType { return nil } -// The request message for -// [EntityTypes.BatchDeleteEntityTypes][google.cloud.dialogflow.v2beta1.EntityTypes.BatchDeleteEntityTypes]. +// The request message for [EntityTypes.BatchDeleteEntityTypes][google.cloud.dialogflow.v2beta1.EntityTypes.BatchDeleteEntityTypes]. type BatchDeleteEntityTypesRequest struct { // Required. The name of the agent to delete all entities types for. Format: // `projects/<Project ID>/agent`. @@ -602,8 +593,7 @@ func (m *BatchDeleteEntityTypesRequest) GetEntityTypeNames() []string { return nil } -// The request message for -// [EntityTypes.BatchCreateEntities][google.cloud.dialogflow.v2beta1.EntityTypes.BatchCreateEntities]. +// The request message for [EntityTypes.BatchCreateEntities][google.cloud.dialogflow.v2beta1.EntityTypes.BatchCreateEntities]. type BatchCreateEntitiesRequest struct { // Required. The name of the entity type to create entities in. Format: // `projects/<Project ID>/agent/entityTypes/<EntityType ID>`. @@ -644,8 +634,7 @@ func (m *BatchCreateEntitiesRequest) GetLanguageCode() string { return "" } -// The response message for -// [EntityTypes.BatchCreateEntities][google.cloud.dialogflow.v2beta1.EntityTypes.BatchCreateEntities]. +// The response message for [EntityTypes.BatchCreateEntities][google.cloud.dialogflow.v2beta1.EntityTypes.BatchCreateEntities]. type BatchUpdateEntitiesRequest struct { // Required. The name of the entity type to update the entities in. Format: // `projects/<Project ID>/agent/entityTypes/<EntityType ID>`. @@ -695,8 +684,7 @@ func (m *BatchUpdateEntitiesRequest) GetUpdateMask() *google_protobuf3.FieldMask { return nil } -// The request message for -// [EntityTypes.BatchDeleteEntities][google.cloud.dialogflow.v2beta1.EntityTypes.BatchDeleteEntities]. +// The request message for [EntityTypes.BatchDeleteEntities][google.cloud.dialogflow.v2beta1.EntityTypes.BatchDeleteEntities]. type BatchDeleteEntitiesRequest struct { // Required. The name of the entity type to delete entries for. Format: // `projects/<Project ID>/agent/entityTypes/<EntityType ID>`. @@ -800,8 +788,7 @@ type EntityTypesClient interface { DeleteEntityType(ctx context.Context, in *DeleteEntityTypeRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) // Updates/Creates multiple entity types in the specified agent. // - // Operation BatchUpdateEntityTypes(ctx context.Context, in *BatchUpdateEntityTypesRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) // Deletes entity types in the specified agent. @@ -940,8 +927,7 @@ type EntityTypesServer interface { DeleteEntityType(context.Context, *DeleteEntityTypeRequest) (*google_protobuf2.Empty, error) // Updates/Creates multiple entity types in the specified agent. // - // Operation BatchUpdateEntityTypes(context.Context, *BatchUpdateEntityTypesRequest) (*google_longrunning.Operation, error) // Deletes entity types in the specified agent.
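[Reviewer sketch] The entity-type hunks above keep the paged ListEntityTypes surface: page_size defaults to 100 (at most 1000) and paging continues via next_page_token. A minimal Go pagination sketch against the generated client follows, assuming a ready *grpc.ClientConn (dialing, auth, and retry policy elided); only field and getter names visible in these hunks plus the standard generated NewEntityTypesClient constructor are used, and the parent value is a placeholder.

package dialogflowsketch

import (
	"context"

	dialogflowpb "google.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1"
	"google.golang.org/grpc"
)

// listAllEntityTypes drains every page of EntityTypes.ListEntityTypes.
func listAllEntityTypes(ctx context.Context, conn *grpc.ClientConn, parent string) ([]*dialogflowpb.EntityType, error) {
	client := dialogflowpb.NewEntityTypesClient(conn)
	// parent is e.g. "projects/<Project ID>/agent" (placeholder value).
	req := &dialogflowpb.ListEntityTypesRequest{Parent: parent}
	var all []*dialogflowpb.EntityType
	for {
		resp, err := client.ListEntityTypes(ctx, req)
		if err != nil {
			return nil, err
		}
		all = append(all, resp.GetEntityTypes()...)
		// An empty next_page_token marks the last page.
		if resp.GetNextPageToken() == "" {
			return all, nil
		}
		req.PageToken = resp.GetNextPageToken()
	}
}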
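[Reviewer sketch] The intent.pb.go hunk below adds MlDisabled (tag 19) and deprecates MlEnabled, with the documented default flipping on April 15th, 2018 when neither field is set. A hedged helper showing how a caller might reconcile the two flags; effectiveMlDisabled and its createdBeforeCutoff argument are illustrative, not part of the generated API, and since proto3 cannot distinguish an unset bool from false this only approximates the documented defaulting.

package dialogflowsketch

import (
	dialogflowpb "google.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1"
)

// effectiveMlDisabled reports whether ML matching should be treated as off
// for an intent: prefer the new ml_disabled field, fall back to the
// deprecated ml_enabled one, and otherwise apply the date-based default
// from the comment in the hunk below. The caller supplies
// createdBeforeCutoff because the message itself carries no timestamp.
func effectiveMlDisabled(in *dialogflowpb.Intent, createdBeforeCutoff bool) bool {
	if in.GetMlDisabled() {
		return true // new field wins when set to true
	}
	if in.GetMlEnabled() {
		return false // deprecated field explicitly enables ML matching
	}
	// Both false: treat as "neither set". Default is ml_disabled = true
	// before April 15th, 2018 and ml_disabled = false after it.
	return createdBeforeCutoff
}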
diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1/intent.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1/intent.pb.go index b343de048..9184b14bc 100644 --- a/vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1/intent.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1/intent.pb.go @@ -185,7 +185,19 @@ type Intent struct { // Note: If `ml_enabled` setting is set to false, then this intent is not // taken into account during inference in `ML ONLY` match mode. Also, // auto-markup in the UI is turned off. + // DEPRECATED! Please use `ml_disabled` field instead. + // NOTE: If neither `ml_enabled` nor `ml_disabled` field is set, then the + // default value is determined as follows: + // - Before April 15th, 2018 the default is: + // ml_enabled = false / ml_disabled = true. + // - After April 15th, 2018 the default is: + // ml_enabled = true / ml_disabled = false. MlEnabled bool `protobuf:"varint,5,opt,name=ml_enabled,json=mlEnabled" json:"ml_enabled,omitempty"` + // Optional. Indicates whether Machine Learning is disabled for the intent. + // Note: If `ml_disabled` setting is set to true, then this intent is not + // taken into account during inference in `ML ONLY` match mode. Also, + // auto-markup in the UI is turned off. + MlDisabled bool `protobuf:"varint,19,opt,name=ml_disabled,json=mlDisabled" json:"ml_disabled,omitempty"` // Optional. The list of context names required for this intent to be // triggered. // Format: `projects/<Project ID>/agent/sessions/-/contexts/<Context ID>`. @@ -277,6 +289,13 @@ func (m *Intent) GetMlEnabled() bool { return false } +func (m *Intent) GetMlDisabled() bool { + if m != nil { + return m.MlDisabled + } + return false +} + func (m *Intent) GetInputContextNames() []string { if m != nil { return m.InputContextNames @@ -1564,8 +1583,7 @@ func (m *Intent_FollowupIntentInfo) GetParentFollowupIntentName() string { return "" } -// The request message for -// [Intents.ListIntents][google.cloud.dialogflow.v2beta1.Intents.ListIntents]. +// The request message for [Intents.ListIntents][google.cloud.dialogflow.v2beta1.Intents.ListIntents]. type ListIntentsRequest struct { // Required. The agent to list all intents from. // Format: `projects/<Project ID>/agent`. @@ -1625,8 +1643,7 @@ func (m *ListIntentsRequest) GetPageToken() string { return "" } -// The response message for -// [Intents.ListIntents][google.cloud.dialogflow.v2beta1.Intents.ListIntents]. +// The response message for [Intents.ListIntents][google.cloud.dialogflow.v2beta1.Intents.ListIntents]. type ListIntentsResponse struct { // The list of agent intents. There will be a maximum number of items // returned based on the page_size field in the request. @@ -1655,8 +1672,7 @@ func (m *ListIntentsResponse) GetNextPageToken() string { return "" } -// The request message for -// [Intents.GetIntent][google.cloud.dialogflow.v2beta1.Intents.GetIntent]. +// The request message for [Intents.GetIntent][google.cloud.dialogflow.v2beta1.Intents.GetIntent]. type GetIntentRequest struct { // Required. The name of the intent. // Format: `projects/<Project ID>/agent/intents/<Intent ID>`. @@ -1697,8 +1713,7 @@ func (m *GetIntentRequest) GetIntentView() IntentView { return IntentView_INTENT_VIEW_UNSPECIFIED } -// The request message for -// [Intents.CreateIntent][google.cloud.dialogflow.v2beta1.Intents.CreateIntent]. +// The request message for [Intents.CreateIntent][google.cloud.dialogflow.v2beta1.Intents.CreateIntent]. type CreateIntentRequest struct { // Required.
The agent to create a intent for. // Format: `projects/<Project ID>/agent`. @@ -1748,8 +1763,7 @@ func (m *CreateIntentRequest) GetIntentView() IntentView { return IntentView_INTENT_VIEW_UNSPECIFIED } -// The request message for -// [Intents.UpdateIntent][google.cloud.dialogflow.v2beta1.Intents.UpdateIntent]. +// The request message for [Intents.UpdateIntent][google.cloud.dialogflow.v2beta1.Intents.UpdateIntent]. type UpdateIntentRequest struct { // Required. The intent to update. // Format: `projects/<Project ID>/agent/intents/<Intent ID>`. @@ -1799,8 +1813,7 @@ func (m *UpdateIntentRequest) GetIntentView() IntentView { return IntentView_INTENT_VIEW_UNSPECIFIED } -// The request message for -// [Intents.DeleteIntent][google.cloud.dialogflow.v2beta1.Intents.DeleteIntent]. +// The request message for [Intents.DeleteIntent][google.cloud.dialogflow.v2beta1.Intents.DeleteIntent]. type DeleteIntentRequest struct { // Required. The name of the intent to delete. // Format: `projects/<Project ID>/agent/intents/<Intent ID>`. @@ -1819,8 +1832,7 @@ func (m *DeleteIntentRequest) GetName() string { return "" } -// The request message for -// [Intents.BatchUpdateIntents][google.cloud.dialogflow.v2beta1.Intents.BatchUpdateIntents]. +// The request message for [Intents.BatchUpdateIntents][google.cloud.dialogflow.v2beta1.Intents.BatchUpdateIntents]. type BatchUpdateIntentsRequest struct { // Required. The name of the agent to update or create intents in. // Format: `projects/<Project ID>/agent`. @@ -1981,8 +1993,7 @@ func _BatchUpdateIntentsRequest_OneofSizer(msg proto.Message) (n int) { return n } -// The response message for -// [Intents.BatchUpdateIntents][google.cloud.dialogflow.v2beta1.Intents.BatchUpdateIntents]. +// The response message for [Intents.BatchUpdateIntents][google.cloud.dialogflow.v2beta1.Intents.BatchUpdateIntents]. type BatchUpdateIntentsResponse struct { // The collection of updated or created intents. Intents []*Intent `protobuf:"bytes,1,rep,name=intents" json:"intents,omitempty"` @@ -2000,8 +2011,7 @@ func (m *BatchUpdateIntentsResponse) GetIntents() []*Intent { return nil } -// The request message for -// [Intents.BatchDeleteIntents][google.cloud.dialogflow.v2beta1.Intents.BatchDeleteIntents]. +// The request message for [Intents.BatchDeleteIntents][google.cloud.dialogflow.v2beta1.Intents.BatchDeleteIntents]. type BatchDeleteIntentsRequest struct { // Required. The name of the agent to delete all entities types for. Format: // `projects/<Project ID>/agent`. @@ -2112,8 +2122,7 @@ type IntentsClient interface { DeleteIntent(ctx context.Context, in *DeleteIntentRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) // Updates/Creates multiple intents in the specified agent. // - // Operation + // Operation BatchUpdateIntents(ctx context.Context, in *BatchUpdateIntentsRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) // Deletes intents in the specified agent. // @@ -2207,8 +2216,7 @@ type IntentsServer interface { DeleteIntent(context.Context, *DeleteIntentRequest) (*google_protobuf2.Empty, error) // Updates/Creates multiple intents in the specified agent. // - // Operation + // Operation BatchUpdateIntents(context.Context, *BatchUpdateIntentsRequest) (*google_longrunning.Operation, error) // Deletes intents in the specified agent.
// @@ -2386,14 +2394,14 @@ var _Intents_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("google/cloud/dialogflow/v2beta1/intent.proto", fileDescriptor3) } var fileDescriptor3 = []byte{ - // 2577 bytes of a gzipped FileDescriptorProto + // 2592 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0xcd, 0x73, 0x23, 0x47, 0xd9, 0xf7, 0xe8, 0xc3, 0x96, 0x1e, 0xc9, 0xb2, 0xdc, 0xde, 0x6c, 0xb4, 0x93, 0xa4, 0xe2, 0x28, 0x6f, 0xf2, 0x3a, 0x7e, 0x13, 0xe9, 0x8d, 0x92, 0x37, 0x1f, 0xbb, 0x6f, 0x12, 0x64, 0x5b, 0x5e, 0x0b, 0xcb, 0x96, 0x76, 0x2c, 0x6f, 0xd8, 0x14, 0x30, 0x35, 0x92, 0xda, 0xda, 0xc6, 0xa3, 0x99, - 0xc9, 0x74, 0xcb, 0x1b, 0x05, 0x52, 0x45, 0x51, 0x05, 0x1c, 0xb8, 0x50, 0x70, 0xa2, 0xb8, 0xc1, - 0x29, 0x14, 0x07, 0x2a, 0x37, 0xfe, 0x04, 0x0e, 0x9c, 0x38, 0xe6, 0x40, 0x51, 0x45, 0xf1, 0x27, - 0x50, 0xdc, 0xa0, 0xfa, 0x63, 0xa4, 0x91, 0xa5, 0x45, 0x92, 0xbd, 0x70, 0xe1, 0x36, 0xfd, 0x74, + 0xc9, 0x74, 0xcb, 0x1b, 0x05, 0x52, 0x45, 0x51, 0x05, 0x1c, 0xb8, 0x50, 0x70, 0xa0, 0x28, 0x6e, + 0x70, 0x0a, 0xc5, 0x81, 0xca, 0x8d, 0x3f, 0x81, 0x03, 0x27, 0x8e, 0x39, 0x50, 0x54, 0x51, 0xfc, + 0x0d, 0xdc, 0xa0, 0xfa, 0x63, 0xa4, 0x91, 0xa5, 0x45, 0x92, 0xbd, 0x70, 0xe1, 0x36, 0xfd, 0x74, 0xf7, 0xef, 0xf9, 0xea, 0xfe, 0x3d, 0xdd, 0x2d, 0xc1, 0xab, 0x5d, 0xd7, 0xed, 0xda, 0xb8, 0xd8, 0xb6, 0xdd, 0x7e, 0xa7, 0xd8, 0x21, 0x96, 0xed, 0x76, 0xcf, 0x6c, 0xf7, 0x51, 0xf1, 0xa2, 0xd4, 0xc2, 0xcc, 0x7a, 0xbd, 0x48, 0x1c, 0x86, 0x1d, 0x56, 0xf0, 0x7c, 0x97, 0xb9, 0xe8, 0x79, 0x39, @@ -2402,151 +2410,151 @@ var fileDescriptor3 = []byte{ 0xf0, 0x27, 0x4a, 0x9b, 0xfe, 0xa2, 0x1a, 0x6e, 0xbb, 0x4e, 0xd7, 0xef, 0x3b, 0x0e, 0x71, 0xba, 0x45, 0xd7, 0xc3, 0xfe, 0x18, 0xe6, 0x33, 0x6a, 0x90, 0x68, 0xb5, 0xfa, 0x67, 0x45, 0xdc, 0xf3, 0xd8, 0x40, 0x75, 0x6e, 0x5e, 0xee, 0x3c, 0x23, 0xd8, 0xee, 0x98, 0x3d, 0x8b, 0x9e, 0xab, 0x11, - 0xcf, 0x5e, 0x1e, 0x41, 0x99, 0xdf, 0x6f, 0x2b, 0x0b, 0xf2, 0x7f, 0x7d, 0x05, 0x96, 0xab, 0x22, - 0x00, 0x08, 0x41, 0xcc, 0xb1, 0x7a, 0x38, 0xa7, 0x6d, 0x6a, 0x5b, 0x49, 0x43, 0x7c, 0xa3, 0x17, - 0x20, 0xdd, 0x21, 0xd4, 0xb3, 0xad, 0x81, 0x29, 0xfa, 0x22, 0xa2, 0x2f, 0xa5, 0x64, 0xc7, 0x7c, - 0xc8, 0x03, 0x58, 0x7d, 0x84, 0x5b, 0x0f, 0x5d, 0xf7, 0xdc, 0xa4, 0xcc, 0x62, 0x38, 0xb7, 0xbc, - 0xa9, 0x6d, 0x65, 0x4a, 0x6f, 0x16, 0x66, 0x44, 0xb2, 0x20, 0xd5, 0x16, 0x3e, 0x94, 0x93, 0x4f, - 0xf8, 0x5c, 0x23, 0xfd, 0x28, 0xd4, 0x42, 0x3a, 0x24, 0x3c, 0x9f, 0xb8, 0x3e, 0x61, 0x83, 0x5c, - 0x74, 0x53, 0xdb, 0x8a, 0x1b, 0xc3, 0x36, 0x7a, 0x1e, 0x52, 0x84, 0x9a, 0x67, 0x96, 0x6d, 0xb7, - 0xac, 0xf6, 0x79, 0x2e, 0xb6, 0xa9, 0x6d, 0x25, 0x0c, 0x20, 0x74, 0x5f, 0x49, 0xd0, 0x73, 0x00, - 0x3d, 0xdb, 0xc4, 0x8e, 0xd5, 0xb2, 0x71, 0x27, 0x17, 0x17, 0xfd, 0xc9, 0x9e, 0x5d, 0x91, 0x02, - 0x54, 0x80, 0x0d, 0xe2, 0x78, 0x7d, 0x66, 0xaa, 0x8c, 0x08, 0xff, 0x68, 0x6e, 0x65, 0x33, 0xba, - 0x95, 0x34, 0xd6, 0x45, 0xd7, 0xae, 0xec, 0xe1, 0x5e, 0x52, 0x74, 0x13, 0x96, 0xf1, 0x05, 0x76, - 0x18, 0xcd, 0x25, 0xc4, 0x10, 0xd5, 0x42, 0x16, 0x64, 0x99, 0x6f, 0x11, 0x9e, 0x3a, 0xd3, 0x7b, - 0xe8, 0x5b, 0x14, 0xd3, 0x5c, 0x72, 0x33, 0xba, 0x95, 0x2a, 0xbd, 0x35, 0x6f, 0x04, 0x9a, 0x6a, - 0x7e, 0x43, 0x4c, 0x37, 0xd6, 0xd8, 0x58, 0x5b, 0xa8, 0xb6, 0xda, 0x7c, 0x45, 0xe4, 0x40, 0x84, - 0x5f, 0xb5, 0xd0, 0x3d, 0x58, 0x73, 0xfb, 0x2c, 0xe4, 0x03, 0xcd, 0xa5, 0x84, 0xe6, 0xad, 0x99, - 0x9a, 0x95, 0x6b, 0x46, 0x46, 0x02, 0xa8, 0x26, 0x45, 0x2f, 0x41, 0xc6, 0xc7, 0x14, 0x87, 0x10, - 0xd3, 0x22, 0x70, 0xab, 0x42, 0x3a, 0x1c, 0x76, 0x0f, 0xc0, 0xb3, 0x7c, 0xab, 
0x87, 0x19, 0xf6, - 0x69, 0x6e, 0x55, 0x28, 0x7d, 0x7d, 0x5e, 0x77, 0x1b, 0xc1, 0x4c, 0x23, 0x04, 0x82, 0x0e, 0x21, - 0xd1, 0xc3, 0x94, 0x5a, 0x5d, 0x4c, 0x73, 0x19, 0x01, 0x58, 0x9c, 0x17, 0xf0, 0x48, 0xce, 0x33, - 0x86, 0x00, 0xe8, 0x02, 0xf4, 0x0e, 0x3e, 0xb3, 0xfa, 0x36, 0x33, 0x7d, 0x4c, 0x3d, 0xd7, 0xa1, - 0xd8, 0xf4, 0x6c, 0x8b, 0x9d, 0xb9, 0x7e, 0x8f, 0xe6, 0xd6, 0x36, 0xa3, 0x5b, 0x99, 0xd2, 0x3b, - 0x0b, 0xc2, 0x17, 0x1a, 0x0a, 0xc0, 0xc8, 0x29, 0x6c, 0x43, 0x41, 0x07, 0x1d, 0x14, 0xbd, 0x0b, - 0xb7, 0x7c, 0xd7, 0x65, 0xe6, 0x99, 0x6b, 0xdb, 0xee, 0xa3, 0xbe, 0x67, 0x4a, 0x6e, 0x91, 0x7b, - 0x27, 0x2b, 0x92, 0x77, 0x93, 0x0f, 0xd8, 0x57, 0xfd, 0x52, 0x83, 0xd8, 0x46, 0xef, 0xc1, 0x33, - 0x9e, 0xe5, 0xf3, 0xc1, 0x53, 0x27, 0xaf, 0x8b, 0xc9, 0x39, 0x39, 0x64, 0xca, 0x74, 0x1b, 0x6e, - 0x5c, 0x9e, 0x47, 0x9c, 0x33, 0x37, 0x87, 0x44, 0x28, 0x6f, 0xcf, 0xeb, 0xeb, 0x38, 0x72, 0xd5, - 0x39, 0x73, 0x0d, 0x74, 0x36, 0x21, 0xd3, 0x7f, 0x19, 0x85, 0xcc, 0xf8, 0xaa, 0x9d, 0xca, 0x1e, - 0x75, 0x88, 0xb1, 0x81, 0x27, 0x59, 0x23, 0x53, 0xba, 0x73, 0xb5, 0xfd, 0x50, 0x68, 0x0e, 0x3c, - 0x6c, 0x08, 0x20, 0x74, 0x0f, 0xe2, 0x9e, 0xe5, 0x33, 0x9a, 0x8b, 0x0a, 0xb7, 0xae, 0x8a, 0xd8, - 0xb0, 0x7c, 0x66, 0x48, 0x24, 0xb4, 0x0d, 0xeb, 0x8c, 0xf4, 0x30, 0x35, 0xad, 0x4e, 0x07, 0x77, - 0xcc, 0xb6, 0xdb, 0x77, 0x98, 0x60, 0x93, 0xb8, 0xb1, 0x26, 0x3a, 0xca, 0x5c, 0xbe, 0xcb, 0xc5, - 0x3a, 0x83, 0x18, 0x9f, 0xca, 0x7d, 0xe5, 0xfb, 0x20, 0xf0, 0x95, 0x7f, 0x73, 0x3e, 0xc2, 0x0e, - 0x23, 0x6c, 0x60, 0x0e, 0x5d, 0x4e, 0x1a, 0x20, 0x45, 0xdc, 0x03, 0x74, 0x03, 0xe2, 0x96, 0x4d, - 0x2c, 0x2a, 0x98, 0x2c, 0x69, 0xc8, 0x06, 0x27, 0xd8, 0x3e, 0xc5, 0xbe, 0xd9, 0xc1, 0x67, 0xc4, - 0xc1, 0x1d, 0xc5, 0x63, 0x29, 0x2e, 0xdb, 0x93, 0xa2, 0xfc, 0xdb, 0x10, 0x53, 0x00, 0xd9, 0xe6, - 0x83, 0x46, 0xc5, 0x3c, 0x3d, 0x3e, 0x69, 0x54, 0x76, 0xab, 0xfb, 0xd5, 0xca, 0x5e, 0x76, 0x09, - 0xa5, 0x60, 0xa5, 0xf2, 0xb5, 0xf2, 0x51, 0xa3, 0x56, 0xc9, 0x6a, 0x28, 0x0d, 0x89, 0x66, 0xe5, - 0xa8, 0x51, 0x2b, 0x37, 0x2b, 0xd9, 0x88, 0xfe, 0xc3, 0x08, 0x24, 0x87, 0x9b, 0xed, 0xaa, 0xf4, - 0x7e, 0x03, 0xe2, 0x17, 0x96, 0xdd, 0xc7, 0x81, 0xd9, 0xa2, 0x81, 0x5e, 0x84, 0xd5, 0x60, 0x83, - 0xc9, 0xde, 0x98, 0xe8, 0x4d, 0x2b, 0xe1, 0x7d, 0x31, 0xe8, 0x6d, 0xc8, 0x85, 0x42, 0x62, 0x8e, - 0x69, 0x8a, 0x8b, 0xf1, 0x4f, 0x8d, 0xe2, 0xb3, 0x17, 0xd2, 0xf9, 0x2c, 0x24, 0x7b, 0x96, 0xd3, - 0xb1, 0x98, 0xeb, 0x0f, 0x44, 0x39, 0xe1, 0xcc, 0x1d, 0x08, 0x50, 0x0e, 0x56, 0x3c, 0xdf, 0xed, - 0x79, 0x2c, 0x60, 0xeb, 0xa0, 0x89, 0x9e, 0x86, 0x15, 0x42, 0x4d, 0x9b, 0x50, 0x96, 0x4b, 0x88, - 0x59, 0xcb, 0x84, 0xd6, 0x08, 0x65, 0xfa, 0x4f, 0x74, 0x58, 0x51, 0xdb, 0x18, 0x7d, 0x35, 0x94, - 0xbc, 0xd4, 0xfc, 0x65, 0x2a, 0x60, 0x81, 0x26, 0xfe, 0x84, 0x1d, 0x2c, 0xa9, 0xa4, 0x1f, 0x41, - 0x9c, 0xf4, 0xac, 0xae, 0x0c, 0x5c, 0xaa, 0xf4, 0x7f, 0x8b, 0x82, 0x55, 0xf9, 0xe4, 0x83, 0x25, - 0x43, 0xa2, 0xa0, 0x36, 0xac, 0x7e, 0xdc, 0x27, 0xed, 0x73, 0xd3, 0xc7, 0x9e, 0x4d, 0xb0, 0x5c, - 0x2a, 0xa9, 0xd2, 0xff, 0x2f, 0x0a, 0x7b, 0x8f, 0x83, 0x18, 0x12, 0xe3, 0x60, 0xc9, 0x48, 0x7f, - 0x1c, 0x6a, 0x73, 0xff, 0xdb, 0x96, 0x2f, 0x57, 0xda, 0x15, 0xfc, 0xdf, 0xb5, 0xfc, 0x0e, 0xf7, - 0x9f, 0x63, 0xa0, 0x37, 0x60, 0xc5, 0xb3, 0x06, 0xb6, 0x6b, 0xc9, 0x02, 0x9b, 0x2a, 0x3d, 0x1d, - 0xc0, 0x05, 0xa7, 0x8d, 0xc2, 0x89, 0x38, 0x6d, 0x1c, 0x2c, 0x19, 0xc1, 0x48, 0x64, 0x43, 0x96, - 0x92, 0x9e, 0x67, 0xe3, 0x21, 0x37, 0xf3, 0x44, 0xf2, 0xd9, 0x1f, 0x2c, 0x6a, 0xcc, 0x89, 0xc0, - 0x09, 0x78, 0x98, 0xfb, 0xba, 0x46, 0xc7, 0x45, 0xe8, 0x23, 0x80, 0x96, 0x45, 0x49, 0xdb, 0x14, - 0x4e, 
0x27, 0x84, 0x9e, 0x77, 0x17, 0xd5, 0xb3, 0xc3, 0x11, 0x94, 0xe7, 0xc9, 0x56, 0xd0, 0x40, - 0x26, 0xa4, 0x68, 0xbf, 0xdb, 0xc5, 0x54, 0x1c, 0xd7, 0x72, 0x49, 0x01, 0x7e, 0x67, 0x61, 0x27, - 0x46, 0x10, 0x07, 0x4b, 0x46, 0x18, 0x11, 0x51, 0xd8, 0xb0, 0x89, 0x73, 0x6e, 0xba, 0x7d, 0x66, - 0x8e, 0xe4, 0xe2, 0x18, 0x90, 0x2a, 0x95, 0x17, 0x55, 0x54, 0x23, 0xce, 0x79, 0xbd, 0xcf, 0x46, - 0xfa, 0x0e, 0x96, 0x8c, 0x75, 0xfb, 0xb2, 0x10, 0x7d, 0x03, 0x52, 0x7c, 0x0b, 0x99, 0x14, 0xdb, - 0xb8, 0xcd, 0x72, 0x29, 0xa1, 0xec, 0xf6, 0xe2, 0xca, 0x28, 0x3b, 0x11, 0x08, 0x07, 0x4b, 0x06, - 0xd8, 0xc3, 0x16, 0x22, 0xb0, 0xd6, 0xb6, 0x7c, 0xb7, 0x4f, 0xb1, 0x1d, 0xa8, 0x48, 0x0b, 0x15, - 0xef, 0x5f, 0x61, 0x29, 0x0a, 0x98, 0xa1, 0x9a, 0x4c, 0x7b, 0x4c, 0x82, 0x9a, 0x90, 0x08, 0xaa, - 0xbe, 0x3a, 0x95, 0x5e, 0xbd, 0xe8, 0x0f, 0x91, 0x74, 0x1d, 0x62, 0x9c, 0x04, 0x42, 0x55, 0x20, - 0x1a, 0x54, 0x01, 0xfd, 0x04, 0xe2, 0x62, 0x4f, 0xa3, 0x67, 0x20, 0x29, 0xf6, 0xb4, 0xd9, 0xf7, - 0x89, 0xa2, 0xdc, 0x84, 0x10, 0x9c, 0xfa, 0x04, 0xbd, 0x06, 0xc8, 0x6a, 0xb7, 0x31, 0xa5, 0xa4, - 0x45, 0x6c, 0xc1, 0x8f, 0x1c, 0x47, 0x92, 0xef, 0xfa, 0x58, 0x0f, 0x57, 0xa4, 0x57, 0x21, 0x1d, - 0xde, 0xd1, 0x9c, 0x92, 0x19, 0x61, 0x76, 0x40, 0xe5, 0xb2, 0xc1, 0x29, 0x79, 0x9c, 0x3c, 0x22, - 0xc2, 0xae, 0xb1, 0xcd, 0xaf, 0xff, 0x45, 0x83, 0x98, 0x58, 0xba, 0xd3, 0x31, 0x74, 0x48, 0xd0, - 0x7e, 0x4b, 0x76, 0x48, 0x73, 0x86, 0xed, 0x71, 0x8f, 0xa2, 0x97, 0x3c, 0x3a, 0x85, 0x95, 0x56, - 0x9f, 0x31, 0xbe, 0x0b, 0x62, 0x8b, 0x95, 0xe6, 0x30, 0xaf, 0x14, 0x76, 0x04, 0x86, 0x11, 0x60, - 0xe9, 0xef, 0xc0, 0xb2, 0x14, 0x4d, 0x2d, 0xb9, 0xfc, 0x7a, 0xe0, 0x52, 0x26, 0xce, 0xff, 0xca, - 0xda, 0xa0, 0xad, 0xf7, 0x20, 0x33, 0x4e, 0x0e, 0xe8, 0xbf, 0x20, 0x23, 0xce, 0xf9, 0xcc, 0x35, - 0xa9, 0x87, 0x71, 0xfb, 0xa1, 0xc2, 0x4a, 0x73, 0x69, 0xd3, 0x3d, 0x11, 0x32, 0xae, 0x87, 0xd2, - 0x9e, 0xad, 0xf0, 0xc4, 0x77, 0xb8, 0x4a, 0x0a, 0x1b, 0xa2, 0x63, 0x55, 0x52, 0xa4, 0xe8, 0x3b, - 0xb0, 0x76, 0x89, 0x8b, 0x10, 0x99, 0x42, 0x73, 0x9a, 0x88, 0xcd, 0xfb, 0xd7, 0xa3, 0xb9, 0x09, - 0x8e, 0xd3, 0x7f, 0x17, 0x85, 0xe4, 0x90, 0xa2, 0xae, 0x90, 0xda, 0x97, 0x20, 0xc3, 0x57, 0xb6, - 0xc5, 0x18, 0xee, 0x84, 0x5d, 0x5c, 0x1d, 0x4a, 0xc5, 0x82, 0x3f, 0x0c, 0xaa, 0x5d, 0xec, 0x1a, - 0xd5, 0x2e, 0xa8, 0x75, 0x1f, 0x8d, 0x56, 0x4c, 0x5c, 0x44, 0xe5, 0x2b, 0x57, 0x26, 0xe5, 0x89, - 0x65, 0xf3, 0x5b, 0x6d, 0xb8, 0x6e, 0xa6, 0x07, 0xe3, 0x02, 0xd6, 0x5c, 0x0f, 0x3b, 0x7c, 0x29, - 0x9b, 0xea, 0x6a, 0x25, 0x2b, 0xf8, 0xf1, 0x75, 0x8d, 0x28, 0xd4, 0x3d, 0xec, 0x9c, 0xfa, 0xa4, - 0x2c, 0x50, 0x8d, 0x55, 0x37, 0xdc, 0xd4, 0x5f, 0x80, 0xd5, 0xb1, 0x7e, 0x94, 0x85, 0xe8, 0x88, - 0x20, 0xf8, 0xa7, 0x9e, 0x07, 0x08, 0x71, 0xf1, 0x54, 0xf3, 0xf5, 0x73, 0x48, 0x85, 0x8a, 0x06, - 0xfa, 0xfa, 0x78, 0x19, 0xd2, 0x16, 0x3b, 0xf2, 0x4f, 0x96, 0xa1, 0xb1, 0x1a, 0xa4, 0x37, 0x60, - 0x7d, 0xa2, 0x70, 0xa0, 0x57, 0x20, 0xdb, 0xe1, 0x9f, 0x8e, 0x78, 0xa9, 0x30, 0x43, 0x07, 0xcb, - 0xb5, 0x90, 0x5c, 0x1c, 0xe6, 0x94, 0x8b, 0x91, 0x91, 0x8b, 0x5f, 0x46, 0x00, 0x46, 0xe5, 0xe1, - 0x31, 0x29, 0x3a, 0x85, 0x38, 0x61, 0xb8, 0x27, 0x69, 0xec, 0x0a, 0x47, 0x83, 0x91, 0x82, 0x42, - 0x95, 0xe1, 0x9e, 0x21, 0xd1, 0xf4, 0x3f, 0x6a, 0x10, 0xe3, 0x6d, 0x64, 0x40, 0x4c, 0x5c, 0x90, - 0xb4, 0xab, 0xd5, 0x1e, 0x09, 0xcd, 0x91, 0xc4, 0x25, 0x49, 0x60, 0x8d, 0x3c, 0x89, 0x84, 0x3d, - 0xd9, 0x84, 0x54, 0x07, 0xd3, 0xb6, 0x4f, 0x3c, 0xb1, 0xd0, 0x02, 0xf6, 0x18, 0x89, 0x9e, 0xe8, - 0xc6, 0xd2, 0x7f, 0x1f, 0x81, 0xcc, 0x78, 0x65, 0x44, 0x0f, 0x82, 0x58, 0xca, 0xa5, 0xb1, 0x7b, - 0xbd, 0x42, 0xfb, 0x1f, 0x16, 
0xcf, 0xf7, 0x21, 0x33, 0x6e, 0x1c, 0x5f, 0xd1, 0xe7, 0x78, 0x10, - 0x6c, 0xda, 0x73, 0x3c, 0x10, 0xe4, 0x3a, 0x70, 0x5c, 0x67, 0xd0, 0x0b, 0xca, 0xee, 0xb0, 0x9d, - 0xff, 0x91, 0x06, 0x89, 0xe0, 0x14, 0x81, 0x72, 0x70, 0x83, 0xdf, 0xce, 0xf6, 0xeb, 0xc6, 0xd1, - 0xa5, 0x7b, 0x5c, 0x1a, 0x12, 0xfb, 0xe5, 0xdd, 0xca, 0x4e, 0xbd, 0x7e, 0x98, 0xd5, 0x50, 0x12, - 0xe2, 0x27, 0xb5, 0xf2, 0xee, 0x61, 0x36, 0x22, 0xef, 0x74, 0xb5, 0xca, 0x5d, 0xa3, 0x7c, 0x94, - 0x8d, 0xa2, 0x15, 0x88, 0x1e, 0x56, 0x0f, 0xb3, 0x31, 0x31, 0xe2, 0xf0, 0x41, 0xa3, 0x92, 0x8d, - 0xa3, 0x04, 0xc4, 0x6a, 0xd5, 0xe3, 0x4a, 0x76, 0x99, 0x0b, 0xef, 0x57, 0x77, 0x2a, 0x46, 0x76, - 0x05, 0x3d, 0x05, 0xeb, 0xe5, 0xdd, 0x66, 0xb5, 0x7e, 0x7c, 0x62, 0xd6, 0x8f, 0xcd, 0xbb, 0xf5, - 0xfa, 0xdd, 0x5a, 0x25, 0x9b, 0xd8, 0x49, 0xc2, 0x8a, 0x7a, 0x25, 0xd1, 0xbf, 0xaf, 0x01, 0x9a, - 0xbc, 0xef, 0xa3, 0xff, 0x9d, 0x7c, 0x49, 0x08, 0x6d, 0xef, 0x4b, 0xaf, 0x01, 0xf3, 0x3c, 0x5d, - 0x44, 0xfe, 0xf9, 0xd3, 0x45, 0x9e, 0x41, 0x3a, 0xfc, 0x06, 0x88, 0x9e, 0x83, 0x5b, 0x1f, 0x56, - 0x76, 0x0e, 0xea, 0xf5, 0x43, 0xf3, 0xa4, 0x59, 0x6e, 0x5e, 0xbe, 0xf0, 0xde, 0x82, 0xa7, 0xc6, - 0xbb, 0x2b, 0xc7, 0xe5, 0x9d, 0x5a, 0x65, 0x2f, 0xab, 0xa1, 0x6d, 0x78, 0x79, 0x6a, 0x97, 0xb9, - 0x5f, 0x37, 0xcc, 0x93, 0x5a, 0xbd, 0x69, 0xee, 0x57, 0x6b, 0xb5, 0xea, 0xf1, 0xdd, 0x6c, 0x24, - 0xff, 0xa5, 0x06, 0x88, 0x73, 0x84, 0x34, 0x84, 0x1a, 0xf8, 0xe3, 0x3e, 0xa6, 0x0c, 0xdd, 0x84, - 0x65, 0x69, 0xa8, 0xf2, 0x57, 0xb5, 0xf8, 0xe9, 0xca, 0xb6, 0x9c, 0x6e, 0x9f, 0x1f, 0x80, 0xda, - 0x6e, 0x27, 0xf0, 0x2a, 0x1d, 0x08, 0x77, 0xdd, 0x0e, 0x46, 0x35, 0x48, 0x29, 0xc7, 0x2f, 0x08, - 0x7e, 0x24, 0x56, 0x66, 0xa6, 0xf4, 0x3f, 0x73, 0xae, 0xbe, 0xfb, 0x04, 0x3f, 0x32, 0x80, 0x0c, - 0xbf, 0xf9, 0x81, 0xcb, 0xe3, 0xea, 0x28, 0xf9, 0x14, 0xab, 0x17, 0x89, 0x04, 0x17, 0x9c, 0x90, - 0x4f, 0x79, 0x90, 0x40, 0x74, 0x32, 0xf7, 0x1c, 0x3b, 0xea, 0x36, 0x2d, 0x86, 0x37, 0xb9, 0x20, - 0xff, 0x5d, 0x0d, 0x36, 0xc6, 0xbc, 0x53, 0x87, 0xa0, 0x32, 0xac, 0x48, 0x0d, 0x01, 0x17, 0xfc, - 0xf7, 0x9c, 0xd6, 0x19, 0xc1, 0x3c, 0xf4, 0x32, 0xac, 0x39, 0xfc, 0x1c, 0x15, 0x52, 0x2f, 0x63, - 0xb1, 0xca, 0xc5, 0x8d, 0xa1, 0x09, 0x3f, 0xd3, 0x20, 0x7b, 0x17, 0x2b, 0x0b, 0x82, 0xf0, 0x4e, - 0x7b, 0x84, 0xf8, 0xf7, 0x87, 0x36, 0xff, 0x27, 0x0d, 0x36, 0x76, 0x7d, 0x6c, 0x31, 0x3c, 0x6e, - 0xde, 0xe3, 0xb2, 0xff, 0x01, 0x2c, 0xcb, 0xd9, 0xea, 0x98, 0x30, 0x77, 0xd4, 0xd4, 0xb4, 0x49, - 0x1f, 0xa3, 0xb3, 0x7d, 0x8c, 0x5d, 0xcf, 0xc7, 0x1f, 0x44, 0x60, 0xe3, 0xd4, 0xeb, 0x4c, 0xf8, - 0x38, 0xf2, 0x45, 0x7b, 0x42, 0xbe, 0x4c, 0xcb, 0xd7, 0x1d, 0x48, 0xf5, 0x85, 0x72, 0xf1, 0x53, - 0x84, 0x7a, 0xc8, 0xd0, 0x27, 0x5e, 0x07, 0xf6, 0x09, 0xb6, 0x3b, 0x47, 0x16, 0x3d, 0x37, 0x40, - 0x0e, 0xe7, 0xdf, 0x4f, 0x38, 0x10, 0xaf, 0xc0, 0xc6, 0x1e, 0xb6, 0xf1, 0xe5, 0x38, 0x4c, 0x59, - 0x8a, 0xf9, 0xbf, 0x47, 0xe0, 0xd6, 0x8e, 0xc5, 0xda, 0x0f, 0xc3, 0x81, 0x9b, 0xc9, 0x0d, 0xdb, - 0x90, 0x55, 0xe6, 0xb6, 0xf8, 0x5c, 0x73, 0x78, 0xdc, 0xe1, 0x57, 0x52, 0xd9, 0x23, 0x41, 0x7d, - 0x82, 0xbe, 0x09, 0x1b, 0x63, 0x63, 0x89, 0x63, 0x13, 0x07, 0xab, 0xf8, 0xbc, 0x3a, 0xa7, 0x8b, - 0x02, 0x8d, 0x5f, 0xde, 0x43, 0xe0, 0x55, 0x01, 0x34, 0x99, 0x9c, 0xd8, 0xec, 0xe4, 0xc4, 0xaf, - 0x93, 0x9c, 0xe5, 0x6b, 0x25, 0x67, 0x27, 0x03, 0xe9, 0x70, 0x3c, 0xf2, 0x26, 0xe8, 0xd3, 0x12, - 0xf0, 0xc4, 0xe8, 0x2b, 0x7f, 0xa1, 0x32, 0x1c, 0x5e, 0x12, 0x33, 0x33, 0x1c, 0xd2, 0x1b, 0xb9, - 0xa2, 0xde, 0x06, 0xa4, 0x42, 0xc9, 0x7b, 0x02, 0x9e, 0x6c, 0x7f, 0x00, 0x50, 0x0d, 0x57, 0x8b, - 0xa7, 0xab, 0xc7, 0xcd, 0xca, 0x71, 0xd3, 0xbc, 0x5f, 
0xad, 0x7c, 0x78, 0xa9, 0x66, 0xde, 0x80, - 0x6c, 0xb8, 0x73, 0xff, 0xb4, 0x56, 0xcb, 0x6a, 0xa5, 0x2f, 0x92, 0xb0, 0xa2, 0x02, 0x80, 0x7e, - 0xad, 0x41, 0x2a, 0x54, 0x30, 0xd0, 0x1b, 0x33, 0xcd, 0x99, 0x2c, 0x9e, 0xfa, 0x9b, 0x8b, 0x4d, - 0x92, 0x49, 0xcd, 0x97, 0xbe, 0xf7, 0x87, 0x3f, 0xff, 0x34, 0xf2, 0x2a, 0xda, 0x1e, 0xfe, 0x48, - 0xfa, 0x6d, 0x19, 0xf6, 0xf7, 0x3c, 0xdf, 0xfd, 0x16, 0x6e, 0x33, 0x5a, 0xdc, 0x2e, 0x5a, 0x5d, - 0xec, 0xb0, 0xcf, 0x8a, 0x41, 0x11, 0xfa, 0xb9, 0x06, 0xc9, 0x61, 0x71, 0x41, 0xb3, 0x7f, 0x7a, - 0xba, 0x5c, 0x88, 0xf4, 0x79, 0xc3, 0x3d, 0xcd, 0x3a, 0x4e, 0x15, 0x13, 0xb6, 0x05, 0xa6, 0x15, - 0xb7, 0x3f, 0x43, 0x9f, 0x6b, 0x90, 0x0e, 0x97, 0x17, 0x34, 0x3b, 0x30, 0x53, 0xaa, 0xd1, 0xfc, - 0x36, 0xde, 0x16, 0x36, 0xbe, 0x99, 0x5f, 0x20, 0x82, 0xb7, 0x03, 0x36, 0xff, 0x8d, 0x06, 0xe9, - 0xf0, 0x66, 0x9b, 0xc3, 0xd6, 0x29, 0x55, 0x65, 0x7e, 0x5b, 0xcb, 0xc2, 0xd6, 0x3b, 0xa5, 0xd7, - 0x47, 0xb6, 0xaa, 0x1f, 0xe0, 0x67, 0x85, 0x75, 0x68, 0xf2, 0x8f, 0x35, 0x48, 0x87, 0xb7, 0xef, - 0x1c, 0x26, 0x4f, 0x29, 0x00, 0xfa, 0xcd, 0x09, 0xc2, 0xab, 0xf4, 0x3c, 0x36, 0x08, 0x32, 0xbe, - 0xbd, 0x48, 0xc6, 0xbf, 0xd0, 0x00, 0x4d, 0xf2, 0x16, 0x9a, 0x7d, 0x09, 0x7f, 0x6c, 0xb5, 0xd1, - 0x9f, 0x0b, 0xe6, 0x86, 0xfe, 0x1c, 0x50, 0xa8, 0x07, 0x7f, 0x0e, 0x08, 0xe2, 0x98, 0x7f, 0x6b, - 0x81, 0x9c, 0xb7, 0x46, 0xca, 0x6e, 0x6b, 0xdb, 0x23, 0xa3, 0xc7, 0xb8, 0x70, 0x5e, 0xa3, 0xa7, - 0x11, 0xe8, 0xbf, 0xcc, 0x68, 0xa9, 0xec, 0xb6, 0xb6, 0xbd, 0xf3, 0xb9, 0x06, 0x2f, 0xb6, 0xdd, - 0xde, 0x2c, 0x1b, 0x77, 0x14, 0xdb, 0x36, 0x78, 0x6e, 0x1b, 0xda, 0x47, 0x55, 0x35, 0xbe, 0xeb, - 0xf2, 0x4a, 0x58, 0x70, 0xfd, 0x6e, 0xb1, 0x8b, 0x1d, 0x91, 0xf9, 0xa2, 0xec, 0xb2, 0x3c, 0x42, - 0x1f, 0xfb, 0xbf, 0x8d, 0x3b, 0x23, 0xd1, 0xdf, 0x34, 0xed, 0x17, 0x91, 0xc8, 0xde, 0xfe, 0xaf, - 0x22, 0xcf, 0xdf, 0x95, 0x98, 0xbb, 0xc2, 0x86, 0xbd, 0x91, 0x0d, 0xf7, 0xe5, 0xa4, 0xd6, 0xb2, - 0xc0, 0x7f, 0xe3, 0x1f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x83, 0x8c, 0xb5, 0x01, 0x83, 0x22, 0x00, - 0x00, + 0xcf, 0x5e, 0x1e, 0x41, 0x99, 0xdf, 0x6f, 0x2b, 0x0b, 0xf2, 0x3f, 0xdb, 0x86, 0xe5, 0xaa, 0x08, + 0x00, 0x42, 0x10, 0x73, 0xac, 0x1e, 0xce, 0x69, 0x9b, 0xda, 0x56, 0xd2, 0x10, 0xdf, 0xe8, 0x05, + 0x48, 0x77, 0x08, 0xf5, 0x6c, 0x6b, 0x60, 0x8a, 0xbe, 0x88, 0xe8, 0x4b, 0x29, 0xd9, 0x31, 0x1f, + 0xf2, 0x00, 0x56, 0x1f, 0xe1, 0xd6, 0x43, 0xd7, 0x3d, 0x37, 0x29, 0xb3, 0x18, 0xce, 0x2d, 0x6f, + 0x6a, 0x5b, 0x99, 0xd2, 0x9b, 0x85, 0x19, 0x91, 0x2c, 0x48, 0xb5, 0x85, 0x0f, 0xe5, 0xe4, 0x13, + 0x3e, 0xd7, 0x48, 0x3f, 0x0a, 0xb5, 0x90, 0x0e, 0x09, 0xcf, 0x27, 0xae, 0x4f, 0xd8, 0x20, 0x17, + 0xdd, 0xd4, 0xb6, 0xe2, 0xc6, 0xb0, 0x8d, 0x9e, 0x87, 0x14, 0xa1, 0xe6, 0x99, 0x65, 0xdb, 0x2d, + 0xab, 0x7d, 0x9e, 0x8b, 0x6d, 0x6a, 0x5b, 0x09, 0x03, 0x08, 0xdd, 0x57, 0x12, 0xf4, 0x1c, 0x40, + 0xcf, 0x36, 0xb1, 0x63, 0xb5, 0x6c, 0xdc, 0xc9, 0xc5, 0x45, 0x7f, 0xb2, 0x67, 0x57, 0xa4, 0x80, + 0xcf, 0xef, 0xd9, 0x66, 0x87, 0x50, 0xd9, 0xbf, 0x21, 0xe7, 0xf7, 0xec, 0x3d, 0x25, 0x41, 0x05, + 0xd8, 0x20, 0x8e, 0xd7, 0x67, 0xa6, 0x4a, 0x99, 0x08, 0x00, 0xcd, 0xad, 0x6c, 0x46, 0xb7, 0x92, + 0xc6, 0xba, 0xe8, 0xda, 0x95, 0x3d, 0x3c, 0x0c, 0x14, 0xdd, 0x84, 0x65, 0x7c, 0x81, 0x1d, 0x46, + 0x73, 0x09, 0x31, 0x44, 0xb5, 0x90, 0x05, 0x59, 0xe6, 0x5b, 0x84, 0xe7, 0xd6, 0xf4, 0x1e, 0xfa, + 0x16, 0xc5, 0x34, 0x97, 0xdc, 0x8c, 0x6e, 0xa5, 0x4a, 0x6f, 0xcd, 0x1b, 0xa2, 0xa6, 0x9a, 0xdf, + 0x10, 0xd3, 0x8d, 0x35, 0x36, 0xd6, 0x16, 0xaa, 0xad, 0x36, 0x5f, 0x32, 0x39, 0x10, 0xf9, 0x51, + 0x2d, 0x74, 0x0f, 0xd6, 0xdc, 0x3e, 0x0b, 0xf9, 0x40, 0x73, 0x29, 0xa1, 
0x79, 0x6b, 0xa6, 0x66, + 0xe5, 0x9a, 0x91, 0x91, 0x00, 0xaa, 0x49, 0xd1, 0x4b, 0x90, 0xf1, 0x31, 0xc5, 0x21, 0xc4, 0xb4, + 0x88, 0xdc, 0xaa, 0x90, 0x0e, 0x87, 0xdd, 0x03, 0xf0, 0x2c, 0xdf, 0xea, 0x61, 0x86, 0x7d, 0x9a, + 0x5b, 0x15, 0x4a, 0x5f, 0x9f, 0xd7, 0xdd, 0x46, 0x30, 0xd3, 0x08, 0x81, 0xa0, 0x43, 0x48, 0xf4, + 0x30, 0xa5, 0x56, 0x17, 0xd3, 0x5c, 0x46, 0x00, 0x16, 0xe7, 0x05, 0x3c, 0x92, 0xf3, 0x8c, 0x21, + 0x00, 0xba, 0x00, 0xbd, 0x83, 0xcf, 0xac, 0xbe, 0xcd, 0x4c, 0x1f, 0x53, 0xcf, 0x75, 0x28, 0x36, + 0x3d, 0xdb, 0x62, 0x67, 0xae, 0xdf, 0xa3, 0xb9, 0xb5, 0xcd, 0xe8, 0x56, 0xa6, 0xf4, 0xce, 0x82, + 0xf0, 0x85, 0x86, 0x02, 0x30, 0x72, 0x0a, 0xdb, 0x50, 0xd0, 0x41, 0x07, 0x45, 0xef, 0xc2, 0x2d, + 0xdf, 0x75, 0x99, 0x79, 0xe6, 0xda, 0xb6, 0xfb, 0xa8, 0xef, 0x99, 0x92, 0x7c, 0xe4, 0xe6, 0xca, + 0x8a, 0xe4, 0xdd, 0xe4, 0x03, 0xf6, 0x55, 0xbf, 0xd4, 0x20, 0xf6, 0xd9, 0x7b, 0xf0, 0x8c, 0x67, + 0xf9, 0x7c, 0xf0, 0xd4, 0xc9, 0xeb, 0x62, 0x72, 0x4e, 0x0e, 0x99, 0x32, 0xdd, 0x86, 0x1b, 0x97, + 0xe7, 0x11, 0xe7, 0xcc, 0xcd, 0x21, 0x11, 0xca, 0xdb, 0xf3, 0xfa, 0x3a, 0x8e, 0x5c, 0x75, 0xce, + 0x5c, 0x03, 0x9d, 0x4d, 0xc8, 0xf4, 0x5f, 0x45, 0x21, 0x33, 0xbe, 0x6a, 0xa7, 0xd2, 0x4b, 0x1d, + 0x62, 0x6c, 0xe0, 0x49, 0x5a, 0xc9, 0x94, 0xee, 0x5c, 0x6d, 0x3f, 0x14, 0x9a, 0x03, 0x0f, 0x1b, + 0x02, 0x08, 0xdd, 0x83, 0xb8, 0x67, 0xf9, 0x8c, 0xe6, 0xa2, 0xc2, 0xad, 0xab, 0x22, 0x36, 0x2c, + 0x9f, 0x19, 0x12, 0x09, 0x6d, 0xc3, 0x3a, 0x23, 0x3d, 0x4c, 0x4d, 0xab, 0xd3, 0xc1, 0x1d, 0xb3, + 0xed, 0xf6, 0x1d, 0x26, 0xe8, 0x26, 0x6e, 0xac, 0x89, 0x8e, 0x32, 0x97, 0xef, 0x72, 0xb1, 0xce, + 0x20, 0xc6, 0xa7, 0x72, 0x5f, 0xf9, 0x3e, 0x08, 0x7c, 0xe5, 0xdf, 0x9c, 0x70, 0xb0, 0xc3, 0x08, + 0x1b, 0x98, 0x43, 0x97, 0x93, 0x06, 0x48, 0x11, 0xf7, 0x00, 0xdd, 0x80, 0xb8, 0x65, 0x13, 0x8b, + 0x0a, 0xaa, 0x4b, 0x1a, 0xb2, 0xc1, 0x19, 0xb8, 0x4f, 0xb1, 0x6f, 0x76, 0xf0, 0x19, 0x71, 0x70, + 0x47, 0x11, 0x5d, 0x8a, 0xcb, 0xf6, 0xa4, 0x28, 0xff, 0x36, 0xc4, 0x14, 0x40, 0xb6, 0xf9, 0xa0, + 0x51, 0x31, 0x4f, 0x8f, 0x4f, 0x1a, 0x95, 0xdd, 0xea, 0x7e, 0xb5, 0xb2, 0x97, 0x5d, 0x42, 0x29, + 0x58, 0xa9, 0x7c, 0xad, 0x7c, 0xd4, 0xa8, 0x55, 0xb2, 0x1a, 0x4a, 0x43, 0xa2, 0x59, 0x39, 0x6a, + 0xd4, 0xca, 0xcd, 0x4a, 0x36, 0xa2, 0xff, 0x30, 0x02, 0xc9, 0xe1, 0x66, 0xbb, 0x2a, 0xff, 0xdf, + 0x80, 0xf8, 0x85, 0x65, 0xf7, 0x71, 0x60, 0xb6, 0x68, 0xa0, 0x17, 0x61, 0x35, 0xd8, 0x60, 0xb2, + 0x37, 0x26, 0x7a, 0xd3, 0x4a, 0x78, 0x5f, 0x0c, 0x7a, 0x1b, 0x72, 0xa1, 0x90, 0x98, 0x63, 0x9a, + 0xe2, 0x62, 0xfc, 0x53, 0xa3, 0xf8, 0xec, 0x85, 0x74, 0x3e, 0x0b, 0xc9, 0x9e, 0xe5, 0x74, 0x2c, + 0xe6, 0xfa, 0x03, 0x51, 0x6f, 0x38, 0xb5, 0x07, 0x02, 0x94, 0x83, 0x15, 0xcf, 0x77, 0x7b, 0x1e, + 0x0b, 0xd8, 0x3a, 0x68, 0xa2, 0xa7, 0x61, 0x85, 0x50, 0xd3, 0x26, 0x94, 0xe5, 0x12, 0x62, 0xd6, + 0x32, 0xa1, 0x35, 0x42, 0x99, 0xfe, 0x13, 0x1d, 0x56, 0xd4, 0x36, 0x46, 0x5f, 0x0d, 0x25, 0x2f, + 0x35, 0x7f, 0x1d, 0x0b, 0x58, 0xa0, 0x89, 0x3f, 0x61, 0x07, 0x4b, 0x2a, 0xe9, 0x47, 0x10, 0x27, + 0x3d, 0xab, 0x2b, 0x03, 0x97, 0x2a, 0xfd, 0xdf, 0xa2, 0x60, 0x55, 0x3e, 0xf9, 0x60, 0xc9, 0x90, + 0x28, 0xa8, 0x0d, 0xab, 0x1f, 0xf7, 0x49, 0xfb, 0xdc, 0xf4, 0xb1, 0x67, 0x13, 0x2c, 0x97, 0x4a, + 0xaa, 0xf4, 0xff, 0x8b, 0xc2, 0xde, 0xe3, 0x20, 0x86, 0xc4, 0x38, 0x58, 0x32, 0xd2, 0x1f, 0x87, + 0xda, 0xdc, 0xff, 0xb6, 0xe5, 0xcb, 0x95, 0x76, 0x05, 0xff, 0x77, 0x2d, 0xbf, 0xc3, 0xfd, 0xe7, + 0x18, 0xe8, 0x0d, 0x58, 0xf1, 0xac, 0x81, 0xed, 0x5a, 0xb2, 0x02, 0xa7, 0x4a, 0x4f, 0x07, 0x70, + 0xc1, 0x71, 0xa4, 0x70, 0x22, 0x8e, 0x23, 0x07, 0x4b, 0x46, 0x30, 0x12, 0xd9, 0x90, 0xa5, 0xa4, + 
0xe7, 0xd9, 0x78, 0xc8, 0xcd, 0x3c, 0x91, 0x7c, 0xf6, 0x07, 0x8b, 0x1a, 0x73, 0x22, 0x70, 0x02, + 0x1e, 0xe6, 0xbe, 0xae, 0xd1, 0x71, 0x11, 0xfa, 0x08, 0xa0, 0x65, 0x51, 0xd2, 0x36, 0x85, 0xd3, + 0x09, 0xa1, 0xe7, 0xdd, 0x45, 0xf5, 0xec, 0x70, 0x04, 0xe5, 0x79, 0xb2, 0x15, 0x34, 0x90, 0x09, + 0x29, 0xda, 0xef, 0x76, 0x31, 0x15, 0xe7, 0xb9, 0x5c, 0x52, 0x80, 0xdf, 0x59, 0xd8, 0x89, 0x11, + 0xc4, 0xc1, 0x92, 0x11, 0x46, 0x44, 0x14, 0x36, 0x6c, 0xe2, 0x9c, 0x9b, 0x6e, 0x9f, 0x99, 0x23, + 0xb9, 0x38, 0x06, 0xa4, 0x4a, 0xe5, 0x45, 0x15, 0xd5, 0x88, 0x73, 0x5e, 0xef, 0xb3, 0x91, 0xbe, + 0x83, 0x25, 0x63, 0xdd, 0xbe, 0x2c, 0x44, 0xdf, 0x80, 0x14, 0xdf, 0x42, 0x26, 0xc5, 0x36, 0x6e, + 0xb3, 0x5c, 0x4a, 0x28, 0xbb, 0xbd, 0xb8, 0x32, 0xca, 0x4e, 0x04, 0xc2, 0xc1, 0x92, 0x01, 0xf6, + 0xb0, 0x85, 0x08, 0xac, 0xb5, 0x2d, 0xdf, 0xed, 0x53, 0x6c, 0x07, 0x2a, 0xd2, 0x42, 0xc5, 0xfb, + 0x57, 0x58, 0x8a, 0x02, 0x66, 0xa8, 0x26, 0xd3, 0x1e, 0x93, 0xa0, 0x26, 0x24, 0x82, 0xaa, 0xaf, + 0x8e, 0xad, 0x57, 0x2f, 0xfa, 0x43, 0x24, 0x5d, 0x87, 0x18, 0x27, 0x81, 0x50, 0x15, 0x88, 0x06, + 0x55, 0x40, 0x3f, 0x81, 0xb8, 0xd8, 0xd3, 0xe8, 0x19, 0x48, 0x8a, 0x3d, 0x6d, 0xf6, 0x7d, 0xa2, + 0x28, 0x37, 0x21, 0x04, 0xa7, 0x3e, 0x41, 0xaf, 0x01, 0xb2, 0xda, 0x6d, 0x4c, 0x29, 0x69, 0x11, + 0x5b, 0xf0, 0x23, 0xc7, 0x91, 0xe4, 0xbb, 0x3e, 0xd6, 0xc3, 0x15, 0xe9, 0x55, 0x48, 0x87, 0x77, + 0x34, 0xa7, 0x64, 0x46, 0x98, 0x1d, 0x50, 0xb9, 0x6c, 0x70, 0x4a, 0x1e, 0x27, 0x8f, 0x88, 0xb0, + 0x6b, 0x6c, 0xf3, 0xeb, 0x7f, 0xd5, 0x20, 0x26, 0x96, 0xee, 0x74, 0x0c, 0x1d, 0x12, 0xb4, 0xdf, + 0x92, 0x1d, 0xd2, 0x9c, 0x61, 0x7b, 0xdc, 0xa3, 0xe8, 0x25, 0x8f, 0x4e, 0x61, 0xa5, 0xd5, 0x67, + 0x8c, 0xef, 0x82, 0xd8, 0x62, 0xa5, 0x39, 0xcc, 0x2b, 0x85, 0x1d, 0x81, 0x61, 0x04, 0x58, 0xfa, + 0x3b, 0xb0, 0x2c, 0x45, 0x53, 0x4b, 0x2e, 0xbf, 0x3f, 0xb8, 0x94, 0x89, 0x0b, 0x82, 0xb2, 0x36, + 0x68, 0xeb, 0x3d, 0xc8, 0x8c, 0x93, 0x03, 0xfa, 0x2f, 0xc8, 0x88, 0x73, 0x3e, 0x73, 0x4d, 0xea, + 0x61, 0xdc, 0x7e, 0xa8, 0xb0, 0xd2, 0x5c, 0xda, 0x74, 0x4f, 0x84, 0x8c, 0xeb, 0xa1, 0xb4, 0x67, + 0x2b, 0x3c, 0xf1, 0x1d, 0xae, 0x92, 0xc2, 0x86, 0xe8, 0x58, 0x95, 0x14, 0x29, 0xfa, 0x0e, 0xac, + 0x5d, 0xe2, 0x22, 0x44, 0xa6, 0xd0, 0x9c, 0x26, 0x62, 0xf3, 0xfe, 0xf5, 0x68, 0x6e, 0x82, 0xe3, + 0xf4, 0xdf, 0x47, 0x21, 0x39, 0xa4, 0xa8, 0x2b, 0xa4, 0xf6, 0x25, 0xc8, 0xf0, 0x95, 0x6d, 0x31, + 0x86, 0x3b, 0x61, 0x17, 0x57, 0x87, 0x52, 0xb1, 0xe0, 0x0f, 0x83, 0x6a, 0x17, 0xbb, 0x46, 0xb5, + 0x0b, 0x6a, 0xdd, 0x47, 0xa3, 0x15, 0x13, 0x17, 0x51, 0xf9, 0xca, 0x95, 0x49, 0x79, 0x62, 0xd9, + 0xfc, 0x4e, 0x1b, 0xae, 0x9b, 0xe9, 0xc1, 0xb8, 0x80, 0x35, 0xd7, 0xc3, 0x0e, 0x5f, 0xca, 0xa6, + 0xba, 0x5a, 0xc9, 0x0a, 0x7e, 0x7c, 0x5d, 0x23, 0x0a, 0x75, 0x0f, 0x3b, 0xa7, 0x3e, 0x29, 0x0b, + 0x54, 0x63, 0xd5, 0x0d, 0x37, 0xf5, 0x17, 0x60, 0x75, 0xac, 0x1f, 0x65, 0x21, 0x3a, 0x22, 0x08, + 0xfe, 0xa9, 0xe7, 0x01, 0x42, 0x5c, 0x3c, 0xd5, 0x7c, 0xfd, 0x1c, 0x52, 0xa1, 0xa2, 0x81, 0xbe, + 0x3e, 0x5e, 0x86, 0xb4, 0xc5, 0x8e, 0xfc, 0x93, 0x65, 0x68, 0xac, 0x06, 0xe9, 0x0d, 0x58, 0x9f, + 0x28, 0x1c, 0xe8, 0x15, 0xc8, 0x76, 0xf8, 0xa7, 0x23, 0x9e, 0x32, 0xcc, 0xd0, 0xc1, 0x72, 0x2d, + 0x24, 0x17, 0x87, 0x39, 0xe5, 0x62, 0x64, 0xe4, 0xe2, 0x97, 0x11, 0x80, 0x51, 0x79, 0x78, 0x4c, + 0x8a, 0x4e, 0x21, 0x4e, 0x18, 0xee, 0x49, 0x1a, 0xbb, 0xc2, 0xd1, 0x60, 0xa4, 0xa0, 0x50, 0x65, + 0xb8, 0x67, 0x48, 0x34, 0xfd, 0x4f, 0x1a, 0xc4, 0x78, 0x1b, 0x19, 0x10, 0x13, 0x17, 0x24, 0xed, + 0x6a, 0xb5, 0x47, 0x42, 0x73, 0x24, 0x71, 0x49, 0x12, 0x58, 0x23, 0x4f, 0x22, 0x61, 0x4f, 0x36, + 0x21, 0xd5, 0xc1, 0xb4, 
0xed, 0x13, 0x4f, 0x2c, 0xb4, 0x80, 0x3d, 0x46, 0xa2, 0x27, 0xba, 0xb1, + 0xf4, 0x3f, 0x44, 0x20, 0x33, 0x5e, 0x19, 0xd1, 0x83, 0x20, 0x96, 0x72, 0x69, 0xec, 0x5e, 0xaf, + 0xd0, 0xfe, 0x87, 0xc5, 0xf3, 0x7d, 0xc8, 0x8c, 0x1b, 0xc7, 0x57, 0xf4, 0x39, 0x1e, 0x04, 0x9b, + 0xf6, 0x1c, 0x0f, 0x04, 0xb9, 0x0e, 0x1c, 0xd7, 0x19, 0xf4, 0x82, 0xb2, 0x3b, 0x6c, 0xe7, 0x7f, + 0xa4, 0x41, 0x22, 0x38, 0x45, 0xa0, 0x1c, 0xdc, 0xe0, 0xb7, 0xb3, 0xfd, 0xba, 0x71, 0x74, 0xe9, + 0x1e, 0x97, 0x86, 0xc4, 0x7e, 0x79, 0xb7, 0xb2, 0x53, 0xaf, 0x1f, 0x66, 0x35, 0x94, 0x84, 0xf8, + 0x49, 0xad, 0xbc, 0x7b, 0x98, 0x8d, 0xc8, 0x3b, 0x5d, 0xad, 0x72, 0xd7, 0x28, 0x1f, 0x65, 0xa3, + 0x68, 0x05, 0xa2, 0x87, 0xd5, 0xc3, 0x6c, 0x4c, 0x8c, 0x38, 0x7c, 0xd0, 0xa8, 0x64, 0xe3, 0x28, + 0x01, 0xb1, 0x5a, 0xf5, 0xb8, 0x92, 0x5d, 0xe6, 0xc2, 0xfb, 0xd5, 0x9d, 0x8a, 0x91, 0x5d, 0x41, + 0x4f, 0xc1, 0x7a, 0x79, 0xb7, 0x59, 0xad, 0x1f, 0x9f, 0x98, 0xf5, 0x63, 0xf3, 0x6e, 0xbd, 0x7e, + 0xb7, 0x56, 0xc9, 0x26, 0x76, 0x92, 0xb0, 0xa2, 0x5e, 0x49, 0xf4, 0xef, 0x6b, 0x80, 0x26, 0xef, + 0xfb, 0xe8, 0x7f, 0x27, 0x5f, 0x12, 0x42, 0xdb, 0xfb, 0xd2, 0x6b, 0xc0, 0x3c, 0x4f, 0x17, 0x91, + 0x7f, 0xfe, 0x74, 0x91, 0x67, 0x90, 0x0e, 0x3f, 0x12, 0xa2, 0xe7, 0xe0, 0xd6, 0x87, 0x95, 0x9d, + 0x83, 0x7a, 0xfd, 0xd0, 0x3c, 0x69, 0x96, 0x9b, 0x97, 0x2f, 0xbc, 0xb7, 0xe0, 0xa9, 0xf1, 0xee, + 0xca, 0x71, 0x79, 0xa7, 0x56, 0xd9, 0xcb, 0x6a, 0x68, 0x1b, 0x5e, 0x9e, 0xda, 0x65, 0xee, 0xd7, + 0x0d, 0xf3, 0xa4, 0x56, 0x6f, 0x9a, 0xfb, 0xd5, 0x5a, 0xad, 0x7a, 0x7c, 0x37, 0x1b, 0xc9, 0x7f, + 0xa9, 0x01, 0xe2, 0x1c, 0x21, 0x0d, 0xa1, 0x06, 0xfe, 0xb8, 0x8f, 0x29, 0x43, 0x37, 0x61, 0x59, + 0x1a, 0xaa, 0xfc, 0x55, 0x2d, 0x7e, 0xba, 0xb2, 0x2d, 0xa7, 0xdb, 0xe7, 0x07, 0xa0, 0xb6, 0xdb, + 0x09, 0xbc, 0x4a, 0x07, 0xc2, 0x5d, 0xb7, 0x83, 0x51, 0x0d, 0x52, 0xca, 0xf1, 0x0b, 0x82, 0x1f, + 0x89, 0x95, 0x99, 0x29, 0xfd, 0xcf, 0x9c, 0xab, 0xef, 0x3e, 0xc1, 0x8f, 0x0c, 0x20, 0xc3, 0x6f, + 0x7e, 0xe0, 0xf2, 0xb8, 0x3a, 0x4a, 0x3e, 0xc5, 0xea, 0x45, 0x22, 0xc1, 0x05, 0x27, 0xe4, 0x53, + 0x1e, 0x24, 0x10, 0x9d, 0xcc, 0x3d, 0xc7, 0x8e, 0xba, 0x4d, 0x8b, 0xe1, 0x4d, 0x2e, 0xc8, 0x7f, + 0x57, 0x83, 0x8d, 0x31, 0xef, 0xd4, 0x21, 0xa8, 0x0c, 0x2b, 0x52, 0x43, 0xc0, 0x05, 0xff, 0x3d, + 0xa7, 0x75, 0x46, 0x30, 0x0f, 0xbd, 0x0c, 0x6b, 0x0e, 0x3f, 0x47, 0x85, 0xd4, 0xcb, 0x58, 0xac, + 0x72, 0x71, 0x63, 0x68, 0xc2, 0xcf, 0x35, 0xc8, 0xde, 0xc5, 0xca, 0x82, 0x20, 0xbc, 0xd3, 0x1e, + 0x21, 0xfe, 0xfd, 0xa1, 0xcd, 0xff, 0x59, 0x83, 0x8d, 0x5d, 0x1f, 0x5b, 0x0c, 0x8f, 0x9b, 0xf7, + 0xb8, 0xec, 0x7f, 0x00, 0xcb, 0x72, 0xb6, 0x3a, 0x26, 0xcc, 0x1d, 0x35, 0x35, 0x6d, 0xd2, 0xc7, + 0xe8, 0x6c, 0x1f, 0x63, 0xd7, 0xf3, 0xf1, 0x07, 0x11, 0xd8, 0x38, 0xf5, 0x3a, 0x13, 0x3e, 0x8e, + 0x7c, 0xd1, 0x9e, 0x90, 0x2f, 0xd3, 0xf2, 0x75, 0x07, 0x52, 0x7d, 0xa1, 0x5c, 0xfc, 0x56, 0xa1, + 0x1e, 0x32, 0xf4, 0x89, 0xd7, 0x81, 0x7d, 0x82, 0xed, 0xce, 0x91, 0x45, 0xcf, 0x0d, 0x90, 0xc3, + 0xf9, 0xf7, 0x13, 0x0e, 0xc4, 0x2b, 0xb0, 0xb1, 0x87, 0x6d, 0x7c, 0x39, 0x0e, 0x53, 0x96, 0x62, + 0xfe, 0xef, 0x11, 0xb8, 0xb5, 0x63, 0xb1, 0xf6, 0xc3, 0x70, 0xe0, 0x66, 0x72, 0xc3, 0x36, 0x64, + 0x95, 0xb9, 0x2d, 0x3e, 0xd7, 0x1c, 0x1e, 0x77, 0xf8, 0x95, 0x54, 0xf6, 0x48, 0x50, 0x9f, 0xa0, + 0x6f, 0xc2, 0xc6, 0xd8, 0x58, 0xe2, 0xd8, 0xc4, 0xc1, 0x2a, 0x3e, 0xaf, 0xce, 0xe9, 0xa2, 0x40, + 0xe3, 0x97, 0xf7, 0x10, 0x78, 0x55, 0x00, 0x4d, 0x26, 0x27, 0x36, 0x3b, 0x39, 0xf1, 0xeb, 0x24, + 0x67, 0xf9, 0x5a, 0xc9, 0xd9, 0xc9, 0x40, 0x3a, 0x1c, 0x8f, 0xbc, 0x09, 0xfa, 0xb4, 0x04, 0x3c, + 0x31, 0xfa, 0xca, 0x5f, 0xa8, 0x0c, 0x87, 0x97, 
0xc4, 0xcc, 0x0c, 0x87, 0xf4, 0x46, 0xae, 0xa8,
+ 0xb7, 0x01, 0xa9, 0x50, 0xf2, 0x9e, 0x80, 0x27, 0xdb, 0x1f, 0x00, 0x54, 0xc3, 0xd5, 0xe2, 0xe9,
+ 0xea, 0x71, 0xb3, 0x72, 0xdc, 0x34, 0xef, 0x57, 0x2b, 0x1f, 0x5e, 0xaa, 0x99, 0x37, 0x20, 0x1b,
+ 0xee, 0xdc, 0x3f, 0xad, 0xd5, 0xb2, 0x5a, 0xe9, 0x8b, 0x24, 0xac, 0xa8, 0x00, 0xa0, 0xdf, 0x68,
+ 0x90, 0x0a, 0x15, 0x0c, 0xf4, 0xc6, 0x4c, 0x73, 0x26, 0x8b, 0xa7, 0xfe, 0xe6, 0x62, 0x93, 0x64,
+ 0x52, 0xf3, 0xa5, 0xef, 0xfd, 0xf1, 0x2f, 0x3f, 0x8d, 0xbc, 0x8a, 0xb6, 0x87, 0xbf, 0xa2, 0x7e,
+ 0x5b, 0x86, 0xfd, 0x3d, 0xcf, 0x77, 0xbf, 0x85, 0xdb, 0x8c, 0x16, 0xb7, 0x8b, 0x56, 0x17, 0x3b,
+ 0xec, 0xb3, 0x62, 0x50, 0x84, 0x7e, 0xa1, 0x41, 0x72, 0x58, 0x5c, 0xd0, 0xec, 0x9f, 0x9e, 0x2e,
+ 0x17, 0x22, 0x7d, 0xde, 0x70, 0x4f, 0xb3, 0x8e, 0x53, 0xc5, 0x84, 0x6d, 0x81, 0x69, 0xc5, 0xed,
+ 0xcf, 0xd0, 0xe7, 0x1a, 0xa4, 0xc3, 0xe5, 0x05, 0xcd, 0x0e, 0xcc, 0x94, 0x6a, 0x34, 0xbf, 0x8d,
+ 0xb7, 0x85, 0x8d, 0x6f, 0xe6, 0x17, 0x88, 0xe0, 0xed, 0x80, 0xcd, 0x7f, 0xab, 0x41, 0x3a, 0xbc,
+ 0xd9, 0xe6, 0xb0, 0x75, 0x4a, 0x55, 0x99, 0xdf, 0xd6, 0xb2, 0xb0, 0xf5, 0x4e, 0xe9, 0xf5, 0x91,
+ 0xad, 0xea, 0x17, 0xfa, 0x59, 0x61, 0x1d, 0x9a, 0xfc, 0x63, 0x0d, 0xd2, 0xe1, 0xed, 0x3b, 0x87,
+ 0xc9, 0x53, 0x0a, 0x80, 0x7e, 0x73, 0x82, 0xf0, 0x2a, 0x3d, 0x8f, 0x0d, 0x82, 0x8c, 0x6f, 0x2f,
+ 0x92, 0xf1, 0x2f, 0x34, 0x40, 0x93, 0xbc, 0x85, 0x66, 0x5f, 0xc2, 0x1f, 0x5b, 0x6d, 0xf4, 0xe7,
+ 0x82, 0xb9, 0xa1, 0x7f, 0x0f, 0x14, 0xea, 0xc1, 0xbf, 0x07, 0x82, 0x38, 0xe6, 0xdf, 0x5a, 0x20,
+ 0xe7, 0xad, 0x91, 0xb2, 0xdb, 0xda, 0xf6, 0xc8, 0xe8, 0x31, 0x2e, 0x9c, 0xd7, 0xe8, 0x69, 0x04,
+ 0xfa, 0x2f, 0x33, 0x5a, 0x2a, 0xbb, 0xad, 0x6d, 0xef, 0x7c, 0xae, 0xc1, 0x8b, 0x6d, 0xb7, 0x37,
+ 0xcb, 0xc6, 0x1d, 0xc5, 0xb6, 0x0d, 0x9e, 0xdb, 0x86, 0xf6, 0x51, 0x55, 0x8d, 0xef, 0xba, 0xbc,
+ 0x12, 0x16, 0x5c, 0xbf, 0x5b, 0xec, 0x62, 0x47, 0x64, 0xbe, 0x28, 0xbb, 0x2c, 0x8f, 0xd0, 0xc7,
+ 0xfe, 0xb1, 0xe3, 0xce, 0x48, 0xf4, 0x37, 0x4d, 0xfb, 0x65, 0x24, 0xb2, 0xb7, 0xff, 0xeb, 0xc8,
+ 0xf3, 0x77, 0x25, 0xe6, 0xae, 0xb0, 0x61, 0x6f, 0x64, 0xc3, 0x7d, 0x39, 0xa9, 0xb5, 0x2c, 0xf0,
+ 0xdf, 0xf8, 0x47, 0x00, 0x00, 0x00, 0xff, 0xff, 0x39, 0x20, 0xe1, 0xfe, 0xa4, 0x22, 0x00, 0x00,
}
diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1/session.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1/session.pb.go
index fa627cb9a..28e66c524 100644
--- a/vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1/session.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1/session.pb.go
@@ -128,7 +128,10 @@ func (StreamingRecognitionResult_MessageType) EnumDescriptor() ([]byte, []int) {
// The request to detect user's intent.
type DetectIntentRequest struct {
// Required. The name of the session this query is sent to. Format:
- // `projects/<Project ID>/agent/sessions/<Session ID>`.
+ // `projects/<Project ID>/agent/sessions/<Session ID>`, or
+ // `projects/<Project ID>/agent/runtimes/<Runtime ID>/sessions/<Session ID>`.
+ // Note: Runtimes are under construction and will be available soon.
+ // If <Runtime ID> is not specified, we assume default 'sandbox' runtime.
// It's up to the API caller to choose an appropriate session ID. It can be
// a random number or some type of user identifier (preferably hashed).
// The length of the session ID must not exceed 36 bytes.
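To make the new naming concrete, here is a minimal Go sketch of the two session-name shapes this hunk documents. The project, runtime, and session IDs below are hypothetical placeholders; only the path layouts come from the comment change above.

package main

import "fmt"

func main() {
	// Hypothetical IDs; only the path shapes come from the comment change.
	project, runtimeID, session := "my-project", "my-runtime", "user-1234"

	// Plain form: the default 'sandbox' runtime is assumed.
	fmt.Printf("projects/%s/agent/sessions/%s\n", project, session)

	// Runtime-scoped form added by this change.
	fmt.Printf("projects/%s/agent/runtimes/%s/sessions/%s\n", project, runtimeID, session)
}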
@@ -624,8 +627,7 @@ func (m *QueryResult) GetDiagnosticInfo() *google_protobuf4.Struct {
// Multiple request messages should be sent in order:
//
// 1. The first message must contain `session`, `query_input` plus optionally
-//    `query_params` and/or `single_utterance`. The message must not contain
-//    `input_audio`.
+//    `query_params` and/or `single_utterance`. The message must not contain `input_audio`.
//
// 2. If `query_input` was set to a streaming input audio config,
//    all subsequent messages must contain only `input_audio`.
@@ -633,7 +635,10 @@ type StreamingDetectIntentRequest struct {
// Required. The name of the session the query is sent to.
// Format of the session name:
- // `projects/<Project ID>/agent/sessions/<Session ID>`.
+ // `projects/<Project ID>/agent/sessions/<Session ID>`, or
+ // `projects/<Project ID>/agent/runtimes/<Runtime ID>/sessions/<Session ID>`.
+ // Note: Runtimes are under construction and will be available soon.
+ // If <Runtime ID> is not specified, we assume default 'sandbox' runtime.
// It’s up to the API caller to choose an appropriate <Session ID>. It can be
// a random number or some type of user identifier (preferably hashed).
// The length of the session ID must not exceed 36 characters.
@@ -649,10 +654,14 @@ type StreamingDetectIntentRequest struct {
//
// 3. an event that specifies which intent to trigger.
QueryInput *QueryInput `protobuf:"bytes,3,opt,name=query_input,json=queryInput" json:"query_input,omitempty"`
- // Optional. If `true`, the recognizer will detect a single spoken utterance
- // in input audio. When it detects that the user has paused or stopped
- // speaking, it will cease recognition. This setting is ignored when
- // `query_input` is a piece of text or an event.
+ // Optional. If `false` (default), recognition does not cease until the
+ // client closes the stream.
+ // If `true`, the recognizer will detect a single spoken utterance in input
+ // audio. Recognition ceases when it detects the audio's voice has
+ // stopped or paused. In this case, once a detected intent is received, the
+ // client should close the stream and start a new request with a new stream as
+ // needed.
+ // This setting is ignored when `query_input` is a piece of text or an event.
SingleUtterance bool `protobuf:"varint,4,opt,name=single_utterance,json=singleUtterance" json:"single_utterance,omitempty"`
// Optional. The input audio content to be recognized. Must be sent if
// `query_input` was set to a streaming input audio config.
The complete audio @@ -1144,104 +1153,106 @@ var _Sessions_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("google/cloud/dialogflow/v2beta1/session.proto", fileDescriptor4) } var fileDescriptor4 = []byte{ - // 1580 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x57, 0x3f, 0x73, 0x1b, 0xc7, - 0x15, 0xd7, 0x81, 0xff, 0x1f, 0x40, 0x12, 0x5e, 0xc9, 0xd6, 0x89, 0x94, 0x2c, 0x05, 0x1e, 0x4f, - 0x28, 0xc6, 0x01, 0x2c, 0x26, 0x71, 0xc6, 0xd6, 0xc8, 0x11, 0x08, 0x1c, 0x49, 0xcc, 0x80, 0x20, - 0xbc, 0x00, 0x2d, 0xdb, 0xcd, 0xce, 0xf2, 0xb0, 0x38, 0x9e, 0x72, 0xd8, 0x3d, 0xde, 0x2e, 0x6c, - 0xd3, 0x99, 0xa4, 0xc8, 0x57, 0x48, 0x97, 0x32, 0x4d, 0x66, 0xdc, 0xa6, 0x49, 0x93, 0xca, 0x1f, - 0x21, 0x75, 0xba, 0x94, 0x49, 0x95, 0x14, 0x99, 0x49, 0x93, 0xb9, 0xdd, 0x3d, 0x00, 0x02, 0x29, - 0x03, 0x4a, 0x52, 0xb9, 0xc3, 0xbe, 0xf7, 0x7b, 0x6f, 0xdf, 0x7b, 0xf7, 0x7b, 0xef, 0x2d, 0xe0, - 0x87, 0x81, 0x10, 0x41, 0xc4, 0x2a, 0x7e, 0x24, 0x86, 0xbd, 0x4a, 0x2f, 0xa4, 0x91, 0x08, 0xfa, - 0x91, 0xf8, 0xa2, 0xf2, 0xf9, 0xde, 0x19, 0x53, 0xf4, 0x51, 0x45, 0x32, 0x29, 0x43, 0xc1, 0xcb, - 0x71, 0x22, 0x94, 0x40, 0xf7, 0x0d, 0xbc, 0xac, 0xe1, 0xe5, 0x31, 0xbc, 0x6c, 0xe1, 0x5b, 0x77, - 0xad, 0x3f, 0x1a, 0x87, 0x15, 0xca, 0xb9, 0x50, 0x54, 0x85, 0x82, 0x4b, 0x63, 0xbe, 0x35, 0xf3, - 0x36, 0x5f, 0x70, 0xc5, 0xbe, 0x54, 0x16, 0xfe, 0xce, 0x2c, 0x78, 0xc8, 0x15, 0xe3, 0x19, 0xfa, - 0xfd, 0x39, 0x53, 0x21, 0x8c, 0xab, 0x50, 0x5d, 0x12, 0x75, 0x19, 0x33, 0x6b, 0x9a, 0x45, 0xad, - 0x4f, 0x67, 0xc3, 0x7e, 0x45, 0xaa, 0x64, 0xe8, 0x67, 0x8e, 0x6f, 0x5b, 0x6d, 0x12, 0xfb, 0x15, - 0xa9, 0xa8, 0x1a, 0x66, 0xe9, 0xb8, 0x56, 0x91, 0x7a, 0xaa, 0x44, 0x54, 0x45, 0x3c, 0x30, 0x9a, - 0xd2, 0x3f, 0x1c, 0xb8, 0x59, 0x67, 0x8a, 0xf9, 0xaa, 0xa1, 0x43, 0xc4, 0xec, 0x62, 0xc8, 0xa4, - 0x42, 0x2e, 0xac, 0xd8, 0x28, 0x5c, 0xe7, 0x81, 0xb3, 0xb3, 0x86, 0xb3, 0x23, 0xea, 0x40, 0xe1, - 0x62, 0xc8, 0x92, 0x4b, 0x12, 0xd3, 0x84, 0x0e, 0xa4, 0x9b, 0x7b, 0xe0, 0xec, 0xe4, 0xf7, 0xde, - 0x2d, 0xcf, 0x28, 0x78, 0xf9, 0xa3, 0xd4, 0xa8, 0x9d, 0xda, 0x30, 0xc5, 0x12, 0x89, 0xf3, 0x17, - 0x23, 0x81, 0x44, 0x4d, 0x30, 0x47, 0x12, 0xf2, 0x78, 0xa8, 0xdc, 0x05, 0xed, 0xf3, 0x07, 0xf3, - 0xf9, 0x6c, 0xa4, 0x26, 0x18, 0x2e, 0x46, 0xbf, 0xd1, 0x7d, 0xc8, 0x6b, 0x3f, 0x84, 0x0e, 0x7b, - 0xa1, 0x70, 0x97, 0x1e, 0x38, 0x3b, 0x05, 0x0c, 0x5a, 0x54, 0x4d, 0x25, 0xa5, 0x6f, 0x1c, 0xb8, - 0xf5, 0x62, 0xd6, 0x32, 0x16, 0x5c, 0xb2, 0xd4, 0x32, 0xb1, 0xbf, 0x49, 0xd8, 0xb3, 0xa9, 0x43, - 0x26, 0x6a, 0xf4, 0xd0, 0x49, 0x96, 0x7d, 0xc2, 0xe4, 0x30, 0x52, 0x36, 0xfb, 0x77, 0xe6, 0x8b, - 0x14, 0x6b, 0x1b, 0x9b, 0xb9, 0x39, 0xa0, 0xf7, 0x61, 0xe3, 0x0b, 0x76, 0x76, 0x2e, 0xc4, 0xcf, - 0x89, 0xf9, 0x64, 0x36, 0x79, 0x94, 0xb9, 0x4c, 0x62, 0xbf, 0xdc, 0xd1, 0x1a, 0xbc, 0x6e, 0x91, - 0xe6, 0x58, 0xfa, 0x5b, 0x0e, 0x36, 0xa7, 0xaa, 0x8a, 0xb6, 0x61, 0x4d, 0x85, 0x03, 0x46, 0xbe, - 0x12, 0x9c, 0xd9, 0xf0, 0x57, 0x53, 0xc1, 0x67, 0x82, 0x33, 0xf4, 0x1e, 0x14, 0x02, 0x26, 0x48, - 0x24, 0x7c, 0x4d, 0x76, 0x1b, 0xfc, 0xcd, 0xec, 0x26, 0xcd, 0xb3, 0x26, 0x55, 0x4d, 0x1e, 0xe0, - 0x7c, 0xc0, 0x44, 0xd3, 0xe2, 0x50, 0x1d, 0x56, 0x2d, 0xdf, 0xd3, 0xe8, 0x16, 0x76, 0xf2, 0x7b, - 0x3b, 0x33, 0x13, 0xae, 0x19, 0x03, 0x3c, 0xb2, 0x44, 0x6f, 0xc3, 0x46, 0xc2, 0x24, 0x53, 0x64, - 0xe4, 0x6b, 0xf1, 0x81, 0xb3, 0xb3, 0x8a, 0xd7, 0xb5, 0xb4, 0x96, 0xc1, 0x7a, 0x70, 0xeb, 0x1a, - 0xfe, 0x4b, 0x77, 0x49, 0x5f, 0xbc, 0x37, 0xf3, 0xe2, 0x8e, 0x31, 0xf6, 0xb4, 0x6d, 0xf7, 0x32, - 0x66, 0x18, 0xc9, 0x69, 0x91, 0x44, 0x8f, 0x60, 
0x25, 0xa6, 0x97, 0x91, 0xa0, 0x3d, 0x77, 0x59, - 0x57, 0xe1, 0x76, 0xe6, 0x38, 0x6b, 0xad, 0x72, 0x47, 0xb7, 0x16, 0xce, 0x70, 0xa5, 0x7f, 0x3a, - 0x00, 0x63, 0xc2, 0xa1, 0x8f, 0xa1, 0xa0, 0xe9, 0x95, 0xa6, 0xd3, 0x0f, 0x03, 0x5d, 0xec, 0xfc, - 0xde, 0xa3, 0x99, 0xf1, 0x35, 0x46, 0x34, 0xac, 0x69, 0xc3, 0xa3, 0x1b, 0x38, 0x4f, 0xc7, 0x47, - 0xf4, 0x14, 0x16, 0xd3, 0x42, 0xd8, 0x8f, 0xb3, 0x3b, 0xd3, 0x5f, 0x97, 0x7d, 0xa9, 0xb4, 0xcf, - 0xa3, 0x1b, 0x58, 0x5b, 0xa2, 0x1a, 0x2c, 0xb1, 0xcf, 0x19, 0x9f, 0xbf, 0x8d, 0xbc, 0x14, 0x9d, - 0xf9, 0x30, 0xb6, 0xfb, 0x2b, 0xb0, 0xa4, 0x1b, 0xa6, 0xf4, 0x87, 0x65, 0xc8, 0x4f, 0xb0, 0x17, - 0xdd, 0x03, 0xd3, 0x6a, 0x44, 0x47, 0x69, 0x28, 0xb6, 0xa6, 0x25, 0x69, 0x24, 0xe8, 0x2d, 0x58, - 0x8f, 0x28, 0x0f, 0x86, 0x34, 0x60, 0xc4, 0x17, 0x3d, 0xe6, 0x6e, 0x6a, 0x44, 0x21, 0x13, 0xd6, - 0x44, 0x8f, 0xa1, 0x7d, 0xb8, 0x27, 0x63, 0xc6, 0xfc, 0x73, 0x92, 0x30, 0x5f, 0x04, 0x3c, 0x4c, - 0x69, 0x66, 0x0a, 0xd9, 0x63, 0xdc, 0x67, 0x3a, 0xf9, 0x1c, 0xde, 0x36, 0x20, 0x3c, 0xc6, 0xd4, - 0x46, 0x10, 0xf4, 0x06, 0x2c, 0x53, 0x5f, 0xd3, 0x78, 0x41, 0xdf, 0x60, 0x4f, 0xe8, 0xa7, 0x00, - 0xf1, 0xa8, 0x1f, 0x34, 0xc5, 0xbe, 0xe5, 0xe3, 0x4e, 0x40, 0xd1, 0x13, 0xd8, 0xa6, 0x51, 0x44, - 0x12, 0x76, 0x31, 0x0c, 0x13, 0xd6, 0xb3, 0xf3, 0x8d, 0xc4, 0x29, 0x3d, 0xb9, 0xd2, 0x53, 0x64, - 0x15, 0xbb, 0x34, 0x8a, 0xb0, 0x45, 0x98, 0xd9, 0xd5, 0x36, 0x7a, 0xf4, 0x10, 0x8a, 0xfd, 0x61, - 0xd4, 0x0f, 0xa3, 0x68, 0xc0, 0xb8, 0x32, 0xd5, 0x59, 0xd6, 0x91, 0x6d, 0x4e, 0xc8, 0x75, 0x8d, - 0xce, 0xe0, 0xd6, 0x24, 0x74, 0xc0, 0xa4, 0xa4, 0x01, 0x93, 0xee, 0x8a, 0xa6, 0x78, 0x65, 0x0e, - 0x0a, 0xe9, 0x6d, 0x72, 0x6c, 0xec, 0xf0, 0xcd, 0x09, 0x67, 0x56, 0xa6, 0xbb, 0x6d, 0x34, 0x57, - 0xc4, 0x30, 0xf1, 0x99, 0xbb, 0xaa, 0x83, 0x19, 0xcd, 0x10, 0x2d, 0x44, 0x4f, 0x61, 0x33, 0x83, - 0x65, 0xfd, 0xb0, 0xf6, 0xed, 0x25, 0xcb, 0xdc, 0xb6, 0x0d, 0x1c, 0x7d, 0x04, 0x9b, 0x62, 0xa8, - 0xd2, 0x69, 0x3b, 0xea, 0x6b, 0x78, 0xc5, 0x19, 0xb1, 0x61, 0x1c, 0x8c, 0x46, 0xc0, 0xcf, 0x60, - 0xd9, 0x2c, 0x4c, 0x37, 0xaf, 0x63, 0xf9, 0xfe, 0x9c, 0x15, 0xc1, 0xd6, 0x0c, 0x7d, 0x08, 0xdb, - 0xe6, 0x17, 0xe9, 0xe9, 0x29, 0x3f, 0xc5, 0xae, 0x82, 0x66, 0xd7, 0x1d, 0x03, 0xa9, 0x67, 0x88, - 0x09, 0x6e, 0x3d, 0x85, 0xcd, 0x5e, 0x48, 0x03, 0x2e, 0xa4, 0x0a, 0x7d, 0x12, 0xf2, 0xbe, 0x70, - 0x37, 0x66, 0x54, 0x65, 0x8c, 0x6f, 0xf0, 0xbe, 0x28, 0xfd, 0x3e, 0x07, 0x77, 0x3b, 0x2a, 0x61, - 0x74, 0x10, 0xf2, 0xe0, 0x3b, 0xb7, 0x60, 0x1f, 0x42, 0x51, 0x86, 0x3c, 0x88, 0x18, 0x19, 0x2a, - 0xc5, 0x12, 0x9a, 0x16, 0xd5, 0x0c, 0xf3, 0x4d, 0x23, 0x3f, 0xcd, 0xc4, 0xd3, 0xbb, 0x78, 0xf9, - 0xca, 0x2e, 0xfe, 0x63, 0x0e, 0xee, 0xbd, 0xa4, 0x52, 0xf3, 0x2e, 0xe5, 0xe7, 0x80, 0x26, 0xe7, - 0xc8, 0x0b, 0xab, 0xf9, 0xf1, 0xec, 0x85, 0x91, 0x5d, 0x3e, 0x31, 0x67, 0xec, 0xa6, 0x7e, 0x2d, - 0x99, 0x16, 0x5d, 0x79, 0x00, 0x2c, 0xfc, 0xff, 0x1f, 0x00, 0x8b, 0xf3, 0x3e, 0x00, 0xfe, 0x94, - 0x83, 0xad, 0x97, 0x47, 0x8f, 0x9e, 0x43, 0xc1, 0x8e, 0x16, 0xbd, 0x42, 0x75, 0xe1, 0x36, 0xf6, - 0x0e, 0xff, 0x87, 0x82, 0x64, 0x23, 0x47, 0xaf, 0xd5, 0xfc, 0x60, 0x7c, 0x40, 0x6f, 0x02, 0xa8, - 0x84, 0x72, 0xe9, 0x27, 0x61, 0x6c, 0x4a, 0xbf, 0x86, 0x27, 0x24, 0xe8, 0x0e, 0xac, 0x86, 0x92, - 0xf4, 0x43, 0x4e, 0x23, 0x5d, 0xb2, 0x55, 0xbc, 0x12, 0xca, 0x83, 0xf4, 0x98, 0x9a, 0x4e, 0xf4, - 0xe6, 0xa2, 0xee, 0xcd, 0x09, 0x49, 0xe9, 0x13, 0xc8, 0x4f, 0x5c, 0x8b, 0xee, 0x82, 0x7b, 0xec, - 0x75, 0x3a, 0xd5, 0x43, 0x8f, 0x74, 0x3f, 0x6d, 0x7b, 0xe4, 0xb4, 0xd5, 0x69, 0x7b, 0xb5, 0xc6, - 0x41, 0xc3, 0xab, 0x17, 0x6f, 0xa0, 0x0d, 0x80, 0x2e, 0xae, 0xb6, 0x3a, 
0x35, 0xdc, 0x68, 0x77, - 0x8b, 0x0e, 0xda, 0x86, 0xdb, 0x5e, 0xab, 0x4e, 0x4e, 0x0e, 0x48, 0xa7, 0xd1, 0x3a, 0x6c, 0x7a, - 0xe4, 0xb4, 0xdb, 0xf5, 0x70, 0xb5, 0x55, 0xf3, 0x8a, 0xb9, 0xd2, 0x5f, 0x1c, 0x28, 0x4e, 0xaf, - 0x63, 0x74, 0x0a, 0x1b, 0x66, 0xaf, 0x33, 0xee, 0x8b, 0x5e, 0xc8, 0x03, 0x5b, 0xb7, 0xf2, 0xcc, - 0xba, 0x69, 0x2f, 0x9e, 0xb5, 0xc2, 0xeb, 0x74, 0xf2, 0x88, 0x76, 0xe1, 0x35, 0x49, 0x07, 0x71, - 0xc4, 0x48, 0x42, 0x15, 0x23, 0xe7, 0x2c, 0x51, 0x5f, 0xe9, 0x3a, 0x2d, 0xe1, 0x4d, 0xa3, 0xc0, - 0x54, 0xb1, 0xa3, 0x54, 0x7c, 0x75, 0x87, 0x2e, 0x5c, 0xb3, 0x43, 0xbf, 0x07, 0x85, 0xf8, 0x3c, - 0xa1, 0x92, 0x91, 0xf3, 0x90, 0xeb, 0xc7, 0xd4, 0xc2, 0xce, 0x1a, 0xce, 0x1b, 0xd9, 0x51, 0x2a, - 0x2a, 0xd5, 0x61, 0x6d, 0xf4, 0x3a, 0x40, 0xc8, 0xbe, 0x2b, 0x4c, 0xfb, 0x98, 0x97, 0xc2, 0x95, - 0x8b, 0x72, 0x57, 0x2f, 0x2a, 0xfd, 0x0a, 0x60, 0xfc, 0x40, 0x48, 0xdd, 0x70, 0x3a, 0xc8, 0xde, - 0x96, 0xfa, 0xf7, 0xd4, 0xca, 0xcd, 0xcd, 0xbf, 0x72, 0xe7, 0x49, 0x74, 0xf7, 0xdf, 0x0e, 0xac, - 0xbf, 0x50, 0x5a, 0xf4, 0x26, 0x6c, 0x55, 0x4f, 0xeb, 0x8d, 0x13, 0xe2, 0xb5, 0x6a, 0x27, 0xf5, - 0x46, 0xeb, 0x70, 0x8a, 0x04, 0x77, 0xc1, 0x9d, 0xd2, 0x37, 0x1b, 0x2d, 0xaf, 0x8a, 0xc9, 0xa3, - 0xf7, 0x8a, 0x0e, 0xba, 0x0d, 0x37, 0xa7, 0xb4, 0x07, 0xcd, 0x6a, 0xad, 0x98, 0x43, 0x2e, 0xdc, - 0x9a, 0x52, 0x1c, 0x9f, 0x36, 0xab, 0xcf, 0x8a, 0x0b, 0xe8, 0x0d, 0x40, 0x53, 0x9a, 0xea, 0x31, - 0x2e, 0x2e, 0xa2, 0x3b, 0xf0, 0xfa, 0x55, 0x39, 0x79, 0xb6, 0x5f, 0x5c, 0x4a, 0x89, 0x37, 0xa5, - 0x3a, 0x39, 0x3c, 0x24, 0x27, 0xed, 0xd3, 0x4e, 0x71, 0x19, 0x3d, 0x84, 0xb7, 0xa7, 0x94, 0x9d, - 0xb6, 0xe7, 0x7d, 0x42, 0x9e, 0x35, 0xba, 0x47, 0xe4, 0xc8, 0xab, 0xd6, 0x3d, 0x4c, 0xf6, 0x3f, - 0xed, 0x7a, 0xc5, 0x95, 0xbd, 0xbf, 0xe7, 0x60, 0xd5, 0x3e, 0x69, 0x25, 0xfa, 0xc6, 0x81, 0xc2, - 0xe4, 0x88, 0x44, 0x3f, 0x9e, 0x49, 0xca, 0x6b, 0x76, 0xcf, 0xd6, 0x4f, 0x5e, 0xd1, 0xca, 0x0c, - 0xda, 0xd2, 0xc1, 0xaf, 0xff, 0xfc, 0xd7, 0xdf, 0xe4, 0x9e, 0x96, 0x1e, 0x8f, 0xfe, 0xa8, 0xfe, - 0xc2, 0xee, 0xac, 0x27, 0x71, 0x22, 0x9e, 0x33, 0x5f, 0xc9, 0xca, 0x6e, 0x85, 0x06, 0x8c, 0xab, - 0xec, 0x2f, 0xac, 0xac, 0xec, 0xfe, 0xf2, 0x83, 0xde, 0x84, 0xb3, 0x0f, 0x9c, 0x5d, 0xf4, 0x5b, - 0x07, 0x5e, 0xbf, 0x76, 0xe2, 0xa3, 0x27, 0xf3, 0xcf, 0xa6, 0xeb, 0xf2, 0xfa, 0xf0, 0xbf, 0x35, - 0x37, 0x09, 0xee, 0x38, 0xef, 0x3a, 0xfb, 0x5f, 0x3b, 0xf0, 0x96, 0x2f, 0x06, 0xb3, 0x3c, 0xed, - 0x17, 0xec, 0x47, 0x69, 0xa7, 0xf4, 0x6e, 0x3b, 0x9f, 0x35, 0xac, 0x41, 0x20, 0x52, 0xf2, 0x96, - 0x45, 0x12, 0x54, 0x02, 0xc6, 0x35, 0xf9, 0x2b, 0x46, 0x45, 0xe3, 0x50, 0xbe, 0xf4, 0x3f, 0xff, - 0xe3, 0xb1, 0xe8, 0x5f, 0x8e, 0xf3, 0xbb, 0x5c, 0xae, 0x7e, 0xf0, 0x75, 0xee, 0xfe, 0xa1, 0xf1, - 0x59, 0xd3, 0x41, 0xd4, 0xc7, 0x41, 0x7c, 0x6c, 0x8c, 0xce, 0x96, 0xb5, 0xff, 0x1f, 0xfd, 0x27, - 0x00, 0x00, 0xff, 0xff, 0x78, 0xce, 0xe5, 0x99, 0x1d, 0x11, 0x00, 0x00, + // 1601 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcd, 0x73, 0x23, 0x47, + 0x15, 0xdf, 0x91, 0xbf, 0x9f, 0x64, 0x5b, 0xe9, 0xdd, 0x64, 0x67, 0xed, 0xdd, 0xec, 0xa2, 0x54, + 0x0a, 0xaf, 0x09, 0x52, 0xd6, 0x40, 0xa8, 0x64, 0x6b, 0xc3, 0xca, 0xd2, 0xd8, 0x56, 0xe1, 0xb5, + 0x95, 0x96, 0x9c, 0x4d, 0xf6, 0xd2, 0xd5, 0x1e, 0xb5, 0xc6, 0xb3, 0x8c, 0xba, 0xc7, 0xd3, 0x3d, + 0x49, 0x1c, 0x0a, 0x0e, 0xdc, 0xb9, 0x00, 0x27, 0x8e, 0x5c, 0xa8, 0xca, 0x95, 0x0b, 0x07, 0x38, + 0xf1, 0x27, 0x70, 0xe6, 0xc6, 0x91, 0x1b, 0x1c, 0xa8, 0xe2, 0x42, 0x4d, 0x77, 0x8f, 0xa4, 0x95, + 0xbd, 0x2b, 0x05, 0x38, 0x71, 0x53, 0xbf, 0xf7, 0x7b, 0xaf, 0xdf, 0xfb, 
0xcd, 0xfb, 0x68, 0x1b, + 0xbe, 0x1d, 0x08, 0x11, 0x44, 0xac, 0xe6, 0x47, 0x22, 0xed, 0xd5, 0x7a, 0x21, 0x8d, 0x44, 0xd0, + 0x8f, 0xc4, 0xe7, 0xb5, 0xcf, 0x76, 0x4e, 0x99, 0xa2, 0x0f, 0x6a, 0x92, 0x49, 0x19, 0x0a, 0x5e, + 0x8d, 0x13, 0xa1, 0x04, 0xba, 0x6b, 0xe0, 0x55, 0x0d, 0xaf, 0x8e, 0xe0, 0x55, 0x0b, 0xdf, 0xb8, + 0x6d, 0xfd, 0xd1, 0x38, 0xac, 0x51, 0xce, 0x85, 0xa2, 0x2a, 0x14, 0x5c, 0x1a, 0xf3, 0x8d, 0xa9, + 0xb7, 0xf9, 0x82, 0x2b, 0xf6, 0x85, 0xb2, 0xf0, 0x77, 0xa6, 0xc1, 0x43, 0xae, 0x18, 0xcf, 0xd1, + 0xef, 0xcf, 0x98, 0x0a, 0x61, 0x5c, 0x85, 0xea, 0x82, 0xa8, 0x8b, 0x98, 0x59, 0xd3, 0x3c, 0x6a, + 0x7d, 0x3a, 0x4d, 0xfb, 0x35, 0xa9, 0x92, 0xd4, 0xcf, 0x1d, 0xdf, 0xb4, 0xda, 0x24, 0xf6, 0x6b, + 0x52, 0x51, 0x95, 0xe6, 0xe9, 0xb8, 0x56, 0x91, 0x79, 0xaa, 0x45, 0x54, 0x45, 0x3c, 0x30, 0x9a, + 0xca, 0xdf, 0x1d, 0xb8, 0xde, 0x64, 0x8a, 0xf9, 0xaa, 0xa5, 0x43, 0xc4, 0xec, 0x3c, 0x65, 0x52, + 0x21, 0x17, 0x96, 0x6c, 0x14, 0xae, 0x73, 0xcf, 0xd9, 0x5a, 0xc1, 0xf9, 0x11, 0x75, 0xa0, 0x74, + 0x9e, 0xb2, 0xe4, 0x82, 0xc4, 0x34, 0xa1, 0x03, 0xe9, 0x16, 0xee, 0x39, 0x5b, 0xc5, 0x9d, 0x77, + 0xab, 0x53, 0x08, 0xaf, 0x7e, 0x94, 0x19, 0xb5, 0x33, 0x1b, 0xa6, 0x58, 0x22, 0x71, 0xf1, 0x7c, + 0x28, 0x90, 0xe8, 0x10, 0xcc, 0x91, 0x84, 0x3c, 0x4e, 0x95, 0x3b, 0xa7, 0x7d, 0x7e, 0x6b, 0x36, + 0x9f, 0xad, 0xcc, 0x04, 0xc3, 0xf9, 0xf0, 0x37, 0xba, 0x0b, 0x45, 0xed, 0x87, 0xd0, 0xb4, 0x17, + 0x0a, 0x77, 0xe1, 0x9e, 0xb3, 0x55, 0xc2, 0xa0, 0x45, 0xf5, 0x4c, 0x52, 0xf9, 0x93, 0x03, 0x37, + 0x5e, 0xcc, 0x5a, 0xc6, 0x82, 0x4b, 0x96, 0x59, 0x26, 0xf6, 0x37, 0x09, 0x7b, 0x36, 0x75, 0xc8, + 0x45, 0xad, 0x1e, 0x3a, 0xce, 0xb3, 0x4f, 0x98, 0x4c, 0x23, 0x65, 0xb3, 0x7f, 0x67, 0xb6, 0x48, + 0xb1, 0xb6, 0xb1, 0x99, 0x9b, 0x03, 0x7a, 0x1f, 0xd6, 0x3e, 0x67, 0xa7, 0x67, 0x42, 0xfc, 0x88, + 0x98, 0x4f, 0x66, 0x93, 0x47, 0xb9, 0xcb, 0x24, 0xf6, 0xab, 0x1d, 0xad, 0xc1, 0xab, 0x16, 0x69, + 0x8e, 0x95, 0xbf, 0x15, 0x60, 0x7d, 0x82, 0x55, 0xb4, 0x09, 0x2b, 0x2a, 0x1c, 0x30, 0xf2, 0xa5, + 0xe0, 0xcc, 0x86, 0xbf, 0x9c, 0x09, 0x9e, 0x09, 0xce, 0xd0, 0x7b, 0x50, 0x0a, 0x98, 0x20, 0x91, + 0xf0, 0x75, 0xb1, 0xdb, 0xe0, 0xaf, 0xe7, 0x37, 0xe9, 0x3a, 0x3b, 0xa4, 0xea, 0x90, 0x07, 0xb8, + 0x18, 0x30, 0x71, 0x68, 0x71, 0xa8, 0x09, 0xcb, 0xb6, 0xde, 0xb3, 0xe8, 0xe6, 0xb6, 0x8a, 0x3b, + 0x5b, 0x53, 0x13, 0x6e, 0x18, 0x03, 0x3c, 0xb4, 0x44, 0x6f, 0xc3, 0x5a, 0xc2, 0x24, 0x53, 0x64, + 0xe8, 0x6b, 0xfe, 0x9e, 0xb3, 0xb5, 0x8c, 0x57, 0xb5, 0xb4, 0x91, 0xc3, 0x7a, 0x70, 0xe3, 0x8a, + 0xfa, 0x97, 0xee, 0x82, 0xbe, 0x78, 0x67, 0xea, 0xc5, 0x1d, 0x63, 0xec, 0x69, 0xdb, 0xee, 0x45, + 0xcc, 0x30, 0x92, 0x93, 0x22, 0x89, 0x1e, 0xc0, 0x52, 0x4c, 0x2f, 0x22, 0x41, 0x7b, 0xee, 0xa2, + 0x66, 0xe1, 0x66, 0xee, 0x38, 0x6f, 0xad, 0x6a, 0x47, 0xb7, 0x16, 0xce, 0x71, 0x95, 0x7f, 0x38, + 0x00, 0xa3, 0x82, 0x43, 0x1f, 0x43, 0x49, 0x97, 0x57, 0x96, 0x4e, 0x3f, 0x0c, 0x34, 0xd9, 0xc5, + 0x9d, 0x07, 0x53, 0xe3, 0x6b, 0x0d, 0xcb, 0xb0, 0xa1, 0x0d, 0x0f, 0xae, 0xe1, 0x22, 0x1d, 0x1d, + 0xd1, 0x63, 0x98, 0xcf, 0x88, 0xb0, 0x1f, 0x67, 0x7b, 0xaa, 0xbf, 0x2e, 0xfb, 0x42, 0x69, 0x9f, + 0x07, 0xd7, 0xb0, 0xb6, 0x44, 0x0d, 0x58, 0x60, 0x9f, 0x31, 0x3e, 0x7b, 0x1b, 0x79, 0x19, 0x3a, + 0xf7, 0x61, 0x6c, 0x77, 0x97, 0x60, 0x41, 0x37, 0x4c, 0xe5, 0x77, 0x8b, 0x50, 0x1c, 0xab, 0x5e, + 0x74, 0x07, 0x4c, 0xab, 0x11, 0x1d, 0xa5, 0x29, 0xb1, 0x15, 0x2d, 0xc9, 0x22, 0x41, 0x6f, 0xc1, + 0x6a, 0x44, 0x79, 0x90, 0xd2, 0x80, 0x11, 0x5f, 0xf4, 0x98, 0xbb, 0xae, 0x11, 0xa5, 0x5c, 0xd8, + 0x10, 0x3d, 0x86, 0x76, 0xe1, 0x8e, 0x8c, 0x19, 0xf3, 0xcf, 0x48, 0xc2, 0x7c, 0x11, 0xf0, 0x30, + 
0x2b, 0x33, 0x43, 0x64, 0x8f, 0x71, 0x9f, 0xe9, 0xe4, 0x0b, 0x78, 0xd3, 0x80, 0xf0, 0x08, 0xd3, + 0x18, 0x42, 0xd0, 0x1b, 0xb0, 0x48, 0x7d, 0x5d, 0xc6, 0x73, 0xfa, 0x06, 0x7b, 0x42, 0xdf, 0x07, + 0x88, 0x87, 0xfd, 0xa0, 0x4b, 0xec, 0x15, 0x1f, 0x77, 0x0c, 0x8a, 0x1e, 0xc1, 0x26, 0x8d, 0x22, + 0x92, 0xb0, 0xf3, 0x34, 0x4c, 0x58, 0xcf, 0xce, 0x37, 0x12, 0x67, 0xe5, 0xc9, 0x95, 0x9e, 0x22, + 0xcb, 0xd8, 0xa5, 0x51, 0x84, 0x2d, 0xc2, 0xcc, 0xae, 0xb6, 0xd1, 0xa3, 0xfb, 0x50, 0xee, 0xa7, + 0x51, 0x3f, 0x8c, 0xa2, 0x01, 0xe3, 0xca, 0xb0, 0xb3, 0xa8, 0x23, 0x5b, 0x1f, 0x93, 0x6b, 0x8e, + 0x4e, 0xe1, 0xc6, 0x38, 0x74, 0xc0, 0xa4, 0xa4, 0x01, 0x93, 0xee, 0x92, 0x2e, 0xf1, 0xda, 0x0c, + 0x25, 0xa4, 0xb7, 0xc9, 0x13, 0x63, 0x87, 0xaf, 0x8f, 0x39, 0xb3, 0x32, 0xdd, 0x6d, 0xc3, 0xb9, + 0x22, 0xd2, 0xc4, 0x67, 0xee, 0xb2, 0x0e, 0x66, 0x38, 0x43, 0xb4, 0x10, 0x3d, 0x86, 0xf5, 0x1c, + 0x96, 0xf7, 0xc3, 0xca, 0xab, 0x29, 0xcb, 0xdd, 0xb6, 0x0d, 0x1c, 0x7d, 0x04, 0xeb, 0x22, 0x55, + 0xd9, 0xb4, 0x1d, 0xf6, 0x35, 0x7c, 0xcd, 0x19, 0xb1, 0x66, 0x1c, 0x0c, 0x47, 0xc0, 0x0f, 0x60, + 0xd1, 0x2c, 0x4c, 0xb7, 0xa8, 0x63, 0xf9, 0xe6, 0x8c, 0x8c, 0x60, 0x6b, 0x86, 0x3e, 0x84, 0x4d, + 0xf3, 0x8b, 0xf4, 0xf4, 0x94, 0x9f, 0xa8, 0xae, 0x92, 0xae, 0xae, 0x5b, 0x06, 0xd2, 0xcc, 0x11, + 0x63, 0xb5, 0xf5, 0x18, 0xd6, 0x7b, 0x21, 0x0d, 0xb8, 0x90, 0x2a, 0xf4, 0x49, 0xc8, 0xfb, 0xc2, + 0x5d, 0x9b, 0xc2, 0xca, 0x08, 0xdf, 0xe2, 0x7d, 0x51, 0xf9, 0x6d, 0x01, 0x6e, 0x77, 0x54, 0xc2, + 0xe8, 0x20, 0xe4, 0xc1, 0xff, 0xdd, 0x82, 0xbd, 0x0f, 0x65, 0x19, 0xf2, 0x20, 0x62, 0x24, 0x55, + 0x8a, 0x25, 0x34, 0x23, 0xd5, 0x0c, 0xf3, 0x75, 0x23, 0x3f, 0xc9, 0xc5, 0x93, 0xbb, 0x78, 0xf1, + 0xd2, 0x2e, 0xfe, 0x7d, 0x01, 0xee, 0xbc, 0x84, 0xa9, 0x59, 0x97, 0xf2, 0x73, 0x40, 0xe3, 0x73, + 0xe4, 0x85, 0xd5, 0xfc, 0x70, 0xfa, 0xc2, 0xc8, 0x2f, 0x1f, 0x9b, 0x33, 0x76, 0x53, 0xbf, 0x96, + 0x4c, 0x8a, 0x2e, 0x3d, 0x00, 0xe6, 0xfe, 0xf7, 0x0f, 0x80, 0xf9, 0x59, 0x1f, 0x00, 0x7f, 0x2c, + 0xc0, 0xc6, 0xcb, 0xa3, 0x47, 0xcf, 0xa1, 0x64, 0x47, 0x8b, 0x5e, 0xa1, 0x9a, 0xb8, 0xb5, 0x9d, + 0xfd, 0xff, 0x82, 0x90, 0x7c, 0xe4, 0xe8, 0xb5, 0x5a, 0x1c, 0x8c, 0x0e, 0xe8, 0x4d, 0x00, 0x95, + 0x50, 0x2e, 0xfd, 0x24, 0x8c, 0x0d, 0xf5, 0x2b, 0x78, 0x4c, 0x82, 0x6e, 0xc1, 0x72, 0x28, 0x49, + 0x3f, 0xe4, 0x34, 0xd2, 0x94, 0x2d, 0xe3, 0xa5, 0x50, 0xee, 0x65, 0xc7, 0xcc, 0x74, 0xac, 0x37, + 0xe7, 0x75, 0x6f, 0x8e, 0x49, 0x2a, 0x9f, 0x40, 0x71, 0xec, 0x5a, 0x74, 0x1b, 0xdc, 0x27, 0x5e, + 0xa7, 0x53, 0xdf, 0xf7, 0x48, 0xf7, 0xd3, 0xb6, 0x47, 0x4e, 0x8e, 0x3a, 0x6d, 0xaf, 0xd1, 0xda, + 0x6b, 0x79, 0xcd, 0xf2, 0x35, 0xb4, 0x06, 0xd0, 0xc5, 0xf5, 0xa3, 0x4e, 0x03, 0xb7, 0xda, 0xdd, + 0xb2, 0x83, 0x36, 0xe1, 0xa6, 0x77, 0xd4, 0x24, 0xc7, 0x7b, 0xa4, 0xd3, 0x3a, 0xda, 0x3f, 0xf4, + 0xc8, 0x49, 0xb7, 0xeb, 0xe1, 0xfa, 0x51, 0xc3, 0x2b, 0x17, 0x2a, 0x7f, 0x71, 0xa0, 0x3c, 0xb9, + 0x8e, 0xd1, 0x09, 0xac, 0x99, 0xbd, 0xce, 0xb8, 0x2f, 0x7a, 0x21, 0x0f, 0x2c, 0x6f, 0xd5, 0xa9, + 0xbc, 0x69, 0x2f, 0x9e, 0xb5, 0xc2, 0xab, 0x74, 0xfc, 0x88, 0xb6, 0xe1, 0x35, 0x49, 0x07, 0x71, + 0xc4, 0x48, 0x42, 0x15, 0x23, 0x67, 0x2c, 0x51, 0x5f, 0x6a, 0x9e, 0x16, 0xf0, 0xba, 0x51, 0x60, + 0xaa, 0xd8, 0x41, 0x26, 0xbe, 0xbc, 0x43, 0xe7, 0xae, 0xd8, 0xa1, 0xdf, 0x80, 0x52, 0x7c, 0x96, + 0x50, 0xc9, 0xc8, 0x59, 0xc8, 0xf5, 0x63, 0x6a, 0x6e, 0x6b, 0x05, 0x17, 0x8d, 0xec, 0x20, 0x13, + 0x55, 0x9a, 0xb0, 0x32, 0x7c, 0x1d, 0x20, 0x64, 0xdf, 0x15, 0xa6, 0x7d, 0xcc, 0x4b, 0xe1, 0xd2, + 0x45, 0x85, 0xcb, 0x17, 0x55, 0x7e, 0x0a, 0x30, 0x7a, 0x20, 0x64, 0x6e, 0x38, 0x1d, 0xe4, 0x6f, + 0x4b, 0xfd, 0x7b, 0x62, 
0xe5, 0x16, 0x66, 0x5f, 0xb9, 0xb3, 0x24, 0xba, 0xfd, 0x2f, 0x07, 0x56,
+ 0x5f, 0xa0, 0x16, 0xbd, 0x09, 0x1b, 0xf5, 0x93, 0x66, 0xeb, 0x98, 0x78, 0x47, 0x8d, 0xe3, 0x66,
+ 0xeb, 0x68, 0x7f, 0xa2, 0x08, 0x6e, 0x83, 0x3b, 0xa1, 0x3f, 0x6c, 0x1d, 0x79, 0x75, 0x4c, 0x1e,
+ 0xbc, 0x57, 0x76, 0xd0, 0x4d, 0xb8, 0x3e, 0xa1, 0xdd, 0x3b, 0xac, 0x37, 0xca, 0x05, 0xe4, 0xc2,
+ 0x8d, 0x09, 0xc5, 0x93, 0x93, 0xc3, 0xfa, 0xd3, 0xf2, 0x1c, 0x7a, 0x03, 0xd0, 0x84, 0xa6, 0xfe,
+ 0x04, 0x97, 0xe7, 0xd1, 0x2d, 0x78, 0xfd, 0xb2, 0x9c, 0x3c, 0xdd, 0x2d, 0x2f, 0x64, 0x85, 0x37,
+ 0xa1, 0x3a, 0xde, 0xdf, 0x27, 0xc7, 0xed, 0x93, 0x4e, 0x79, 0x11, 0xdd, 0x87, 0xb7, 0x27, 0x94,
+ 0x9d, 0xb6, 0xe7, 0x7d, 0x42, 0x9e, 0xb6, 0xba, 0x07, 0xe4, 0xc0, 0xab, 0x37, 0x3d, 0x4c, 0x76,
+ 0x3f, 0xed, 0x7a, 0xe5, 0xa5, 0x9d, 0x3f, 0xcc, 0xc1, 0xb2, 0x7d, 0xd2, 0x4a, 0xf4, 0x8b, 0x02,
+ 0x94, 0xc6, 0x47, 0x24, 0xfa, 0xee, 0xd4, 0xa2, 0xbc, 0x62, 0xf7, 0x6c, 0x7c, 0xef, 0x6b, 0x5a,
+ 0x99, 0x41, 0x5b, 0xf9, 0x95, 0xf3, 0xb3, 0x3f, 0xff, 0xf5, 0x97, 0x85, 0x9f, 0x3b, 0x95, 0x87,
+ 0xc3, 0x3f, 0x55, 0x7f, 0x6c, 0xb7, 0xd6, 0xa3, 0x38, 0x11, 0xcf, 0x99, 0xaf, 0x64, 0x6d, 0xbb,
+ 0x46, 0x03, 0xc6, 0x55, 0xfe, 0x47, 0xac, 0xac, 0x6d, 0xff, 0xe4, 0x83, 0xde, 0x98, 0xbb, 0x0f,
+ 0x9c, 0xed, 0x67, 0x3f, 0xac, 0xec, 0xcd, 0xe0, 0x21, 0x49, 0x79, 0xf6, 0xa7, 0x4b, 0x26, 0x78,
+ 0x85, 0x33, 0xf4, 0x6b, 0x07, 0x5e, 0xbf, 0x72, 0x81, 0xa0, 0x47, 0xb3, 0x8f, 0xba, 0xab, 0x68,
+ 0xfa, 0xf0, 0x3f, 0x35, 0x37, 0x7c, 0x6d, 0x39, 0xef, 0x3a, 0xbb, 0x5f, 0x39, 0xf0, 0x96, 0x2f,
+ 0x06, 0xd3, 0x3c, 0xed, 0x96, 0xec, 0x37, 0x6e, 0x67, 0xdd, 0xd2, 0x76, 0x9e, 0xb5, 0xac, 0x41,
+ 0x20, 0xb2, 0x5e, 0xa8, 0x8a, 0x24, 0xa8, 0x05, 0x8c, 0xeb, 0x5e, 0xaa, 0x19, 0x15, 0x8d, 0x43,
+ 0xf9, 0xd2, 0x7f, 0x21, 0x3c, 0x1c, 0x89, 0xfe, 0xe9, 0x38, 0xbf, 0x29, 0x14, 0x9a, 0x7b, 0x5f,
+ 0x15, 0xee, 0xee, 0x1b, 0x9f, 0x0d, 0x1d, 0x44, 0x73, 0x14, 0xc4, 0xc7, 0xc6, 0xe8, 0x74, 0x51,
+ 0xfb, 0xff, 0xce, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x93, 0x54, 0x90, 0x52, 0x6c, 0x11, 0x00,
+ 0x00,
}
diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1/session_entity_type.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1/session_entity_type.pb.go
index da14f2643..19eb590aa 100644
--- a/vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1/session_entity_type.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1/session_entity_type.pb.go
@@ -66,7 +66,11 @@ func (SessionEntityType_EntityOverrideMode) EnumDescriptor() ([]byte, []int) {
type SessionEntityType struct {
// Required. The unique identifier of this session entity type. Format:
// `projects/<Project ID>/agent/sessions/<Session ID>/entityTypes/<Entity Type
- // Display Name>`.
+ // Display Name>`, or
+ // `projects/<Project ID>/agent/runtimes/<Runtime ID>sessions/<Session
+ // ID>/entityTypes/<Entity Type Display Name>`.
+ // Note: Runtimes are under construction and will be available soon.
+ // If <Runtime ID> is not specified, we assume default 'sandbox' runtime.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// Required. Indicates whether the additional data should override or
// supplement the developer entity type definition.
@@ -102,11 +106,13 @@ func (m *SessionEntityType) GetEntities() []*EntityType_Entity {
return nil
}
-// The request message for
-// [SessionEntityTypes.ListSessionEntityTypes][google.cloud.dialogflow.v2beta1.SessionEntityTypes.ListSessionEntityTypes].
+// The request message for [SessionEntityTypes.ListSessionEntityTypes][google.cloud.dialogflow.v2beta1.SessionEntityTypes.ListSessionEntityTypes].
type ListSessionEntityTypesRequest struct {
// Required. The session to list all session entity types from.
- // Format: `projects/<Project ID>/agent/sessions/<Session ID>`.
+ // Format: `projects/<Project ID>/agent/sessions/<Session ID>` or
+ // `projects/<Project ID>/agent/runtimes/<Runtime ID>/sessions/<Session ID>`.
+ // Note: Runtimes are under construction and will be available soon.
+ // If <Runtime ID> is not specified, we assume default 'sandbox' runtime.
Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"`
// Optional. The maximum number of items to return in a single page. By
// default 100 and at most 1000.
@@ -141,8 +147,7 @@ func (m *ListSessionEntityTypesRequest) GetPageToken() string {
return ""
}
-// The response message for
-// [SessionEntityTypes.ListSessionEntityTypes][google.cloud.dialogflow.v2beta1.SessionEntityTypes.ListSessionEntityTypes].
+// The response message for [SessionEntityTypes.ListSessionEntityTypes][google.cloud.dialogflow.v2beta1.SessionEntityTypes.ListSessionEntityTypes].
type ListSessionEntityTypesResponse struct {
// The list of session entity types. There will be a maximum number of items
// returned based on the page_size field in the request.
@@ -171,12 +176,14 @@ func (m *ListSessionEntityTypesResponse) GetNextPageToken() string {
return ""
}
-// The request message for
-// [SessionEntityTypes.GetSessionEntityType][google.cloud.dialogflow.v2beta1.SessionEntityTypes.GetSessionEntityType].
+// The request message for [SessionEntityTypes.GetSessionEntityType][google.cloud.dialogflow.v2beta1.SessionEntityTypes.GetSessionEntityType].
type GetSessionEntityTypeRequest struct {
// Required. The name of the session entity type. Format:
// `projects/<Project ID>/agent/sessions/<Session ID>/entityTypes/<Entity Type
- // Display Name>`.
+ // Display Name>` or `projects/<Project ID>/agent/runtimes/<Runtime
+ // ID>/sessions/<Session ID>/entityTypes/<Entity Type Display Name>`. Note:
+ // Runtimes are under construction and will be available soon. If
+ // <Runtime ID> is not specified, we assume default 'sandbox' runtime.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
}
@@ -192,11 +199,13 @@ func (m *GetSessionEntityTypeRequest) GetName() string {
return ""
}
-// The request message for
-// [SessionEntityTypes.CreateSessionEntityType][google.cloud.dialogflow.v2beta1.SessionEntityTypes.CreateSessionEntityType].
+// The request message for [SessionEntityTypes.CreateSessionEntityType][google.cloud.dialogflow.v2beta1.SessionEntityTypes.CreateSessionEntityType].
type CreateSessionEntityTypeRequest struct {
// Required. The session to create a session entity type for.
- // Format: `projects/<Project ID>/agent/sessions/<Session ID>`.
+ // Format: `projects/<Project ID>/agent/sessions/<Session ID>` or
+ // `projects/<Project ID>/agent/runtimes/<Runtime ID>/sessions/<Session ID>`.
+ // Note: Runtimes are under construction and will be available soon.
+ // If <Runtime ID> is not specified, we assume default 'sandbox' runtime.
Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"`
// Required. The session entity type to create.
SessionEntityType *SessionEntityType `protobuf:"bytes,2,opt,name=session_entity_type,json=sessionEntityType" json:"session_entity_type,omitempty"`
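For illustration, a rough sketch of creating one of these session entity types through the generated gRPC client in this vendored package follows. The dial target and all IDs are hypothetical, the field and enum names are as generated in this revision, and a real call would need TLS plus OAuth credentials rather than the grpc.WithInsecure placeholder used here.

package main

import (
	"context"
	"fmt"
	"log"

	dialogflow "google.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1"
	"google.golang.org/grpc"
)

func main() {
	// Placeholder dial; production clients need transport security and OAuth.
	conn, err := grpc.Dial("dialogflow.googleapis.com:443", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := dialogflow.NewSessionEntityTypesClient(conn)

	// Both parent forms documented above are accepted; this uses the plain
	// session path, so the default 'sandbox' runtime is assumed.
	req := &dialogflow.CreateSessionEntityTypeRequest{
		Parent: "projects/my-project/agent/sessions/user-1234",
		SessionEntityType: &dialogflow.SessionEntityType{
			Name:               "projects/my-project/agent/sessions/user-1234/entityTypes/fruit",
			EntityOverrideMode: dialogflow.SessionEntityType_ENTITY_OVERRIDE_MODE_OVERRIDE,
			Entities: []*dialogflow.EntityType_Entity{
				{Value: "apple", Synonyms: []string{"apple", "green apple"}},
			},
		},
	}

	created, err := client.CreateSessionEntityType(context.Background(), req)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(created.GetName())
}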
@@ -221,12 +230,14 @@ func (m *CreateSessionEntityTypeRequest) GetSessionEntityType() *SessionEntityTy
return nil
}
-// The request message for
-// [SessionEntityTypes.UpdateSessionEntityType][google.cloud.dialogflow.v2beta1.SessionEntityTypes.UpdateSessionEntityType].
+// The request message for [SessionEntityTypes.UpdateSessionEntityType][google.cloud.dialogflow.v2beta1.SessionEntityTypes.UpdateSessionEntityType].
type UpdateSessionEntityTypeRequest struct {
// Required. The entity type to update. Format:
// `projects/<Project ID>/agent/sessions/<Session ID>/entityTypes/<Entity Type
- // Display Name>`.
+ // Display Name>` or `projects/<Project ID>/agent/runtimes/<Runtime
+ // ID>/sessions/<Session ID>/entityTypes/<Entity Type Display Name>`. Note:
+ // Runtimes are under construction and will be available soon. If
+ // <Runtime ID> is not specified, we assume default 'sandbox' runtime.
SessionEntityType *SessionEntityType `protobuf:"bytes,1,opt,name=session_entity_type,json=sessionEntityType" json:"session_entity_type,omitempty"`
// Optional. The mask to control which fields get updated.
UpdateMask *google_protobuf3.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"`
@@ -251,12 +262,14 @@ func (m *UpdateSessionEntityTypeRequest) GetUpdateMask() *google_protobuf3.Field
return nil
}
-// The request message for
-// [SessionEntityTypes.DeleteSessionEntityType][google.cloud.dialogflow.v2beta1.SessionEntityTypes.DeleteSessionEntityType].
+// The request message for [SessionEntityTypes.DeleteSessionEntityType][google.cloud.dialogflow.v2beta1.SessionEntityTypes.DeleteSessionEntityType].
type DeleteSessionEntityTypeRequest struct {
// Required. The name of the entity type to delete. Format:
// `projects/<Project ID>/agent/sessions/<Session ID>/entityTypes/<Entity Type
- // Display Name>`.
+ // Display Name>` or `projects/<Project ID>/agent/runtimes/<Runtime
+ // ID>/sessions/<Session ID>/entityTypes/<Entity Type Display Name>`. Note:
+ // Runtimes are under construction and will be available soon. If
+ // <Runtime ID> is not specified, we assume default 'sandbox' runtime.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
}
@@ -502,56 +515,59 @@ func init() {
}
var fileDescriptor5 = []byte{
0xf0, 0x52, - 0xec, 0xa9, 0xc1, 0x6a, 0x16, 0x91, 0x15, 0xbc, 0x1c, 0x7b, 0xa4, 0xb1, 0x5f, 0xaf, 0xef, 0x6a, - 0x55, 0xad, 0xd6, 0x9c, 0x95, 0x64, 0x86, 0x97, 0x76, 0x1d, 0xc6, 0x23, 0x49, 0x61, 0x3a, 0x3c, - 0xec, 0x01, 0xe3, 0x24, 0x83, 0x27, 0x3d, 0xb3, 0x0b, 0x2e, 0x0f, 0x2f, 0x21, 0x5c, 0x91, 0x45, - 0x3c, 0xed, 0x99, 0x36, 0x18, 0xcc, 0x39, 0x0f, 0x72, 0xff, 0x8b, 0x3e, 0xe5, 0x1b, 0x1a, 0xce, - 0x39, 0x90, 0x25, 0x8c, 0xc5, 0x26, 0xa7, 0x27, 0xe0, 0x66, 0x27, 0x04, 0x50, 0x1c, 0x6f, 0xfa, - 0x06, 0xf9, 0x05, 0xc2, 0xf9, 0xa4, 0xa8, 0xcc, 0xa3, 0x2e, 0x03, 0x62, 0xe1, 0xf9, 0x98, 0xea, - 0x66, 0x59, 0x74, 0xc3, 0xc4, 0x47, 0x5c, 0xeb, 0x84, 0x45, 0xa2, 0x91, 0x7f, 0xf0, 0xef, 0x2e, - 0x3c, 0xe2, 0xc6, 0x35, 0xb2, 0x92, 0x20, 0xfb, 0xab, 0x6f, 0xae, 0x5f, 0x11, 0x5e, 0xc3, 0x8b, - 0x3b, 0x10, 0xa5, 0x3b, 0xc8, 0x51, 0x4c, 0x99, 0xca, 0x4f, 0x11, 0xce, 0x6f, 0x75, 0xc1, 0xe4, - 0x90, 0x08, 0x4b, 0x4a, 0x6d, 0x0b, 0xff, 0x11, 0xa3, 0x5d, 0x30, 0x1b, 0x4f, 0xfa, 0x5c, 0x44, - 0xba, 0xfc, 0x1a, 0xe1, 0xfc, 0xbe, 0x67, 0xa5, 0xd1, 0x4b, 0xa0, 0x81, 0x7e, 0x20, 0x0d, 0xb2, - 0x81, 0x67, 0x7a, 0x82, 0x85, 0x98, 0x05, 0xa1, 0xc4, 0xdc, 0xc0, 0xf7, 0x60, 0x5c, 0x28, 0xdb, - 0xfe, 0xb8, 0xa8, 0x9a, 0xec, 0x44, 0xc7, 0xc1, 0x71, 0xff, 0x5b, 0xfe, 0x0f, 0xe7, 0xcb, 0xd0, - 0x86, 0x14, 0x09, 0x31, 0x17, 0x53, 0x7a, 0x3f, 0x85, 0x49, 0xb4, 0xf0, 0xc8, 0x3b, 0x84, 0x33, - 0xf1, 0x35, 0x49, 0xee, 0x8c, 0xd4, 0x9a, 0xda, 0x42, 0xb9, 0xbb, 0x63, 0xe3, 0x83, 0x66, 0x90, - 0xef, 0x5d, 0xbe, 0xfd, 0xf0, 0x44, 0xda, 0x20, 0xeb, 0x57, 0xf3, 0xf7, 0x71, 0x50, 0x2a, 0xb7, - 0xbd, 0x2e, 0x3d, 0x86, 0x43, 0xce, 0xd4, 0xa2, 0x6a, 0xda, 0xe0, 0xf2, 0xc1, 0x2f, 0x81, 0xa9, - 0xc5, 0x8b, 0x70, 0x48, 0x07, 0x1a, 0xde, 0x20, 0x3c, 0x1f, 0x57, 0xc2, 0xe4, 0xd6, 0x48, 0x72, - 0x29, 0x95, 0x9f, 0x1b, 0xa3, 0x0c, 0xe2, 0xd4, 0xf8, 0x17, 0x93, 0xa6, 0xe5, 0xba, 0x14, 0xb5, - 0x78, 0x41, 0x3e, 0x22, 0xbc, 0x90, 0xd0, 0x5c, 0x64, 0x74, 0xb6, 0xd3, 0xdb, 0x72, 0x2c, 0x4d, - 0x07, 0x42, 0x53, 0x5d, 0x1e, 0xff, 0x86, 0xfe, 0x8f, 0x6b, 0x36, 0xf2, 0x05, 0xe1, 0x85, 0x84, - 0x36, 0xbd, 0x81, 0xd0, 0xf4, 0x06, 0x1f, 0x4b, 0xe8, 0xb1, 0x10, 0x6a, 0x95, 0xaa, 0x43, 0xa1, - 0x71, 0xaf, 0x90, 0xef, 0xbc, 0xd0, 0x78, 0xf1, 0xaf, 0x10, 0x5e, 0x48, 0x68, 0xf0, 0x1b, 0x88, - 0x4f, 0x1f, 0x0d, 0xb9, 0x4c, 0x64, 0xc8, 0x68, 0xfe, 0x83, 0x65, 0x50, 0x9d, 0xc5, 0xf1, 0xab, - 0x73, 0xf3, 0x25, 0xc2, 0x2b, 0x87, 0xb4, 0x33, 0x8a, 0xe1, 0x66, 0x26, 0x42, 0xae, 0xee, 0x73, - 0xa9, 0xa3, 0x07, 0x95, 0x10, 0x6a, 0xd3, 0xb6, 0xe9, 0xda, 0x0a, 0xed, 0xda, 0xaa, 0x0d, 0xae, - 0x60, 0xaa, 0x06, 0x5b, 0xa6, 0xe7, 0xb0, 0xc4, 0xe7, 0xd9, 0xc6, 0xd0, 0xf4, 0x19, 0xa1, 0x67, - 0x92, 0x54, 0xde, 0x7e, 0x2e, 0x2d, 0xef, 0x04, 0x3e, 0xb7, 0x04, 0x9d, 0xf2, 0x90, 0xce, 0x41, - 0x00, 0x6a, 0x4d, 0x0a, 0xff, 0xff, 0x7e, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xaf, 0x87, 0xb0, 0xb6, - 0x77, 0x0a, 0x00, 0x00, + // 863 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0x66, 0x5c, 0x58, 0xed, 0xce, 0xf2, 0xa3, 0x3b, 0x54, 0x69, 0x94, 0xd2, 0x34, 0x78, 0x11, + 0xaa, 0x72, 0xb0, 0xd5, 0xc0, 0x65, 0x59, 0x7e, 0x48, 0xdb, 0x38, 0xab, 0x48, 0x9b, 0x34, 0x72, + 0xd2, 0x15, 0x94, 0x83, 0xe5, 0xd4, 0xaf, 0x96, 0x69, 0xe2, 0x31, 0x9e, 0xc9, 0x2e, 0x29, 0xea, + 0xa5, 0xdc, 0xb8, 0x70, 0x40, 0xdc, 0x38, 0x71, 0xe0, 0x80, 0x04, 0xa7, 0xfe, 0x03, 0x1c, 0xf8, + 0x0b, 0x10, 0x37, 0x8e, 0x5c, 0x39, 0x22, 0x71, 0x03, 0x79, 0xec, 0x34, 0xa5, 0x1e, 0xdb, 0x21, + 0xdb, 0x9b, 0xe7, 0xcd, 0x7c, 0xef, 
0xbd, 0xef, 0x9b, 0xf7, 0x9e, 0x07, 0xdf, 0x73, 0x29, 0x75, + 0x47, 0xa0, 0x1f, 0x8e, 0xe8, 0xc4, 0xd1, 0x1d, 0xcf, 0x1e, 0x51, 0xf7, 0x68, 0x44, 0x9f, 0xea, + 0x4f, 0x1a, 0x43, 0xe0, 0xf6, 0x8e, 0xce, 0x80, 0x31, 0x8f, 0xfa, 0x16, 0xf8, 0xdc, 0xe3, 0x53, + 0x8b, 0x4f, 0x03, 0xd0, 0x82, 0x90, 0x72, 0x4a, 0xb6, 0x62, 0xa8, 0x26, 0xa0, 0xda, 0x1c, 0xaa, + 0x25, 0xd0, 0xca, 0x6b, 0x89, 0x6f, 0x3b, 0xf0, 0x74, 0xdb, 0xf7, 0x29, 0xb7, 0xb9, 0x47, 0x7d, + 0x16, 0xc3, 0x2b, 0x3b, 0x45, 0x91, 0x53, 0x11, 0x2b, 0x1b, 0x09, 0x44, 0xac, 0x86, 0x93, 0x23, + 0x1d, 0xc6, 0x01, 0x9f, 0x26, 0x9b, 0xb5, 0xab, 0x9b, 0x47, 0x1e, 0x8c, 0x1c, 0x6b, 0x6c, 0xb3, + 0xe3, 0xf8, 0x84, 0xfa, 0x97, 0x82, 0xef, 0xf4, 0x63, 0x3a, 0x86, 0xf0, 0x3d, 0x98, 0x06, 0x40, + 0x08, 0x7e, 0xde, 0xb7, 0xc7, 0x50, 0x46, 0x35, 0xb4, 0x7d, 0xcb, 0x14, 0xdf, 0xe4, 0x29, 0x5e, + 0x4b, 0xa2, 0xd3, 0x27, 0x10, 0x86, 0x9e, 0x03, 0xd6, 0x98, 0x3a, 0x50, 0x56, 0x6a, 0x68, 0xfb, + 0xe5, 0x86, 0xa1, 0x15, 0x30, 0xd7, 0x52, 0x51, 0xb4, 0xf8, 0x73, 0x2f, 0xf1, 0xd6, 0xa1, 0x0e, + 0x98, 0x04, 0x52, 0x36, 0xd2, 0xc5, 0x37, 0x85, 0xd5, 0x03, 0x56, 0x5e, 0xa9, 0xad, 0x6c, 0xdf, + 0x6e, 0x34, 0x0a, 0x83, 0xa5, 0xa2, 0x98, 0x17, 0x3e, 0xd4, 0x33, 0x84, 0x49, 0x3a, 0x34, 0x79, + 0x03, 0xd7, 0x8c, 0xee, 0xa0, 0x3d, 0xf8, 0xc8, 0xda, 0x7b, 0x6c, 0x98, 0x66, 0xbb, 0x69, 0x58, + 0x9d, 0xbd, 0xa6, 0x61, 0xed, 0x77, 0xfb, 0x3d, 0x63, 0xb7, 0xdd, 0x6a, 0x1b, 0xcd, 0xd5, 0xe7, + 0xc8, 0xeb, 0x78, 0x53, 0x7a, 0x6a, 0xb6, 0x5a, 0x45, 0xe4, 0x2e, 0xde, 0x92, 0x1e, 0xe9, 0xef, + 0xf7, 0x7a, 0x8f, 0x8c, 0x8e, 0xd1, 0x1d, 0xac, 0x2a, 0x2a, 0xc3, 0x9b, 0x8f, 0x3c, 0xc6, 0x53, + 0xa2, 0x30, 0x13, 0x3e, 0x9d, 0x00, 0xe3, 0xa4, 0x84, 0x6f, 0x04, 0x76, 0x08, 0x3e, 0x4f, 0x2e, + 0x21, 0x59, 0x91, 0x0d, 0x7c, 0x2b, 0xb0, 0x5d, 0xb0, 0x98, 0x77, 0x12, 0x6b, 0xff, 0x82, 0x79, + 0x33, 0x32, 0xf4, 0xbd, 0x13, 0x20, 0x9b, 0x18, 0x8b, 0x4d, 0x4e, 0x8f, 0xc1, 0x2f, 0xaf, 0x08, + 0xa0, 0x38, 0x3e, 0x88, 0x0c, 0xea, 0x4f, 0x08, 0x57, 0xb3, 0xa2, 0xb2, 0x80, 0xfa, 0x0c, 0x88, + 0x83, 0xd7, 0x24, 0xd5, 0xcd, 0xca, 0x68, 0x41, 0xe1, 0x53, 0xae, 0x4d, 0xc2, 0x52, 0xd1, 0xc8, + 0x9b, 0xf8, 0x15, 0x1f, 0x3e, 0xe3, 0xd6, 0xa5, 0x64, 0x15, 0x91, 0xec, 0x4b, 0x91, 0xb9, 0x77, + 0x91, 0xf0, 0x0e, 0xde, 0x78, 0x08, 0xe9, 0x74, 0x67, 0x1a, 0x49, 0xca, 0x54, 0xfd, 0x16, 0xe1, + 0xea, 0x6e, 0x08, 0x36, 0x87, 0x4c, 0x58, 0x96, 0xb4, 0x43, 0xfc, 0xaa, 0x84, 0xbb, 0xc8, 0x6c, + 0x39, 0xea, 0x77, 0x52, 0xd4, 0xd5, 0x9f, 0x11, 0xae, 0xee, 0x07, 0x4e, 0x5e, 0x7a, 0x19, 0x69, + 0xa0, 0x6b, 0x4c, 0x83, 0xdc, 0xc7, 0xb7, 0x27, 0x22, 0x0b, 0x31, 0x0b, 0x12, 0x8a, 0x95, 0x99, + 0xef, 0xd9, 0xb8, 0xd0, 0x5a, 0xd1, 0xb8, 0xe8, 0xd8, 0xec, 0xd8, 0xc4, 0xf1, 0xf1, 0xe8, 0x5b, + 0x7d, 0x1b, 0x57, 0x9b, 0x30, 0x82, 0x1c, 0x0a, 0x92, 0x8b, 0x69, 0x7c, 0xf5, 0x22, 0x26, 0xe9, + 0xc2, 0x23, 0xdf, 0x2b, 0xb8, 0x24, 0xaf, 0x49, 0xf2, 0x7e, 0x21, 0xd7, 0xdc, 0x16, 0xaa, 0x7c, + 0xb0, 0x34, 0x3e, 0x6e, 0x06, 0xf5, 0x4b, 0x74, 0xf6, 0xeb, 0x1f, 0x5f, 0x2b, 0x5f, 0x20, 0x72, + 0xef, 0x62, 0x02, 0x7f, 0x1e, 0x17, 0xcb, 0x7b, 0x41, 0x48, 0x3f, 0x81, 0x43, 0xce, 0xf4, 0xba, + 0x6e, 0xbb, 0xe0, 0xf3, 0xd9, 0x4f, 0x81, 0xe9, 0xf5, 0xd3, 0x64, 0x4c, 0x0b, 0x67, 0x07, 0x2d, + 0xd2, 0x2c, 0x06, 0x87, 0x13, 0x9f, 0x7b, 0x63, 0x88, 0x0c, 0x19, 0x7e, 0xc8, 0x37, 0x0a, 0x5e, + 0x93, 0x35, 0x03, 0x79, 0xb7, 0x90, 0x66, 0x4e, 0x0f, 0x55, 0x96, 0x28, 0x28, 0xb9, 0x2e, 0xd1, + 0x25, 0xe7, 0xa9, 0x72, 0x99, 0x8c, 0x5e, 0x3f, 0xfd, 0xaf, 0x2e, 0x72, 0xb0, 0x54, 0x95, 0x2b, + 0x7e, 0xc8, 0x2f, 0x0a, 0x5e, 0xcf, 0x68, 0x78, 0x52, 0x5c, 
0x01, 0xf9, 0xa3, 0x62, 0x29, 0x75, + 0xce, 0x63, 0x75, 0x7e, 0x44, 0xea, 0xf2, 0x55, 0xf3, 0x8e, 0x6c, 0x04, 0x1c, 0x7c, 0xac, 0x5e, + 0x4b, 0x29, 0x49, 0x9d, 0x93, 0x3f, 0x15, 0xbc, 0x9e, 0x31, 0x99, 0x16, 0xd0, 0x31, 0x7f, 0xa6, + 0x2d, 0xa5, 0xe3, 0xef, 0xb1, 0x8e, 0xbf, 0xa1, 0x46, 0x67, 0xce, 0x5a, 0xf6, 0xf4, 0xfa, 0x9f, + 0x95, 0x27, 0xd7, 0x96, 0x36, 0x3e, 0x5c, 0x26, 0xca, 0x22, 0x25, 0x2a, 0xd7, 0xfb, 0x1f, 0x84, + 0xd7, 0x33, 0xc6, 0xe8, 0x02, 0x7a, 0xe7, 0x0f, 0xe0, 0x4a, 0x29, 0x35, 0xca, 0x8d, 0xe8, 0x59, + 0x38, 0xef, 0xdc, 0xfa, 0xb3, 0x74, 0x6e, 0xfd, 0x5a, 0x3a, 0xf7, 0xc1, 0x39, 0xc2, 0x77, 0x0f, + 0xe9, 0xb8, 0x88, 0xeb, 0x83, 0x52, 0x8a, 0x66, 0x2f, 0x62, 0xd5, 0x43, 0x07, 0xed, 0x04, 0xea, + 0xd2, 0x91, 0xed, 0xbb, 0x1a, 0x0d, 0x5d, 0xdd, 0x05, 0x5f, 0x70, 0xd6, 0xe3, 0x2d, 0x3b, 0xf0, + 0x58, 0xe6, 0x73, 0xfa, 0xfe, 0xdc, 0xf4, 0x37, 0x42, 0xdf, 0x29, 0x4a, 0xb3, 0xf5, 0x83, 0xb2, + 0xf5, 0x30, 0xf6, 0xb9, 0x2b, 0xd2, 0x69, 0xce, 0xd3, 0x79, 0x1c, 0x83, 0x86, 0x37, 0x84, 0xff, + 0xb7, 0xfe, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x65, 0x3a, 0xa2, 0x54, 0x27, 0x0c, 0x00, 0x00, } diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/job_service.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/job_service.pb.go deleted file mode 100644 index df13ca5e7..000000000 --- a/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/job_service.pb.go +++ /dev/null @@ -1,1823 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/cloud/ml/v1beta1/job_service.proto - -/* -Package ml is a generated protocol buffer package. - -It is generated from these files: - google/cloud/ml/v1beta1/job_service.proto - google/cloud/ml/v1beta1/model_service.proto - google/cloud/ml/v1beta1/operation_metadata.proto - google/cloud/ml/v1beta1/prediction_service.proto - google/cloud/ml/v1beta1/project_service.proto - -It has these top-level messages: - TrainingInput - HyperparameterSpec - ParameterSpec - HyperparameterOutput - TrainingOutput - PredictionInput - PredictionOutput - Job - CreateJobRequest - ListJobsRequest - ListJobsResponse - GetJobRequest - CancelJobRequest - Model - Version - ManualScaling - CreateModelRequest - ListModelsRequest - ListModelsResponse - GetModelRequest - DeleteModelRequest - CreateVersionRequest - ListVersionsRequest - ListVersionsResponse - GetVersionRequest - DeleteVersionRequest - SetDefaultVersionRequest - OperationMetadata - PredictRequest - GetConfigRequest - GetConfigResponse -*/ -package ml - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "google.golang.org/genproto/googleapis/api/annotations" -import _ "google.golang.org/genproto/googleapis/api/serviceconfig" -import google_protobuf1 "github.com/golang/protobuf/ptypes/empty" -import google_protobuf2 "github.com/golang/protobuf/ptypes/timestamp" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// A scale tier is an abstract representation of the resources Cloud ML -// will allocate to a training job. 
When selecting a scale tier for your -// training job, you should consider the size of your training dataset and -// the complexity of your model. As the tiers increase, virtual machines are -// added to handle your job, and the individual machines in the cluster -// generally have more memory and greater processing power than they do at -// lower tiers. The number of training units charged per hour of processing -// increases as tiers get more advanced. Refer to the -// [pricing guide](/ml/pricing) for more details. Note that in addition to -// incurring costs, your use of training resources is constrained by the -// [quota policy](/ml/quota). -type TrainingInput_ScaleTier int32 - -const ( - // A single worker instance. This tier is suitable for learning how to use - // Cloud ML, and for experimenting with new models using small datasets. - TrainingInput_BASIC TrainingInput_ScaleTier = 0 - // Many workers and a few parameter servers. - TrainingInput_STANDARD_1 TrainingInput_ScaleTier = 1 - // A large number of workers with many parameter servers. - TrainingInput_PREMIUM_1 TrainingInput_ScaleTier = 3 - // A single worker instance [with a GPU](ml/docs/how-tos/using-gpus). - TrainingInput_BASIC_GPU TrainingInput_ScaleTier = 6 - // The CUSTOM tier is not a set tier, but rather enables you to use your - // own cluster specification. When you use this tier, set values to - // configure your processing cluster according to these guidelines: - // - // * You _must_ set `TrainingInput.masterType` to specify the type - // of machine to use for your master node. This is the only required - // setting. - // - // * You _may_ set `TrainingInput.workerCount` to specify the number of - // workers to use. If you specify one or more workers, you _must_ also - // set `TrainingInput.workerType` to specify the type of machine to use - // for your worker nodes. - // - // * You _may_ set `TrainingInput.parameterServerCount` to specify the - // number of parameter servers to use. If you specify one or more - // parameter servers, you _must_ also set - // `TrainingInput.parameterServerType` to specify the type of machine to - // use for your parameter servers. - // - // Note that all of your workers must use the same machine type, which can - // be different from your parameter server type and master type. Your - // parameter servers must likewise use the same machine type, which can be - // different from your worker type and master type. - TrainingInput_CUSTOM TrainingInput_ScaleTier = 5 -) - -var TrainingInput_ScaleTier_name = map[int32]string{ - 0: "BASIC", - 1: "STANDARD_1", - 3: "PREMIUM_1", - 6: "BASIC_GPU", - 5: "CUSTOM", -} -var TrainingInput_ScaleTier_value = map[string]int32{ - "BASIC": 0, - "STANDARD_1": 1, - "PREMIUM_1": 3, - "BASIC_GPU": 6, - "CUSTOM": 5, -} - -func (x TrainingInput_ScaleTier) String() string { - return proto.EnumName(TrainingInput_ScaleTier_name, int32(x)) -} -func (TrainingInput_ScaleTier) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } - -// The available types of optimization goals. -type HyperparameterSpec_GoalType int32 - -const ( - // Goal Type will default to maximize. - HyperparameterSpec_GOAL_TYPE_UNSPECIFIED HyperparameterSpec_GoalType = 0 - // Maximize the goal metric. - HyperparameterSpec_MAXIMIZE HyperparameterSpec_GoalType = 1 - // Minimize the goal metric. 
- HyperparameterSpec_MINIMIZE HyperparameterSpec_GoalType = 2 -) - -var HyperparameterSpec_GoalType_name = map[int32]string{ - 0: "GOAL_TYPE_UNSPECIFIED", - 1: "MAXIMIZE", - 2: "MINIMIZE", -} -var HyperparameterSpec_GoalType_value = map[string]int32{ - "GOAL_TYPE_UNSPECIFIED": 0, - "MAXIMIZE": 1, - "MINIMIZE": 2, -} - -func (x HyperparameterSpec_GoalType) String() string { - return proto.EnumName(HyperparameterSpec_GoalType_name, int32(x)) -} -func (HyperparameterSpec_GoalType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{1, 0} -} - -// The type of the parameter. -type ParameterSpec_ParameterType int32 - -const ( - // You must specify a valid type. Using this unspecified type will result in - // an error. - ParameterSpec_PARAMETER_TYPE_UNSPECIFIED ParameterSpec_ParameterType = 0 - // Type for real-valued parameters. - ParameterSpec_DOUBLE ParameterSpec_ParameterType = 1 - // Type for integral parameters. - ParameterSpec_INTEGER ParameterSpec_ParameterType = 2 - // The parameter is categorical, with a value chosen from the categories - // field. - ParameterSpec_CATEGORICAL ParameterSpec_ParameterType = 3 - // The parameter is real valued, with a fixed set of feasible points. If - // `type==DISCRETE`, feasible_points must be provided, and - // {`min_value`, `max_value`} will be ignored. - ParameterSpec_DISCRETE ParameterSpec_ParameterType = 4 -) - -var ParameterSpec_ParameterType_name = map[int32]string{ - 0: "PARAMETER_TYPE_UNSPECIFIED", - 1: "DOUBLE", - 2: "INTEGER", - 3: "CATEGORICAL", - 4: "DISCRETE", -} -var ParameterSpec_ParameterType_value = map[string]int32{ - "PARAMETER_TYPE_UNSPECIFIED": 0, - "DOUBLE": 1, - "INTEGER": 2, - "CATEGORICAL": 3, - "DISCRETE": 4, -} - -func (x ParameterSpec_ParameterType) String() string { - return proto.EnumName(ParameterSpec_ParameterType_name, int32(x)) -} -func (ParameterSpec_ParameterType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{2, 0} -} - -// The type of scaling that should be applied to this parameter. -type ParameterSpec_ScaleType int32 - -const ( - // By default, no scaling is applied. - ParameterSpec_NONE ParameterSpec_ScaleType = 0 - // Scales the feasible space to (0, 1) linearly. - ParameterSpec_UNIT_LINEAR_SCALE ParameterSpec_ScaleType = 1 - // Scales the feasible space logarithmically to (0, 1). The entire feasible - // space must be strictly positive. - ParameterSpec_UNIT_LOG_SCALE ParameterSpec_ScaleType = 2 - // Scales the feasible space "reverse" logarithmically to (0, 1). The result - // is that values close to the top of the feasible space are spread out more - // than points near the bottom. The entire feasible space must be strictly - // positive. - ParameterSpec_UNIT_REVERSE_LOG_SCALE ParameterSpec_ScaleType = 3 -) - -var ParameterSpec_ScaleType_name = map[int32]string{ - 0: "NONE", - 1: "UNIT_LINEAR_SCALE", - 2: "UNIT_LOG_SCALE", - 3: "UNIT_REVERSE_LOG_SCALE", -} -var ParameterSpec_ScaleType_value = map[string]int32{ - "NONE": 0, - "UNIT_LINEAR_SCALE": 1, - "UNIT_LOG_SCALE": 2, - "UNIT_REVERSE_LOG_SCALE": 3, -} - -func (x ParameterSpec_ScaleType) String() string { - return proto.EnumName(ParameterSpec_ScaleType_name, int32(x)) -} -func (ParameterSpec_ScaleType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 1} } - -// The format used to separate data instances in the source files. -type PredictionInput_DataFormat int32 - -const ( - // Unspecified format. 
- PredictionInput_DATA_FORMAT_UNSPECIFIED PredictionInput_DataFormat = 0 - // The source file is a text file with instances separated by the - // new-line character. - PredictionInput_TEXT PredictionInput_DataFormat = 1 - // The source file is a TFRecord file. - PredictionInput_TF_RECORD PredictionInput_DataFormat = 2 - // The source file is a GZIP-compressed TFRecord file. - PredictionInput_TF_RECORD_GZIP PredictionInput_DataFormat = 3 -) - -var PredictionInput_DataFormat_name = map[int32]string{ - 0: "DATA_FORMAT_UNSPECIFIED", - 1: "TEXT", - 2: "TF_RECORD", - 3: "TF_RECORD_GZIP", -} -var PredictionInput_DataFormat_value = map[string]int32{ - "DATA_FORMAT_UNSPECIFIED": 0, - "TEXT": 1, - "TF_RECORD": 2, - "TF_RECORD_GZIP": 3, -} - -func (x PredictionInput_DataFormat) String() string { - return proto.EnumName(PredictionInput_DataFormat_name, int32(x)) -} -func (PredictionInput_DataFormat) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{5, 0} -} - -// Describes the job state. -type Job_State int32 - -const ( - // The job state is unspecified. - Job_STATE_UNSPECIFIED Job_State = 0 - // The job has been just created and processing has not yet begun. - Job_QUEUED Job_State = 1 - // The service is preparing to run the job. - Job_PREPARING Job_State = 2 - // The job is in progress. - Job_RUNNING Job_State = 3 - // The job completed successfully. - Job_SUCCEEDED Job_State = 4 - // The job failed. - // `error_message` should contain the details of the failure. - Job_FAILED Job_State = 5 - // The job is being cancelled. - // `error_message` should describe the reason for the cancellation. - Job_CANCELLING Job_State = 6 - // The job has been cancelled. - // `error_message` should describe the reason for the cancellation. - Job_CANCELLED Job_State = 7 -) - -var Job_State_name = map[int32]string{ - 0: "STATE_UNSPECIFIED", - 1: "QUEUED", - 2: "PREPARING", - 3: "RUNNING", - 4: "SUCCEEDED", - 5: "FAILED", - 6: "CANCELLING", - 7: "CANCELLED", -} -var Job_State_value = map[string]int32{ - "STATE_UNSPECIFIED": 0, - "QUEUED": 1, - "PREPARING": 2, - "RUNNING": 3, - "SUCCEEDED": 4, - "FAILED": 5, - "CANCELLING": 6, - "CANCELLED": 7, -} - -func (x Job_State) String() string { - return proto.EnumName(Job_State_name, int32(x)) -} -func (Job_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 0} } - -// Represents input parameters for a training job. -type TrainingInput struct { - // Required. Specifies the machine types, the number of replicas for workers - // and parameter servers. - ScaleTier TrainingInput_ScaleTier `protobuf:"varint,1,opt,name=scale_tier,json=scaleTier,enum=google.cloud.ml.v1beta1.TrainingInput_ScaleTier" json:"scale_tier,omitempty"` - // Optional. Specifies the type of virtual machine to use for your training - // job's master worker. - // - // The following types are supported: - // - //
- //   <dl>
- //   <dt>standard</dt>
- //   <dd>
- //   A basic machine configuration suitable for training simple models with
- //   small to moderate datasets.
- //   </dd>
- //   <dt>large_model</dt>
- //   <dd>
- //   A machine with a lot of memory, specially suited for parameter servers
- //   when your model is large (having many hidden layers or layers with very
- //   large numbers of nodes).
- //   </dd>
- //   <dt>complex_model_s</dt>
- //   <dd>
- //   A machine suitable for the master and workers of the cluster when your
- //   model requires more computation than the standard machine can handle
- //   satisfactorily.
- //   </dd>
- //   <dt>complex_model_m</dt>
- //   <dd>
- //   A machine with roughly twice the number of cores and roughly double the
- //   memory of complex_model_s.
- //   </dd>
- //   <dt>complex_model_l</dt>
- //   <dd>
- //   A machine with roughly twice the number of cores and roughly double the
- //   memory of complex_model_m.
- //   </dd>
- //   <dt>standard_gpu</dt>
- //   <dd>
- //   A machine equivalent to standard that
- //   also includes a
- //   GPU that you can use in your trainer.
- //   </dd>
- //   <dt>complex_model_m_gpu</dt>
- //   <dd>
- //   A machine equivalent to
- //   complex_model_m that also includes
- //   four GPUs.
- //   </dd>
- //   </dl>
- // - // You must set this value when `scaleTier` is set to `CUSTOM`. - MasterType string `protobuf:"bytes,2,opt,name=master_type,json=masterType" json:"master_type,omitempty"` - // Optional. Specifies the type of virtual machine to use for your training - // job's worker nodes. - // - // The supported values are the same as those described in the entry for - // `masterType`. - // - // This value must be present when `scaleTier` is set to `CUSTOM` and - // `workerCount` is greater than zero. - WorkerType string `protobuf:"bytes,3,opt,name=worker_type,json=workerType" json:"worker_type,omitempty"` - // Optional. Specifies the type of virtual machine to use for your training - // job's parameter server. - // - // The supported values are the same as those described in the entry for - // `master_type`. - // - // This value must be present when `scaleTier` is set to `CUSTOM` and - // `parameter_server_count` is greater than zero. - ParameterServerType string `protobuf:"bytes,4,opt,name=parameter_server_type,json=parameterServerType" json:"parameter_server_type,omitempty"` - // Optional. The number of worker replicas to use for the training job. Each - // replica in the cluster will be of the type specified in `worker_type`. - // - // This value can only be used when `scale_tier` is set to `CUSTOM`. If you - // set this value, you must also set `worker_type`. - WorkerCount int64 `protobuf:"varint,5,opt,name=worker_count,json=workerCount" json:"worker_count,omitempty"` - // Optional. The number of parameter server replicas to use for the training - // job. Each replica in the cluster will be of the type specified in - // `parameter_server_type`. - // - // This value can only be used when `scale_tier` is set to `CUSTOM`.If you - // set this value, you must also set `parameter_server_type`. - ParameterServerCount int64 `protobuf:"varint,6,opt,name=parameter_server_count,json=parameterServerCount" json:"parameter_server_count,omitempty"` - // Required. The Google Cloud Storage location of the packages with - // the training program and any additional dependencies. - PackageUris []string `protobuf:"bytes,7,rep,name=package_uris,json=packageUris" json:"package_uris,omitempty"` - // Required. The Python module name to run after installing the packages. - PythonModule string `protobuf:"bytes,8,opt,name=python_module,json=pythonModule" json:"python_module,omitempty"` - // Optional. Command line arguments to pass to the program. - Args []string `protobuf:"bytes,10,rep,name=args" json:"args,omitempty"` - // Optional. The set of Hyperparameters to tune. - Hyperparameters *HyperparameterSpec `protobuf:"bytes,12,opt,name=hyperparameters" json:"hyperparameters,omitempty"` - // Required. The Google Compute Engine region to run the training job in. - Region string `protobuf:"bytes,14,opt,name=region" json:"region,omitempty"` - // Optional. A Google Cloud Storage path in which to store training outputs - // and other data needed for training. This path is passed to your TensorFlow - // program as the 'job_dir' command-line argument. The benefit of specifying - // this field is that Cloud ML validates the path for use in training. - JobDir string `protobuf:"bytes,16,opt,name=job_dir,json=jobDir" json:"job_dir,omitempty"` - // Optional. The Google Cloud ML runtime version to use for training. If not - // set, Google Cloud ML will choose the latest stable version. 
- RuntimeVersion string `protobuf:"bytes,15,opt,name=runtime_version,json=runtimeVersion" json:"runtime_version,omitempty"` -} - -func (m *TrainingInput) Reset() { *m = TrainingInput{} } -func (m *TrainingInput) String() string { return proto.CompactTextString(m) } -func (*TrainingInput) ProtoMessage() {} -func (*TrainingInput) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -func (m *TrainingInput) GetScaleTier() TrainingInput_ScaleTier { - if m != nil { - return m.ScaleTier - } - return TrainingInput_BASIC -} - -func (m *TrainingInput) GetMasterType() string { - if m != nil { - return m.MasterType - } - return "" -} - -func (m *TrainingInput) GetWorkerType() string { - if m != nil { - return m.WorkerType - } - return "" -} - -func (m *TrainingInput) GetParameterServerType() string { - if m != nil { - return m.ParameterServerType - } - return "" -} - -func (m *TrainingInput) GetWorkerCount() int64 { - if m != nil { - return m.WorkerCount - } - return 0 -} - -func (m *TrainingInput) GetParameterServerCount() int64 { - if m != nil { - return m.ParameterServerCount - } - return 0 -} - -func (m *TrainingInput) GetPackageUris() []string { - if m != nil { - return m.PackageUris - } - return nil -} - -func (m *TrainingInput) GetPythonModule() string { - if m != nil { - return m.PythonModule - } - return "" -} - -func (m *TrainingInput) GetArgs() []string { - if m != nil { - return m.Args - } - return nil -} - -func (m *TrainingInput) GetHyperparameters() *HyperparameterSpec { - if m != nil { - return m.Hyperparameters - } - return nil -} - -func (m *TrainingInput) GetRegion() string { - if m != nil { - return m.Region - } - return "" -} - -func (m *TrainingInput) GetJobDir() string { - if m != nil { - return m.JobDir - } - return "" -} - -func (m *TrainingInput) GetRuntimeVersion() string { - if m != nil { - return m.RuntimeVersion - } - return "" -} - -// Represents a set of hyperparameters to optimize. -type HyperparameterSpec struct { - // Required. The type of goal to use for tuning. Available types are - // `MAXIMIZE` and `MINIMIZE`. - // - // Defaults to `MAXIMIZE`. - Goal HyperparameterSpec_GoalType `protobuf:"varint,1,opt,name=goal,enum=google.cloud.ml.v1beta1.HyperparameterSpec_GoalType" json:"goal,omitempty"` - // Required. The set of parameters to tune. - Params []*ParameterSpec `protobuf:"bytes,2,rep,name=params" json:"params,omitempty"` - // Optional. How many training trials should be attempted to optimize - // the specified hyperparameters. - // - // Defaults to one. - MaxTrials int32 `protobuf:"varint,3,opt,name=max_trials,json=maxTrials" json:"max_trials,omitempty"` - // Optional. The number of training trials to run concurrently. - // You can reduce the time it takes to perform hyperparameter tuning by adding - // trials in parallel. However, each trail only benefits from the information - // gained in completed trials. That means that a trial does not get access to - // the results of trials running at the same time, which could reduce the - // quality of the overall optimization. - // - // Each trial will use the same scale tier and machine types. - // - // Defaults to one. - MaxParallelTrials int32 `protobuf:"varint,4,opt,name=max_parallel_trials,json=maxParallelTrials" json:"max_parallel_trials,omitempty"` - // Optional. The Tensorflow summary tag name to use for optimizing trials. For - // current versions of Tensorflow, this tag name should exactly match what is - // shown in Tensorboard, including all scopes. 
For versions of Tensorflow - // prior to 0.12, this should be only the tag passed to tf.Summary. - // By default, "training/hptuning/metric" will be used. - HyperparameterMetricTag string `protobuf:"bytes,5,opt,name=hyperparameter_metric_tag,json=hyperparameterMetricTag" json:"hyperparameter_metric_tag,omitempty"` -} - -func (m *HyperparameterSpec) Reset() { *m = HyperparameterSpec{} } -func (m *HyperparameterSpec) String() string { return proto.CompactTextString(m) } -func (*HyperparameterSpec) ProtoMessage() {} -func (*HyperparameterSpec) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -func (m *HyperparameterSpec) GetGoal() HyperparameterSpec_GoalType { - if m != nil { - return m.Goal - } - return HyperparameterSpec_GOAL_TYPE_UNSPECIFIED -} - -func (m *HyperparameterSpec) GetParams() []*ParameterSpec { - if m != nil { - return m.Params - } - return nil -} - -func (m *HyperparameterSpec) GetMaxTrials() int32 { - if m != nil { - return m.MaxTrials - } - return 0 -} - -func (m *HyperparameterSpec) GetMaxParallelTrials() int32 { - if m != nil { - return m.MaxParallelTrials - } - return 0 -} - -func (m *HyperparameterSpec) GetHyperparameterMetricTag() string { - if m != nil { - return m.HyperparameterMetricTag - } - return "" -} - -// Represents a single hyperparameter to optimize. -type ParameterSpec struct { - // Required. The parameter name must be unique amongst all ParameterConfigs in - // a HyperparameterSpec message. E.g., "learning_rate". - ParameterName string `protobuf:"bytes,1,opt,name=parameter_name,json=parameterName" json:"parameter_name,omitempty"` - // Required. The type of the parameter. - Type ParameterSpec_ParameterType `protobuf:"varint,4,opt,name=type,enum=google.cloud.ml.v1beta1.ParameterSpec_ParameterType" json:"type,omitempty"` - // Required if type is `DOUBLE` or `INTEGER`. This field - // should be unset if type is `CATEGORICAL`. This value should be integers if - // type is INTEGER. - MinValue float64 `protobuf:"fixed64,2,opt,name=min_value,json=minValue" json:"min_value,omitempty"` - // Required if typeis `DOUBLE` or `INTEGER`. This field - // should be unset if type is `CATEGORICAL`. This value should be integers if - // type is `INTEGER`. - MaxValue float64 `protobuf:"fixed64,3,opt,name=max_value,json=maxValue" json:"max_value,omitempty"` - // Required if type is `CATEGORICAL`. The list of possible categories. - CategoricalValues []string `protobuf:"bytes,5,rep,name=categorical_values,json=categoricalValues" json:"categorical_values,omitempty"` - // Required if type is `DISCRETE`. - // A list of feasible points. - // The list should be in strictly increasing order. For instance, this - // parameter might have possible settings of 1.5, 2.5, and 4.0. This list - // should not contain more than 1,000 values. - DiscreteValues []float64 `protobuf:"fixed64,6,rep,packed,name=discrete_values,json=discreteValues" json:"discrete_values,omitempty"` - // Optional. How the parameter should be scaled to the hypercube. - // Leave unset for categorical parameters. - // Some kind of scaling is strongly recommended for real or integral - // parameters (e.g., `UNIT_LINEAR_SCALE`). 
- ScaleType ParameterSpec_ScaleType `protobuf:"varint,7,opt,name=scale_type,json=scaleType,enum=google.cloud.ml.v1beta1.ParameterSpec_ScaleType" json:"scale_type,omitempty"` -} - -func (m *ParameterSpec) Reset() { *m = ParameterSpec{} } -func (m *ParameterSpec) String() string { return proto.CompactTextString(m) } -func (*ParameterSpec) ProtoMessage() {} -func (*ParameterSpec) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -func (m *ParameterSpec) GetParameterName() string { - if m != nil { - return m.ParameterName - } - return "" -} - -func (m *ParameterSpec) GetType() ParameterSpec_ParameterType { - if m != nil { - return m.Type - } - return ParameterSpec_PARAMETER_TYPE_UNSPECIFIED -} - -func (m *ParameterSpec) GetMinValue() float64 { - if m != nil { - return m.MinValue - } - return 0 -} - -func (m *ParameterSpec) GetMaxValue() float64 { - if m != nil { - return m.MaxValue - } - return 0 -} - -func (m *ParameterSpec) GetCategoricalValues() []string { - if m != nil { - return m.CategoricalValues - } - return nil -} - -func (m *ParameterSpec) GetDiscreteValues() []float64 { - if m != nil { - return m.DiscreteValues - } - return nil -} - -func (m *ParameterSpec) GetScaleType() ParameterSpec_ScaleType { - if m != nil { - return m.ScaleType - } - return ParameterSpec_NONE -} - -// Represents the result of a single hyperparameter tuning trial from a -// training job. The TrainingOutput object that is returned on successful -// completion of a training job with hyperparameter tuning includes a list -// of HyperparameterOutput objects, one for each successful trial. -type HyperparameterOutput struct { - // The trial id for these results. - TrialId string `protobuf:"bytes,1,opt,name=trial_id,json=trialId" json:"trial_id,omitempty"` - // The hyperparameters given to this trial. - Hyperparameters map[string]string `protobuf:"bytes,2,rep,name=hyperparameters" json:"hyperparameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // The final objective metric seen for this trial. - FinalMetric *HyperparameterOutput_HyperparameterMetric `protobuf:"bytes,3,opt,name=final_metric,json=finalMetric" json:"final_metric,omitempty"` - // All recorded object metrics for this trial. - AllMetrics []*HyperparameterOutput_HyperparameterMetric `protobuf:"bytes,4,rep,name=all_metrics,json=allMetrics" json:"all_metrics,omitempty"` -} - -func (m *HyperparameterOutput) Reset() { *m = HyperparameterOutput{} } -func (m *HyperparameterOutput) String() string { return proto.CompactTextString(m) } -func (*HyperparameterOutput) ProtoMessage() {} -func (*HyperparameterOutput) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -func (m *HyperparameterOutput) GetTrialId() string { - if m != nil { - return m.TrialId - } - return "" -} - -func (m *HyperparameterOutput) GetHyperparameters() map[string]string { - if m != nil { - return m.Hyperparameters - } - return nil -} - -func (m *HyperparameterOutput) GetFinalMetric() *HyperparameterOutput_HyperparameterMetric { - if m != nil { - return m.FinalMetric - } - return nil -} - -func (m *HyperparameterOutput) GetAllMetrics() []*HyperparameterOutput_HyperparameterMetric { - if m != nil { - return m.AllMetrics - } - return nil -} - -// An observed value of a metric. -type HyperparameterOutput_HyperparameterMetric struct { - // The global training step for this metric. 
- TrainingStep int64 `protobuf:"varint,1,opt,name=training_step,json=trainingStep" json:"training_step,omitempty"` - // The objective value at this training step. - ObjectiveValue float64 `protobuf:"fixed64,2,opt,name=objective_value,json=objectiveValue" json:"objective_value,omitempty"` -} - -func (m *HyperparameterOutput_HyperparameterMetric) Reset() { - *m = HyperparameterOutput_HyperparameterMetric{} -} -func (m *HyperparameterOutput_HyperparameterMetric) String() string { return proto.CompactTextString(m) } -func (*HyperparameterOutput_HyperparameterMetric) ProtoMessage() {} -func (*HyperparameterOutput_HyperparameterMetric) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{3, 0} -} - -func (m *HyperparameterOutput_HyperparameterMetric) GetTrainingStep() int64 { - if m != nil { - return m.TrainingStep - } - return 0 -} - -func (m *HyperparameterOutput_HyperparameterMetric) GetObjectiveValue() float64 { - if m != nil { - return m.ObjectiveValue - } - return 0 -} - -// Represents results of a training job. Output only. -type TrainingOutput struct { - // The number of hyperparameter tuning trials that completed successfully. - // Only set for hyperparameter tuning jobs. - CompletedTrialCount int64 `protobuf:"varint,1,opt,name=completed_trial_count,json=completedTrialCount" json:"completed_trial_count,omitempty"` - // Results for individual Hyperparameter trials. - // Only set for hyperparameter tuning jobs. - Trials []*HyperparameterOutput `protobuf:"bytes,2,rep,name=trials" json:"trials,omitempty"` - // The amount of ML units consumed by the job. - ConsumedMlUnits float64 `protobuf:"fixed64,3,opt,name=consumed_ml_units,json=consumedMlUnits" json:"consumed_ml_units,omitempty"` - // Whether this job is a hyperparameter tuning job. - IsHyperparameterTuningJob bool `protobuf:"varint,4,opt,name=is_hyperparameter_tuning_job,json=isHyperparameterTuningJob" json:"is_hyperparameter_tuning_job,omitempty"` -} - -func (m *TrainingOutput) Reset() { *m = TrainingOutput{} } -func (m *TrainingOutput) String() string { return proto.CompactTextString(m) } -func (*TrainingOutput) ProtoMessage() {} -func (*TrainingOutput) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } - -func (m *TrainingOutput) GetCompletedTrialCount() int64 { - if m != nil { - return m.CompletedTrialCount - } - return 0 -} - -func (m *TrainingOutput) GetTrials() []*HyperparameterOutput { - if m != nil { - return m.Trials - } - return nil -} - -func (m *TrainingOutput) GetConsumedMlUnits() float64 { - if m != nil { - return m.ConsumedMlUnits - } - return 0 -} - -func (m *TrainingOutput) GetIsHyperparameterTuningJob() bool { - if m != nil { - return m.IsHyperparameterTuningJob - } - return false -} - -// Represents input parameters for a prediction job. -type PredictionInput struct { - // Required. The model or the version to use for prediction. - // - // Types that are valid to be assigned to ModelVersion: - // *PredictionInput_ModelName - // *PredictionInput_VersionName - // *PredictionInput_Uri - ModelVersion isPredictionInput_ModelVersion `protobuf_oneof:"model_version"` - // Required. The format of the input data files. - DataFormat PredictionInput_DataFormat `protobuf:"varint,3,opt,name=data_format,json=dataFormat,enum=google.cloud.ml.v1beta1.PredictionInput_DataFormat" json:"data_format,omitempty"` - // Required. The Google Cloud Storage location of the input data files. - // May contain wildcards. 
- InputPaths []string `protobuf:"bytes,4,rep,name=input_paths,json=inputPaths" json:"input_paths,omitempty"` - // Required. The output Google Cloud Storage location. - OutputPath string `protobuf:"bytes,5,opt,name=output_path,json=outputPath" json:"output_path,omitempty"` - // Optional. The maximum number of workers to be used for parallel processing. - // Defaults to 10 if not specified. - MaxWorkerCount int64 `protobuf:"varint,6,opt,name=max_worker_count,json=maxWorkerCount" json:"max_worker_count,omitempty"` - // Required. The Google Compute Engine region to run the prediction job in. - Region string `protobuf:"bytes,7,opt,name=region" json:"region,omitempty"` - // Optional. The Google Cloud ML runtime version to use for this batch - // prediction. If not set, Google Cloud ML will pick the runtime version used - // during the CreateVersion request for this model version, or choose the - // latest stable version when model version information is not available - // such as when the model is specified by uri. - RuntimeVersion string `protobuf:"bytes,8,opt,name=runtime_version,json=runtimeVersion" json:"runtime_version,omitempty"` -} - -func (m *PredictionInput) Reset() { *m = PredictionInput{} } -func (m *PredictionInput) String() string { return proto.CompactTextString(m) } -func (*PredictionInput) ProtoMessage() {} -func (*PredictionInput) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } - -type isPredictionInput_ModelVersion interface { - isPredictionInput_ModelVersion() -} - -type PredictionInput_ModelName struct { - ModelName string `protobuf:"bytes,1,opt,name=model_name,json=modelName,oneof"` -} -type PredictionInput_VersionName struct { - VersionName string `protobuf:"bytes,2,opt,name=version_name,json=versionName,oneof"` -} -type PredictionInput_Uri struct { - Uri string `protobuf:"bytes,9,opt,name=uri,oneof"` -} - -func (*PredictionInput_ModelName) isPredictionInput_ModelVersion() {} -func (*PredictionInput_VersionName) isPredictionInput_ModelVersion() {} -func (*PredictionInput_Uri) isPredictionInput_ModelVersion() {} - -func (m *PredictionInput) GetModelVersion() isPredictionInput_ModelVersion { - if m != nil { - return m.ModelVersion - } - return nil -} - -func (m *PredictionInput) GetModelName() string { - if x, ok := m.GetModelVersion().(*PredictionInput_ModelName); ok { - return x.ModelName - } - return "" -} - -func (m *PredictionInput) GetVersionName() string { - if x, ok := m.GetModelVersion().(*PredictionInput_VersionName); ok { - return x.VersionName - } - return "" -} - -func (m *PredictionInput) GetUri() string { - if x, ok := m.GetModelVersion().(*PredictionInput_Uri); ok { - return x.Uri - } - return "" -} - -func (m *PredictionInput) GetDataFormat() PredictionInput_DataFormat { - if m != nil { - return m.DataFormat - } - return PredictionInput_DATA_FORMAT_UNSPECIFIED -} - -func (m *PredictionInput) GetInputPaths() []string { - if m != nil { - return m.InputPaths - } - return nil -} - -func (m *PredictionInput) GetOutputPath() string { - if m != nil { - return m.OutputPath - } - return "" -} - -func (m *PredictionInput) GetMaxWorkerCount() int64 { - if m != nil { - return m.MaxWorkerCount - } - return 0 -} - -func (m *PredictionInput) GetRegion() string { - if m != nil { - return m.Region - } - return "" -} - -func (m *PredictionInput) GetRuntimeVersion() string { - if m != nil { - return m.RuntimeVersion - } - return "" -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*PredictionInput) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _PredictionInput_OneofMarshaler, _PredictionInput_OneofUnmarshaler, _PredictionInput_OneofSizer, []interface{}{ - (*PredictionInput_ModelName)(nil), - (*PredictionInput_VersionName)(nil), - (*PredictionInput_Uri)(nil), - } -} - -func _PredictionInput_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*PredictionInput) - // model_version - switch x := m.ModelVersion.(type) { - case *PredictionInput_ModelName: - b.EncodeVarint(1<<3 | proto.WireBytes) - b.EncodeStringBytes(x.ModelName) - case *PredictionInput_VersionName: - b.EncodeVarint(2<<3 | proto.WireBytes) - b.EncodeStringBytes(x.VersionName) - case *PredictionInput_Uri: - b.EncodeVarint(9<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Uri) - case nil: - default: - return fmt.Errorf("PredictionInput.ModelVersion has unexpected type %T", x) - } - return nil -} - -func _PredictionInput_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*PredictionInput) - switch tag { - case 1: // model_version.model_name - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.ModelVersion = &PredictionInput_ModelName{x} - return true, err - case 2: // model_version.version_name - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.ModelVersion = &PredictionInput_VersionName{x} - return true, err - case 9: // model_version.uri - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.ModelVersion = &PredictionInput_Uri{x} - return true, err - default: - return false, nil - } -} - -func _PredictionInput_OneofSizer(msg proto.Message) (n int) { - m := msg.(*PredictionInput) - // model_version - switch x := m.ModelVersion.(type) { - case *PredictionInput_ModelName: - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.ModelName))) - n += len(x.ModelName) - case *PredictionInput_VersionName: - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.VersionName))) - n += len(x.VersionName) - case *PredictionInput_Uri: - n += proto.SizeVarint(9<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.Uri))) - n += len(x.Uri) - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// Represents results of a prediction job. -type PredictionOutput struct { - // The output Google Cloud Storage location provided at the job creation time. - OutputPath string `protobuf:"bytes,1,opt,name=output_path,json=outputPath" json:"output_path,omitempty"` - // The number of generated predictions. - PredictionCount int64 `protobuf:"varint,2,opt,name=prediction_count,json=predictionCount" json:"prediction_count,omitempty"` - // The number of data instances which resulted in errors. - ErrorCount int64 `protobuf:"varint,3,opt,name=error_count,json=errorCount" json:"error_count,omitempty"` - // Node hours used by the batch prediction job. 
- NodeHours float64 `protobuf:"fixed64,4,opt,name=node_hours,json=nodeHours" json:"node_hours,omitempty"` -} - -func (m *PredictionOutput) Reset() { *m = PredictionOutput{} } -func (m *PredictionOutput) String() string { return proto.CompactTextString(m) } -func (*PredictionOutput) ProtoMessage() {} -func (*PredictionOutput) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } - -func (m *PredictionOutput) GetOutputPath() string { - if m != nil { - return m.OutputPath - } - return "" -} - -func (m *PredictionOutput) GetPredictionCount() int64 { - if m != nil { - return m.PredictionCount - } - return 0 -} - -func (m *PredictionOutput) GetErrorCount() int64 { - if m != nil { - return m.ErrorCount - } - return 0 -} - -func (m *PredictionOutput) GetNodeHours() float64 { - if m != nil { - return m.NodeHours - } - return 0 -} - -// Represents a training or prediction job. -type Job struct { - // Required. The user-specified id of the job. - JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId" json:"job_id,omitempty"` - // Required. Parameters to create a job. - // - // Types that are valid to be assigned to Input: - // *Job_TrainingInput - // *Job_PredictionInput - Input isJob_Input `protobuf_oneof:"input"` - // Output only. When the job was created. - CreateTime *google_protobuf2.Timestamp `protobuf:"bytes,4,opt,name=create_time,json=createTime" json:"create_time,omitempty"` - // Output only. When the job processing was started. - StartTime *google_protobuf2.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime" json:"start_time,omitempty"` - // Output only. When the job processing was completed. - EndTime *google_protobuf2.Timestamp `protobuf:"bytes,6,opt,name=end_time,json=endTime" json:"end_time,omitempty"` - // Output only. The detailed state of a job. - State Job_State `protobuf:"varint,7,opt,name=state,enum=google.cloud.ml.v1beta1.Job_State" json:"state,omitempty"` - // Output only. The details of a failure or a cancellation. - ErrorMessage string `protobuf:"bytes,8,opt,name=error_message,json=errorMessage" json:"error_message,omitempty"` - // Output only. The current result of the job. 
- // - // Types that are valid to be assigned to Output: - // *Job_TrainingOutput - // *Job_PredictionOutput - Output isJob_Output `protobuf_oneof:"output"` -} - -func (m *Job) Reset() { *m = Job{} } -func (m *Job) String() string { return proto.CompactTextString(m) } -func (*Job) ProtoMessage() {} -func (*Job) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } - -type isJob_Input interface { - isJob_Input() -} -type isJob_Output interface { - isJob_Output() -} - -type Job_TrainingInput struct { - TrainingInput *TrainingInput `protobuf:"bytes,2,opt,name=training_input,json=trainingInput,oneof"` -} -type Job_PredictionInput struct { - PredictionInput *PredictionInput `protobuf:"bytes,3,opt,name=prediction_input,json=predictionInput,oneof"` -} -type Job_TrainingOutput struct { - TrainingOutput *TrainingOutput `protobuf:"bytes,9,opt,name=training_output,json=trainingOutput,oneof"` -} -type Job_PredictionOutput struct { - PredictionOutput *PredictionOutput `protobuf:"bytes,10,opt,name=prediction_output,json=predictionOutput,oneof"` -} - -func (*Job_TrainingInput) isJob_Input() {} -func (*Job_PredictionInput) isJob_Input() {} -func (*Job_TrainingOutput) isJob_Output() {} -func (*Job_PredictionOutput) isJob_Output() {} - -func (m *Job) GetInput() isJob_Input { - if m != nil { - return m.Input - } - return nil -} -func (m *Job) GetOutput() isJob_Output { - if m != nil { - return m.Output - } - return nil -} - -func (m *Job) GetJobId() string { - if m != nil { - return m.JobId - } - return "" -} - -func (m *Job) GetTrainingInput() *TrainingInput { - if x, ok := m.GetInput().(*Job_TrainingInput); ok { - return x.TrainingInput - } - return nil -} - -func (m *Job) GetPredictionInput() *PredictionInput { - if x, ok := m.GetInput().(*Job_PredictionInput); ok { - return x.PredictionInput - } - return nil -} - -func (m *Job) GetCreateTime() *google_protobuf2.Timestamp { - if m != nil { - return m.CreateTime - } - return nil -} - -func (m *Job) GetStartTime() *google_protobuf2.Timestamp { - if m != nil { - return m.StartTime - } - return nil -} - -func (m *Job) GetEndTime() *google_protobuf2.Timestamp { - if m != nil { - return m.EndTime - } - return nil -} - -func (m *Job) GetState() Job_State { - if m != nil { - return m.State - } - return Job_STATE_UNSPECIFIED -} - -func (m *Job) GetErrorMessage() string { - if m != nil { - return m.ErrorMessage - } - return "" -} - -func (m *Job) GetTrainingOutput() *TrainingOutput { - if x, ok := m.GetOutput().(*Job_TrainingOutput); ok { - return x.TrainingOutput - } - return nil -} - -func (m *Job) GetPredictionOutput() *PredictionOutput { - if x, ok := m.GetOutput().(*Job_PredictionOutput); ok { - return x.PredictionOutput - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*Job) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _Job_OneofMarshaler, _Job_OneofUnmarshaler, _Job_OneofSizer, []interface{}{ - (*Job_TrainingInput)(nil), - (*Job_PredictionInput)(nil), - (*Job_TrainingOutput)(nil), - (*Job_PredictionOutput)(nil), - } -} - -func _Job_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*Job) - // input - switch x := m.Input.(type) { - case *Job_TrainingInput: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.TrainingInput); err != nil { - return err - } - case *Job_PredictionInput: - b.EncodeVarint(3<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.PredictionInput); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("Job.Input has unexpected type %T", x) - } - // output - switch x := m.Output.(type) { - case *Job_TrainingOutput: - b.EncodeVarint(9<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.TrainingOutput); err != nil { - return err - } - case *Job_PredictionOutput: - b.EncodeVarint(10<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.PredictionOutput); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("Job.Output has unexpected type %T", x) - } - return nil -} - -func _Job_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*Job) - switch tag { - case 2: // input.training_input - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(TrainingInput) - err := b.DecodeMessage(msg) - m.Input = &Job_TrainingInput{msg} - return true, err - case 3: // input.prediction_input - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(PredictionInput) - err := b.DecodeMessage(msg) - m.Input = &Job_PredictionInput{msg} - return true, err - case 9: // output.training_output - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(TrainingOutput) - err := b.DecodeMessage(msg) - m.Output = &Job_TrainingOutput{msg} - return true, err - case 10: // output.prediction_output - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(PredictionOutput) - err := b.DecodeMessage(msg) - m.Output = &Job_PredictionOutput{msg} - return true, err - default: - return false, nil - } -} - -func _Job_OneofSizer(msg proto.Message) (n int) { - m := msg.(*Job) - // input - switch x := m.Input.(type) { - case *Job_TrainingInput: - s := proto.Size(x.TrainingInput) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *Job_PredictionInput: - s := proto.Size(x.PredictionInput) - n += proto.SizeVarint(3<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - // output - switch x := m.Output.(type) { - case *Job_TrainingOutput: - s := proto.Size(x.TrainingOutput) - n += proto.SizeVarint(9<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *Job_PredictionOutput: - s := proto.Size(x.PredictionOutput) - n += proto.SizeVarint(10<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// Request message for the CreateJob method. -type CreateJobRequest struct { - // Required. The project name. 
- // - // Authorization: requires `Editor` role on the specified project. - Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` - // Required. The job to create. - Job *Job `protobuf:"bytes,2,opt,name=job" json:"job,omitempty"` -} - -func (m *CreateJobRequest) Reset() { *m = CreateJobRequest{} } -func (m *CreateJobRequest) String() string { return proto.CompactTextString(m) } -func (*CreateJobRequest) ProtoMessage() {} -func (*CreateJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } - -func (m *CreateJobRequest) GetParent() string { - if m != nil { - return m.Parent - } - return "" -} - -func (m *CreateJobRequest) GetJob() *Job { - if m != nil { - return m.Job - } - return nil -} - -// Request message for the ListJobs method. -type ListJobsRequest struct { - // Required. The name of the project for which to list jobs. - // - // Authorization: requires `Viewer` role on the specified project. - Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` - // Optional. Specifies the subset of jobs to retrieve. - Filter string `protobuf:"bytes,2,opt,name=filter" json:"filter,omitempty"` - // Optional. A page token to request the next page of results. - // - // You get the token from the `next_page_token` field of the response from - // the previous call. - PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` - // Optional. The number of jobs to retrieve per "page" of results. If there - // are more remaining results than this number, the response message will - // contain a valid value in the `next_page_token` field. - // - // The default value is 20, and the maximum page size is 100. - PageSize int32 `protobuf:"varint,5,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` -} - -func (m *ListJobsRequest) Reset() { *m = ListJobsRequest{} } -func (m *ListJobsRequest) String() string { return proto.CompactTextString(m) } -func (*ListJobsRequest) ProtoMessage() {} -func (*ListJobsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } - -func (m *ListJobsRequest) GetParent() string { - if m != nil { - return m.Parent - } - return "" -} - -func (m *ListJobsRequest) GetFilter() string { - if m != nil { - return m.Filter - } - return "" -} - -func (m *ListJobsRequest) GetPageToken() string { - if m != nil { - return m.PageToken - } - return "" -} - -func (m *ListJobsRequest) GetPageSize() int32 { - if m != nil { - return m.PageSize - } - return 0 -} - -// Response message for the ListJobs method. -type ListJobsResponse struct { - // The list of jobs. - Jobs []*Job `protobuf:"bytes,1,rep,name=jobs" json:"jobs,omitempty"` - // Optional. Pass this token as the `page_token` field of the request for a - // subsequent call. - NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` -} - -func (m *ListJobsResponse) Reset() { *m = ListJobsResponse{} } -func (m *ListJobsResponse) String() string { return proto.CompactTextString(m) } -func (*ListJobsResponse) ProtoMessage() {} -func (*ListJobsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } - -func (m *ListJobsResponse) GetJobs() []*Job { - if m != nil { - return m.Jobs - } - return nil -} - -func (m *ListJobsResponse) GetNextPageToken() string { - if m != nil { - return m.NextPageToken - } - return "" -} - -// Request message for the GetJob method. -type GetJobRequest struct { - // Required. 
The name of the job to get the description of. - // - // Authorization: requires `Viewer` role on the parent project. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` -} - -func (m *GetJobRequest) Reset() { *m = GetJobRequest{} } -func (m *GetJobRequest) String() string { return proto.CompactTextString(m) } -func (*GetJobRequest) ProtoMessage() {} -func (*GetJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } - -func (m *GetJobRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -// Request message for the CancelJob method. -type CancelJobRequest struct { - // Required. The name of the job to cancel. - // - // Authorization: requires `Editor` role on the parent project. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` -} - -func (m *CancelJobRequest) Reset() { *m = CancelJobRequest{} } -func (m *CancelJobRequest) String() string { return proto.CompactTextString(m) } -func (*CancelJobRequest) ProtoMessage() {} -func (*CancelJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } - -func (m *CancelJobRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func init() { - proto.RegisterType((*TrainingInput)(nil), "google.cloud.ml.v1beta1.TrainingInput") - proto.RegisterType((*HyperparameterSpec)(nil), "google.cloud.ml.v1beta1.HyperparameterSpec") - proto.RegisterType((*ParameterSpec)(nil), "google.cloud.ml.v1beta1.ParameterSpec") - proto.RegisterType((*HyperparameterOutput)(nil), "google.cloud.ml.v1beta1.HyperparameterOutput") - proto.RegisterType((*HyperparameterOutput_HyperparameterMetric)(nil), "google.cloud.ml.v1beta1.HyperparameterOutput.HyperparameterMetric") - proto.RegisterType((*TrainingOutput)(nil), "google.cloud.ml.v1beta1.TrainingOutput") - proto.RegisterType((*PredictionInput)(nil), "google.cloud.ml.v1beta1.PredictionInput") - proto.RegisterType((*PredictionOutput)(nil), "google.cloud.ml.v1beta1.PredictionOutput") - proto.RegisterType((*Job)(nil), "google.cloud.ml.v1beta1.Job") - proto.RegisterType((*CreateJobRequest)(nil), "google.cloud.ml.v1beta1.CreateJobRequest") - proto.RegisterType((*ListJobsRequest)(nil), "google.cloud.ml.v1beta1.ListJobsRequest") - proto.RegisterType((*ListJobsResponse)(nil), "google.cloud.ml.v1beta1.ListJobsResponse") - proto.RegisterType((*GetJobRequest)(nil), "google.cloud.ml.v1beta1.GetJobRequest") - proto.RegisterType((*CancelJobRequest)(nil), "google.cloud.ml.v1beta1.CancelJobRequest") - proto.RegisterEnum("google.cloud.ml.v1beta1.TrainingInput_ScaleTier", TrainingInput_ScaleTier_name, TrainingInput_ScaleTier_value) - proto.RegisterEnum("google.cloud.ml.v1beta1.HyperparameterSpec_GoalType", HyperparameterSpec_GoalType_name, HyperparameterSpec_GoalType_value) - proto.RegisterEnum("google.cloud.ml.v1beta1.ParameterSpec_ParameterType", ParameterSpec_ParameterType_name, ParameterSpec_ParameterType_value) - proto.RegisterEnum("google.cloud.ml.v1beta1.ParameterSpec_ScaleType", ParameterSpec_ScaleType_name, ParameterSpec_ScaleType_value) - proto.RegisterEnum("google.cloud.ml.v1beta1.PredictionInput_DataFormat", PredictionInput_DataFormat_name, PredictionInput_DataFormat_value) - proto.RegisterEnum("google.cloud.ml.v1beta1.Job_State", Job_State_name, Job_State_value) -} - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for JobService service - -type JobServiceClient interface { - // Creates a training or a batch prediction job. - CreateJob(ctx context.Context, in *CreateJobRequest, opts ...grpc.CallOption) (*Job, error) - // Lists the jobs in the project. - ListJobs(ctx context.Context, in *ListJobsRequest, opts ...grpc.CallOption) (*ListJobsResponse, error) - // Describes a job. - GetJob(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*Job, error) - // Cancels a running job. - CancelJob(ctx context.Context, in *CancelJobRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) -} - -type jobServiceClient struct { - cc *grpc.ClientConn -} - -func NewJobServiceClient(cc *grpc.ClientConn) JobServiceClient { - return &jobServiceClient{cc} -} - -func (c *jobServiceClient) CreateJob(ctx context.Context, in *CreateJobRequest, opts ...grpc.CallOption) (*Job, error) { - out := new(Job) - err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.JobService/CreateJob", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *jobServiceClient) ListJobs(ctx context.Context, in *ListJobsRequest, opts ...grpc.CallOption) (*ListJobsResponse, error) { - out := new(ListJobsResponse) - err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.JobService/ListJobs", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *jobServiceClient) GetJob(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*Job, error) { - out := new(Job) - err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.JobService/GetJob", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *jobServiceClient) CancelJob(ctx context.Context, in *CancelJobRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { - out := new(google_protobuf1.Empty) - err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.JobService/CancelJob", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for JobService service - -type JobServiceServer interface { - // Creates a training or a batch prediction job. - CreateJob(context.Context, *CreateJobRequest) (*Job, error) - // Lists the jobs in the project. - ListJobs(context.Context, *ListJobsRequest) (*ListJobsResponse, error) - // Describes a job. - GetJob(context.Context, *GetJobRequest) (*Job, error) - // Cancels a running job. 
- CancelJob(context.Context, *CancelJobRequest) (*google_protobuf1.Empty, error) -} - -func RegisterJobServiceServer(s *grpc.Server, srv JobServiceServer) { - s.RegisterService(&_JobService_serviceDesc, srv) -} - -func _JobService_CreateJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateJobRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(JobServiceServer).CreateJob(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.cloud.ml.v1beta1.JobService/CreateJob", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(JobServiceServer).CreateJob(ctx, req.(*CreateJobRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _JobService_ListJobs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListJobsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(JobServiceServer).ListJobs(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.cloud.ml.v1beta1.JobService/ListJobs", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(JobServiceServer).ListJobs(ctx, req.(*ListJobsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _JobService_GetJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetJobRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(JobServiceServer).GetJob(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.cloud.ml.v1beta1.JobService/GetJob", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(JobServiceServer).GetJob(ctx, req.(*GetJobRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _JobService_CancelJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CancelJobRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(JobServiceServer).CancelJob(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.cloud.ml.v1beta1.JobService/CancelJob", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(JobServiceServer).CancelJob(ctx, req.(*CancelJobRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _JobService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "google.cloud.ml.v1beta1.JobService", - HandlerType: (*JobServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CreateJob", - Handler: _JobService_CreateJob_Handler, - }, - { - MethodName: "ListJobs", - Handler: _JobService_ListJobs_Handler, - }, - { - MethodName: "GetJob", - Handler: _JobService_GetJob_Handler, - }, - { - MethodName: "CancelJob", - Handler: _JobService_CancelJob_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "google/cloud/ml/v1beta1/job_service.proto", -} - -func init() { proto.RegisterFile("google/cloud/ml/v1beta1/job_service.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 2082 bytes of a gzipped 
FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x58, 0x4b, 0x6f, 0x1b, 0xc9, - 0x11, 0x16, 0x9f, 0x22, 0x8b, 0x12, 0x39, 0x6e, 0x5b, 0x36, 0x2d, 0x7b, 0x63, 0x79, 0x8c, 0x78, - 0x65, 0x07, 0x26, 0xd7, 0xf2, 0x06, 0xd8, 0xf5, 0x22, 0x09, 0x28, 0x72, 0x2c, 0x51, 0x10, 0x1f, - 0x69, 0x0e, 0x1d, 0xc7, 0x08, 0x30, 0x68, 0x92, 0x6d, 0x7a, 0xe4, 0x99, 0xe9, 0xc9, 0x4c, 0xd3, - 0x91, 0x36, 0x30, 0x10, 0x24, 0x39, 0xe5, 0x90, 0x4b, 0x90, 0x1c, 0x03, 0xe4, 0x9a, 0xbf, 0x93, - 0x43, 0xfe, 0x40, 0x8e, 0xf9, 0x01, 0x39, 0x06, 0xfd, 0xe0, 0x53, 0x96, 0xe4, 0x45, 0x72, 0x63, - 0x7f, 0xf5, 0x55, 0x55, 0x77, 0x55, 0x75, 0x4d, 0x35, 0xe1, 0xd1, 0x98, 0xb1, 0xb1, 0x47, 0xab, - 0x43, 0x8f, 0x4d, 0x46, 0x55, 0xdf, 0xab, 0xbe, 0x7f, 0x3a, 0xa0, 0x9c, 0x3c, 0xad, 0x9e, 0xb0, - 0x81, 0x13, 0xd3, 0xe8, 0xbd, 0x3b, 0xa4, 0x95, 0x30, 0x62, 0x9c, 0xa1, 0x5b, 0x8a, 0x5a, 0x91, - 0xd4, 0x8a, 0xef, 0x55, 0x34, 0x75, 0xfb, 0xae, 0xb6, 0x41, 0x42, 0xb7, 0x4a, 0x82, 0x80, 0x71, - 0xc2, 0x5d, 0x16, 0xc4, 0x4a, 0x6d, 0x7b, 0x6b, 0x51, 0x3a, 0xe1, 0x6f, 0x35, 0x7c, 0x47, 0xc3, - 0x72, 0x35, 0x98, 0xbc, 0xa9, 0x52, 0x3f, 0xe4, 0x67, 0x5a, 0x78, 0x6f, 0x55, 0xc8, 0x5d, 0x9f, - 0xc6, 0x9c, 0xf8, 0xa1, 0x22, 0x98, 0x7f, 0xcc, 0xc0, 0xa6, 0x1d, 0x11, 0x37, 0x70, 0x83, 0x71, - 0x33, 0x08, 0x27, 0x1c, 0x75, 0x00, 0xe2, 0x21, 0xf1, 0xa8, 0xc3, 0x5d, 0x1a, 0x95, 0x13, 0x3b, - 0x89, 0xdd, 0xe2, 0xde, 0x17, 0x95, 0x0b, 0xb6, 0x5c, 0x59, 0xd2, 0xad, 0xf4, 0x84, 0xa2, 0xed, - 0xd2, 0x08, 0xe7, 0xe3, 0xe9, 0x4f, 0x74, 0x0f, 0x0a, 0x3e, 0x89, 0x39, 0x8d, 0x1c, 0x7e, 0x16, - 0xd2, 0x72, 0x72, 0x27, 0xb1, 0x9b, 0xc7, 0xa0, 0x20, 0xfb, 0x2c, 0xa4, 0x82, 0xf0, 0x2b, 0x16, - 0xbd, 0x9b, 0x12, 0x52, 0x8a, 0xa0, 0x20, 0x49, 0xd8, 0x83, 0xad, 0x90, 0x44, 0xc4, 0xa7, 0xc2, - 0x88, 0x88, 0xe5, 0x94, 0x9a, 0x96, 0xd4, 0xeb, 0x33, 0x61, 0x4f, 0xca, 0xa4, 0xce, 0x7d, 0xd8, - 0xd0, 0x46, 0x87, 0x6c, 0x12, 0xf0, 0x72, 0x66, 0x27, 0xb1, 0x9b, 0xc2, 0xda, 0x51, 0x5d, 0x40, - 0xe8, 0x4b, 0xb8, 0x79, 0xce, 0xac, 0x22, 0x67, 0x25, 0xf9, 0xc6, 0x8a, 0x5d, 0xa5, 0x75, 0x1f, - 0x36, 0x42, 0x32, 0x7c, 0x47, 0xc6, 0xd4, 0x99, 0x44, 0x6e, 0x5c, 0x5e, 0xdf, 0x49, 0xed, 0xe6, - 0x71, 0x41, 0x63, 0xfd, 0xc8, 0x8d, 0xd1, 0x03, 0xd8, 0x0c, 0xcf, 0xf8, 0x5b, 0x16, 0x38, 0x3e, - 0x1b, 0x4d, 0x3c, 0x5a, 0xce, 0xc9, 0x7d, 0x6e, 0x28, 0xb0, 0x25, 0x31, 0x84, 0x20, 0x4d, 0xa2, - 0x71, 0x5c, 0x06, 0xa9, 0x2f, 0x7f, 0xa3, 0x3e, 0x94, 0xde, 0x9e, 0x85, 0x34, 0x9a, 0x39, 0x8e, - 0xcb, 0x1b, 0x3b, 0x89, 0xdd, 0xc2, 0xde, 0x0f, 0x2e, 0x4c, 0xc0, 0xe1, 0x12, 0xbf, 0x17, 0xd2, - 0x21, 0x5e, 0xb5, 0x81, 0x6e, 0x42, 0x36, 0xa2, 0x63, 0x97, 0x05, 0xe5, 0xa2, 0xdc, 0x88, 0x5e, - 0xa1, 0x5b, 0xb0, 0x2e, 0xaa, 0x73, 0xe4, 0x46, 0x65, 0x43, 0x09, 0x4e, 0xd8, 0xa0, 0xe1, 0x46, - 0xe8, 0x73, 0x28, 0x45, 0x93, 0x40, 0xd4, 0x8a, 0xf3, 0x9e, 0x46, 0xb1, 0xd0, 0x2c, 0x49, 0x42, - 0x51, 0xc3, 0x2f, 0x15, 0x6a, 0x76, 0x21, 0x3f, 0xcb, 0x39, 0xca, 0x43, 0x66, 0xbf, 0xd6, 0x6b, - 0xd6, 0x8d, 0x35, 0x54, 0x04, 0xe8, 0xd9, 0xb5, 0x76, 0xa3, 0x86, 0x1b, 0xce, 0x53, 0x23, 0x81, - 0x36, 0x21, 0xdf, 0xc5, 0x56, 0xab, 0xd9, 0x6f, 0x39, 0x4f, 0x8d, 0x94, 0x58, 0x4a, 0xa6, 0x73, - 0xd0, 0xed, 0x1b, 0x59, 0x04, 0x90, 0xad, 0xf7, 0x7b, 0x76, 0xa7, 0x65, 0x64, 0xcc, 0x7f, 0x27, - 0x01, 0x9d, 0x3f, 0x13, 0x3a, 0x84, 0xf4, 0x98, 0x11, 0x4f, 0xd7, 0xe3, 0x97, 0xdf, 0x21, 0x1c, - 0x95, 0x03, 0x46, 0x3c, 0x51, 0x12, 0x58, 0x5a, 0x40, 0x3f, 0x86, 0xac, 0x94, 0xc7, 0xe5, 0xe4, - 0x4e, 0x6a, 0xb7, 0xb0, 0xf7, 0xf0, 0x42, 0x5b, 0xdd, 0xa5, 0xa8, 0x6a, 0x2d, 0xf4, 0x19, 0x80, - 
0x4f, 0x4e, 0x1d, 0x1e, 0xb9, 0xc4, 0x8b, 0x65, 0xb1, 0x66, 0x70, 0xde, 0x27, 0xa7, 0xb6, 0x04, - 0x50, 0x05, 0xae, 0x0b, 0xb1, 0x20, 0x7b, 0x1e, 0xf5, 0xa6, 0xbc, 0xb4, 0xe4, 0x5d, 0xf3, 0xc9, - 0x69, 0x57, 0x4b, 0x34, 0xff, 0x39, 0xdc, 0x5e, 0x4e, 0x97, 0xe3, 0x53, 0x1e, 0xb9, 0x43, 0x87, - 0x93, 0xb1, 0x2c, 0xda, 0x3c, 0xbe, 0xb5, 0x4c, 0x68, 0x49, 0xb9, 0x4d, 0xc6, 0x66, 0x0d, 0x72, - 0xd3, 0xc3, 0xa1, 0xdb, 0xb0, 0x75, 0xd0, 0xa9, 0x1d, 0x3b, 0xf6, 0xcf, 0xbb, 0x96, 0xd3, 0x6f, - 0xf7, 0xba, 0x56, 0xbd, 0xf9, 0xa2, 0x69, 0x35, 0x8c, 0x35, 0xb4, 0x01, 0xb9, 0x56, 0xed, 0x55, - 0xb3, 0xd5, 0x7c, 0x6d, 0x19, 0x09, 0xb9, 0x6a, 0xb6, 0xd5, 0x2a, 0x69, 0xfe, 0x3d, 0x0d, 0x9b, - 0x4b, 0xe7, 0x44, 0xdf, 0x87, 0xe2, 0x7c, 0x2f, 0x01, 0xf1, 0xa9, 0x8c, 0x79, 0x1e, 0x6f, 0xce, - 0xd0, 0x36, 0xf1, 0xa9, 0x48, 0xc8, 0xec, 0x0a, 0x5e, 0x96, 0x90, 0x25, 0xe3, 0xf3, 0x95, 0x4a, - 0x88, 0xb0, 0x80, 0xee, 0x40, 0xde, 0x77, 0x03, 0xe7, 0x3d, 0xf1, 0x26, 0xaa, 0x3b, 0x24, 0x70, - 0xce, 0x77, 0x83, 0x97, 0x62, 0x2d, 0x85, 0xe4, 0x54, 0x0b, 0x53, 0x5a, 0x48, 0x4e, 0x95, 0xf0, - 0x09, 0xa0, 0x21, 0xe1, 0x74, 0xcc, 0x22, 0x77, 0x48, 0x3c, 0x45, 0x8a, 0xcb, 0x19, 0x79, 0xa1, - 0xae, 0x2d, 0x48, 0x24, 0x3b, 0x16, 0x55, 0x3d, 0x72, 0xe3, 0x61, 0x44, 0x39, 0x9d, 0x72, 0xb3, - 0x3b, 0xa9, 0xdd, 0x04, 0x2e, 0x4e, 0x61, 0x4d, 0x9c, 0xb7, 0x40, 0x71, 0xc2, 0xf5, 0x2b, 0x5a, - 0xe0, 0xf2, 0x09, 0xd5, 0x75, 0x10, 0xa7, 0xd3, 0x2d, 0xf0, 0x2c, 0xa4, 0xe6, 0x78, 0x21, 0xc8, - 0x32, 0x5b, 0xdf, 0x83, 0xed, 0x6e, 0x0d, 0xd7, 0x5a, 0x96, 0x6d, 0xe1, 0x8f, 0xa5, 0x0c, 0x20, - 0xdb, 0xe8, 0xf4, 0xf7, 0x8f, 0x45, 0xc2, 0x0a, 0xb0, 0xde, 0x6c, 0xdb, 0xd6, 0x81, 0x85, 0x8d, - 0x24, 0x2a, 0x41, 0xa1, 0x5e, 0xb3, 0xad, 0x83, 0x0e, 0x6e, 0xd6, 0x6b, 0xc7, 0x46, 0x4a, 0xa4, - 0xb3, 0xd1, 0xec, 0xd5, 0xb1, 0x65, 0x5b, 0x46, 0xda, 0xfc, 0xc5, 0xf4, 0x3e, 0x0a, 0x27, 0x39, - 0x48, 0xb7, 0x3b, 0x6d, 0xcb, 0x58, 0x43, 0x5b, 0x70, 0xad, 0xdf, 0x6e, 0xda, 0xce, 0x71, 0xb3, - 0x6d, 0xd5, 0xb0, 0xd3, 0xab, 0xd7, 0xa4, 0x65, 0x04, 0x45, 0x05, 0x77, 0x0e, 0x34, 0x96, 0x44, - 0xdb, 0x70, 0x53, 0x62, 0xd8, 0x7a, 0x69, 0xe1, 0x9e, 0xb5, 0x20, 0x4b, 0x99, 0x7f, 0x4e, 0xc3, - 0x8d, 0xe5, 0x0b, 0xd6, 0x99, 0x70, 0xf1, 0xcd, 0xb8, 0x0d, 0x39, 0x59, 0xe7, 0x8e, 0x3b, 0xd2, - 0xd5, 0xb2, 0x2e, 0xd7, 0xcd, 0x11, 0xf2, 0xce, 0xb7, 0x34, 0x75, 0xef, 0xf6, 0x3f, 0xf1, 0x0e, - 0x2b, 0x17, 0x2b, 0x60, 0x6c, 0x05, 0x3c, 0x3a, 0x3b, 0xdf, 0xe9, 0x28, 0x6c, 0xbc, 0x71, 0x03, - 0xe2, 0xe9, 0x4b, 0x24, 0x2b, 0xe6, 0x7f, 0x74, 0xa5, 0xae, 0x1b, 0x2e, 0x48, 0xbb, 0x6a, 0x81, - 0x86, 0x50, 0x20, 0xde, 0xd4, 0x89, 0xb8, 0xdc, 0xa9, 0xff, 0x93, 0x17, 0x20, 0x9e, 0xf6, 0x11, - 0x6f, 0x8f, 0x56, 0x83, 0xad, 0x9d, 0x3f, 0x80, 0x4d, 0xae, 0xbf, 0xba, 0x4e, 0xcc, 0x69, 0x28, - 0x23, 0x9e, 0xc2, 0x1b, 0x53, 0xb0, 0xc7, 0x69, 0x28, 0x6a, 0x9d, 0x0d, 0x4e, 0xe8, 0x90, 0xbb, - 0xef, 0xe9, 0xd2, 0xd5, 0x2a, 0xce, 0x60, 0x59, 0xec, 0xdb, 0xfb, 0xab, 0x5e, 0x54, 0x68, 0x91, - 0x01, 0xa9, 0x77, 0xf4, 0x4c, 0x67, 0x53, 0xfc, 0x44, 0x37, 0x20, 0x33, 0x37, 0x94, 0xc7, 0x6a, - 0xf1, 0x3c, 0xf9, 0x55, 0xc2, 0xfc, 0x4f, 0x02, 0x8a, 0xd3, 0x41, 0x40, 0x57, 0xc4, 0x1e, 0x6c, - 0x0d, 0x99, 0x1f, 0x7a, 0x94, 0xd3, 0x91, 0xea, 0x81, 0xfa, 0xd3, 0xaa, 0x36, 0x7b, 0x7d, 0x26, - 0x94, 0x6d, 0x50, 0x7d, 0x59, 0x2d, 0xc8, 0xea, 0x6e, 0xa9, 0x2a, 0xe4, 0xc9, 0x77, 0x0a, 0x28, - 0xd6, 0xca, 0xe8, 0x31, 0x5c, 0x1b, 0xb2, 0x20, 0x9e, 0xf8, 0x74, 0xe4, 0xf8, 0x9e, 0x33, 0x09, - 0x5c, 0x1e, 0xeb, 0xd6, 0x51, 0x9a, 0x0a, 0x5a, 0x5e, 0x5f, 0xc0, 0xe8, 0x27, 0x70, 0xd7, 0x8d, - 0x9d, 0x95, 0x06, 0xcc, 
0x27, 0x32, 0xb6, 0x27, 0x6c, 0x20, 0xbb, 0x5b, 0x0e, 0xdf, 0x76, 0xe3, - 0x65, 0x8f, 0xb6, 0x64, 0x1c, 0xb1, 0x81, 0xf9, 0xcf, 0x14, 0x94, 0xba, 0x11, 0x1d, 0xb9, 0x43, - 0x31, 0xaa, 0xa9, 0x09, 0xea, 0x1e, 0x80, 0xcf, 0x46, 0xd4, 0x5b, 0xe8, 0x9e, 0x87, 0x6b, 0x38, - 0x2f, 0x31, 0xd9, 0x3b, 0x1f, 0xc0, 0x86, 0xfe, 0xac, 0x2a, 0x4a, 0x52, 0x53, 0x0a, 0x1a, 0x95, - 0x24, 0x04, 0xa9, 0x49, 0xe4, 0x96, 0xf3, 0x5a, 0x26, 0x16, 0xc8, 0x86, 0xc2, 0x88, 0x70, 0xe2, - 0xbc, 0x61, 0x91, 0x4f, 0xb8, 0x3c, 0x54, 0x71, 0xef, 0xd9, 0xc5, 0x9d, 0x69, 0x79, 0x63, 0x95, - 0x06, 0xe1, 0xe4, 0x85, 0x54, 0xc5, 0x30, 0x9a, 0xfd, 0x16, 0xf3, 0x97, 0x2b, 0xe4, 0x4e, 0x48, - 0xf8, 0x5b, 0x55, 0xcd, 0x79, 0x0c, 0x12, 0xea, 0x0a, 0x44, 0x10, 0x98, 0x8c, 0xb1, 0x64, 0xe8, - 0xaf, 0x12, 0x28, 0x48, 0x30, 0xd0, 0x2e, 0x18, 0xa2, 0x4b, 0x2f, 0x0d, 0x5c, 0x6a, 0x86, 0x2a, - 0xfa, 0xe4, 0xf4, 0x67, 0x0b, 0x33, 0xd7, 0x7c, 0x14, 0x59, 0x5f, 0x1a, 0x45, 0x3e, 0x32, 0x71, - 0xe4, 0x3e, 0x3a, 0x71, 0xbc, 0x04, 0x98, 0x1f, 0x03, 0xdd, 0x81, 0x5b, 0x8d, 0x9a, 0x5d, 0x73, - 0x5e, 0x74, 0x70, 0xab, 0x66, 0xaf, 0x34, 0xd1, 0x1c, 0xa4, 0x6d, 0xeb, 0x95, 0xad, 0xc6, 0x0f, - 0xfb, 0x85, 0x83, 0xad, 0x7a, 0x07, 0x37, 0x8c, 0xa4, 0xe8, 0x7b, 0xb3, 0xa5, 0x73, 0xf0, 0xba, - 0xd9, 0x35, 0x52, 0xfb, 0x25, 0xd8, 0x54, 0x49, 0xd3, 0xee, 0xcd, 0xbf, 0x26, 0xc0, 0x98, 0x07, - 0x50, 0x97, 0xf5, 0x4a, 0x24, 0x12, 0xe7, 0x22, 0xf1, 0x08, 0x8c, 0x70, 0xa6, 0xa4, 0x23, 0x91, - 0x94, 0x91, 0x28, 0xcd, 0x71, 0x15, 0x8a, 0x7b, 0x50, 0xa0, 0x51, 0xc4, 0xa6, 0xf1, 0x4a, 0x49, - 0x16, 0x48, 0x48, 0x11, 0x3e, 0x03, 0x08, 0xd8, 0x88, 0x3a, 0x6f, 0xd9, 0x24, 0x52, 0x13, 0x44, - 0x02, 0xe7, 0x05, 0x72, 0x28, 0x00, 0xf3, 0x2f, 0x59, 0x48, 0x1d, 0xb1, 0x01, 0xda, 0x02, 0x31, - 0xb6, 0xcd, 0x5b, 0x6f, 0xe6, 0x84, 0x0d, 0x9a, 0x23, 0xd4, 0x81, 0xe2, 0xac, 0x4d, 0xc8, 0x5c, - 0xca, 0x7d, 0x5c, 0x36, 0xef, 0x2c, 0xcd, 0xf2, 0x87, 0x6b, 0x78, 0xd6, 0x66, 0x54, 0x59, 0xf7, - 0x97, 0x8e, 0xa6, 0x4c, 0xaa, 0xfe, 0xba, 0xfb, 0xa9, 0x15, 0x78, 0xb8, 0xb6, 0x18, 0x06, 0x65, - 0xf6, 0x1b, 0x28, 0x0c, 0x23, 0x4a, 0xb8, 0x78, 0x70, 0xf8, 0x6a, 0x9e, 0x28, 0xec, 0x6d, 0x4f, - 0x2d, 0x4e, 0x1f, 0x2e, 0x15, 0x7b, 0xfa, 0x70, 0xc1, 0xa0, 0xe8, 0x02, 0x40, 0x5f, 0x03, 0xc4, - 0x9c, 0x44, 0x5c, 0xe9, 0x66, 0xae, 0xd4, 0xcd, 0x4b, 0xb6, 0x54, 0xfd, 0x21, 0xe4, 0x68, 0x30, - 0x52, 0x8a, 0xd9, 0x2b, 0x15, 0xd7, 0x69, 0x30, 0x92, 0x6a, 0x5f, 0x41, 0x26, 0xe6, 0x84, 0x4f, - 0xc7, 0x02, 0xf3, 0xc2, 0xa3, 0x1f, 0xb1, 0x41, 0xa5, 0x27, 0x98, 0x58, 0x29, 0x88, 0xbe, 0xad, - 0xf2, 0xed, 0xd3, 0x38, 0x26, 0xe3, 0xd9, 0xab, 0x40, 0x82, 0x2d, 0x85, 0x21, 0x0c, 0xa5, 0x59, - 0xd6, 0x54, 0x59, 0xc9, 0x0e, 0x50, 0xd8, 0xfb, 0xfc, 0xca, 0xb4, 0xa9, 0x12, 0x3d, 0x4c, 0xe0, - 0x59, 0xde, 0x75, 0xd1, 0xbe, 0x82, 0x6b, 0x0b, 0x89, 0xd3, 0x56, 0x41, 0x5a, 0x7d, 0xf4, 0x09, - 0x99, 0x9b, 0xd9, 0x5d, 0x48, 0xbf, 0xc2, 0xcc, 0xdf, 0x24, 0x20, 0x23, 0xcf, 0x28, 0x26, 0x8c, - 0x9e, 0x5d, 0xb3, 0x3f, 0x32, 0xc7, 0xfc, 0xb4, 0x6f, 0xf5, 0xad, 0xc6, 0xec, 0x0d, 0xd0, 0xad, - 0xe1, 0x66, 0xfb, 0xc0, 0x48, 0x8a, 0xb1, 0x06, 0xf7, 0xdb, 0x6d, 0xb1, 0x90, 0x0f, 0x82, 0x5e, - 0xbf, 0x5e, 0xb7, 0xac, 0x86, 0xd5, 0x30, 0xd2, 0x42, 0xed, 0x45, 0xad, 0x79, 0x6c, 0x35, 0x8c, - 0x8c, 0x78, 0x4a, 0xd4, 0x6b, 0xed, 0xba, 0x75, 0x7c, 0x2c, 0xa8, 0x59, 0x41, 0xd5, 0x6b, 0xab, - 0x61, 0xac, 0xef, 0xaf, 0x43, 0x46, 0x96, 0xe2, 0x7e, 0x0e, 0xb2, 0xea, 0x68, 0xe6, 0x6b, 0x30, - 0xea, 0xb2, 0x44, 0x8e, 0xd8, 0x00, 0xd3, 0x5f, 0x4e, 0x68, 0x2c, 0xfb, 0x4e, 0x48, 0x22, 0xaa, - 0x3f, 0x40, 0x79, 0xac, 0x57, 0xa8, 0x02, 0x29, 
0xd1, 0xe7, 0xd5, 0xd5, 0xb8, 0x7b, 0x59, 0x32, - 0xb1, 0x20, 0x9a, 0x1f, 0xa0, 0x74, 0xec, 0xc6, 0xfc, 0x88, 0x0d, 0xe2, 0xab, 0x4c, 0xdf, 0x84, - 0xec, 0x1b, 0xd7, 0xe3, 0x34, 0xd2, 0x1f, 0x4c, 0xbd, 0x12, 0xd7, 0x3a, 0x14, 0xaf, 0x47, 0xce, - 0xde, 0xd1, 0x40, 0x3f, 0x61, 0xf3, 0x02, 0xb1, 0x05, 0x20, 0x26, 0x5e, 0x29, 0x8e, 0xdd, 0x6f, - 0x55, 0x45, 0x67, 0x70, 0x4e, 0x00, 0x3d, 0xf7, 0x5b, 0x6a, 0x7a, 0x60, 0xcc, 0xdd, 0xc7, 0x21, - 0x0b, 0x62, 0x8a, 0xbe, 0x80, 0xf4, 0x09, 0x1b, 0xc4, 0xe5, 0x84, 0xfc, 0x68, 0x5e, 0x7e, 0x06, - 0xc9, 0x44, 0x0f, 0xa1, 0x14, 0xd0, 0x53, 0xd1, 0xc3, 0x66, 0xdb, 0x50, 0x5b, 0xdc, 0x14, 0x70, - 0x77, 0xba, 0x15, 0xf3, 0x01, 0x6c, 0x1e, 0x50, 0xbe, 0x10, 0x45, 0x04, 0xe9, 0x85, 0x17, 0x81, - 0xfc, 0x6d, 0x3e, 0x04, 0xa3, 0x4e, 0x82, 0x21, 0xf5, 0x2e, 0xe7, 0xed, 0xfd, 0x2d, 0x0d, 0x70, - 0xc4, 0x06, 0x3d, 0xf5, 0x57, 0x08, 0xfa, 0x7d, 0x02, 0xf2, 0xb3, 0x2c, 0xa1, 0x8b, 0xeb, 0x70, - 0x35, 0x93, 0xdb, 0x97, 0x1e, 0xd0, 0xac, 0xfc, 0xf6, 0x1f, 0xff, 0xfa, 0x53, 0x72, 0xd7, 0xbc, - 0x3f, 0xfb, 0xff, 0xe5, 0xd7, 0x2a, 0x1d, 0x3f, 0x0a, 0x23, 0x26, 0x06, 0x9f, 0xb8, 0xfa, 0xf8, - 0x43, 0x55, 0x44, 0xe1, 0xb9, 0xc8, 0x27, 0xfa, 0x43, 0x02, 0x72, 0xd3, 0x88, 0xa2, 0x8b, 0xfb, - 0xd8, 0x4a, 0xce, 0xb7, 0x1f, 0x7d, 0x02, 0x53, 0xa5, 0xc7, 0x7c, 0x24, 0x77, 0xf4, 0x00, 0x5d, - 0xbd, 0x23, 0x74, 0x06, 0x59, 0x15, 0x6f, 0x74, 0x71, 0x93, 0x5e, 0x4a, 0xc8, 0x15, 0xc1, 0xf8, - 0x88, 0x6b, 0x91, 0x8a, 0x05, 0xc7, 0xd2, 0x6f, 0xf5, 0xf1, 0x07, 0xf4, 0x3b, 0x91, 0x8e, 0x69, - 0x1a, 0x2f, 0x4b, 0xc7, 0x4a, 0xaa, 0xb7, 0x6f, 0x9e, 0x6b, 0x9a, 0x96, 0x1f, 0xf2, 0x33, 0xf3, - 0x99, 0xf4, 0xfd, 0xc4, 0xdc, 0xbd, 0xd2, 0xf7, 0xf3, 0xa1, 0xb4, 0xf9, 0x3c, 0xf1, 0x78, 0x9f, - 0xc1, 0xbd, 0x21, 0xf3, 0xcf, 0x39, 0x27, 0xa1, 0x3b, 0xdd, 0xc0, 0x7e, 0x69, 0x5e, 0x43, 0x5d, - 0xe1, 0xb1, 0x9b, 0x78, 0xfd, 0xb5, 0xe6, 0x8f, 0x99, 0x47, 0x82, 0x71, 0x85, 0x45, 0xe3, 0xea, - 0x98, 0x06, 0x72, 0x3f, 0x55, 0x25, 0x22, 0xa1, 0x1b, 0x9f, 0xfb, 0x67, 0xee, 0x1b, 0xdf, 0x1b, - 0x64, 0x25, 0xeb, 0xd9, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xaf, 0x10, 0x62, 0xa8, 0xbe, 0x13, - 0x00, 0x00, -} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/model_service.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/model_service.pb.go deleted file mode 100644 index 486865db6..000000000 --- a/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/model_service.pb.go +++ /dev/null @@ -1,1050 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/cloud/ml/v1beta1/model_service.proto - -package ml - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "google.golang.org/genproto/googleapis/api/annotations" -import _ "google.golang.org/genproto/googleapis/api/serviceconfig" -import google_longrunning "google.golang.org/genproto/googleapis/longrunning" -import google_protobuf2 "github.com/golang/protobuf/ptypes/timestamp" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// Represents a machine learning solution. -// -// A model can have multiple versions, each of which is a deployed, trained -// model ready to receive prediction requests. The model itself is just a -// container. -type Model struct { - // Required. The name specified for the model when it was created. 
- // - // The model name must be unique within the project it is created in. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Optional. The description specified for the model when it was created. - Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` - // Output only. The default version of the model. This version will be used to - // handle prediction requests that do not specify a version. - // - // You can change the default version by calling - // [projects.methods.versions.setDefault](/ml/reference/rest/v1beta1/projects.models.versions/setDefault). - DefaultVersion *Version `protobuf:"bytes,3,opt,name=default_version,json=defaultVersion" json:"default_version,omitempty"` - // Optional. The list of regions where the model is going to be deployed. - // Currently only one region per model is supported. - // Defaults to 'us-central1' if nothing is set. - Regions []string `protobuf:"bytes,4,rep,name=regions" json:"regions,omitempty"` - // Optional. If true, enables StackDriver Logging for online prediction. - // Default is false. - OnlinePredictionLogging bool `protobuf:"varint,5,opt,name=online_prediction_logging,json=onlinePredictionLogging" json:"online_prediction_logging,omitempty"` -} - -func (m *Model) Reset() { *m = Model{} } -func (m *Model) String() string { return proto.CompactTextString(m) } -func (*Model) ProtoMessage() {} -func (*Model) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } - -func (m *Model) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Model) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Model) GetDefaultVersion() *Version { - if m != nil { - return m.DefaultVersion - } - return nil -} - -func (m *Model) GetRegions() []string { - if m != nil { - return m.Regions - } - return nil -} - -func (m *Model) GetOnlinePredictionLogging() bool { - if m != nil { - return m.OnlinePredictionLogging - } - return false -} - -// Represents a version of the model. -// -// Each version is a trained model deployed in the cloud, ready to handle -// prediction requests. A model can have multiple versions. You can get -// information about all of the versions of a given model by calling -// [projects.models.versions.list](/ml/reference/rest/v1beta1/projects.models.versions/list). -type Version struct { - // Required.The name specified for the version when it was created. - // - // The version name must be unique within the model it is created in. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Optional. The description specified for the version when it was created. - Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` - // Output only. If true, this version will be used to handle prediction - // requests that do not specify a version. - // - // You can change the default version by calling - // [projects.methods.versions.setDefault](/ml/reference/rest/v1beta1/projects.models.versions/setDefault). - IsDefault bool `protobuf:"varint,3,opt,name=is_default,json=isDefault" json:"is_default,omitempty"` - // Required. The Google Cloud Storage location of the trained model used to - // create the version. See the - // [overview of model deployment](/ml/docs/concepts/deployment-overview) for - // more informaiton. 
- // - // When passing Version to - // [projects.models.versions.create](/ml/reference/rest/v1beta1/projects.models.versions/create) - // the model service uses the specified location as the source of the model. - // Once deployed, the model version is hosted by the prediction service, so - // this location is useful only as a historical record. - DeploymentUri string `protobuf:"bytes,4,opt,name=deployment_uri,json=deploymentUri" json:"deployment_uri,omitempty"` - // Output only. The time the version was created. - CreateTime *google_protobuf2.Timestamp `protobuf:"bytes,5,opt,name=create_time,json=createTime" json:"create_time,omitempty"` - // Output only. The time the version was last used for prediction. - LastUseTime *google_protobuf2.Timestamp `protobuf:"bytes,6,opt,name=last_use_time,json=lastUseTime" json:"last_use_time,omitempty"` - // Optional. The Google Cloud ML runtime version to use for this deployment. - // If not set, Google Cloud ML will choose a version. - RuntimeVersion string `protobuf:"bytes,8,opt,name=runtime_version,json=runtimeVersion" json:"runtime_version,omitempty"` - // Optional. Manually select the number of nodes to use for serving the - // model. If unset (i.e., by default), the number of nodes used to serve - // the model automatically scales with traffic. However, care should be - // taken to ramp up traffic according to the model's ability to scale. If - // your model needs to handle bursts of traffic beyond it's ability to - // scale, it is recommended you set this field appropriately. - ManualScaling *ManualScaling `protobuf:"bytes,9,opt,name=manual_scaling,json=manualScaling" json:"manual_scaling,omitempty"` -} - -func (m *Version) Reset() { *m = Version{} } -func (m *Version) String() string { return proto.CompactTextString(m) } -func (*Version) ProtoMessage() {} -func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } - -func (m *Version) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Version) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Version) GetIsDefault() bool { - if m != nil { - return m.IsDefault - } - return false -} - -func (m *Version) GetDeploymentUri() string { - if m != nil { - return m.DeploymentUri - } - return "" -} - -func (m *Version) GetCreateTime() *google_protobuf2.Timestamp { - if m != nil { - return m.CreateTime - } - return nil -} - -func (m *Version) GetLastUseTime() *google_protobuf2.Timestamp { - if m != nil { - return m.LastUseTime - } - return nil -} - -func (m *Version) GetRuntimeVersion() string { - if m != nil { - return m.RuntimeVersion - } - return "" -} - -func (m *Version) GetManualScaling() *ManualScaling { - if m != nil { - return m.ManualScaling - } - return nil -} - -// Options for manually scaling a model. -type ManualScaling struct { - // The number of nodes to allocate for this model. These nodes are always up, - // starting from the time the model is deployed, so the cost of operating - // this model will be proportional to nodes * number of hours since - // deployment. 
- Nodes int32 `protobuf:"varint,1,opt,name=nodes" json:"nodes,omitempty"` -} - -func (m *ManualScaling) Reset() { *m = ManualScaling{} } -func (m *ManualScaling) String() string { return proto.CompactTextString(m) } -func (*ManualScaling) ProtoMessage() {} -func (*ManualScaling) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } - -func (m *ManualScaling) GetNodes() int32 { - if m != nil { - return m.Nodes - } - return 0 -} - -// Request message for the CreateModel method. -type CreateModelRequest struct { - // Required. The project name. - // - // Authorization: requires `Editor` role on the specified project. - Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` - // Required. The model to create. - Model *Model `protobuf:"bytes,2,opt,name=model" json:"model,omitempty"` -} - -func (m *CreateModelRequest) Reset() { *m = CreateModelRequest{} } -func (m *CreateModelRequest) String() string { return proto.CompactTextString(m) } -func (*CreateModelRequest) ProtoMessage() {} -func (*CreateModelRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } - -func (m *CreateModelRequest) GetParent() string { - if m != nil { - return m.Parent - } - return "" -} - -func (m *CreateModelRequest) GetModel() *Model { - if m != nil { - return m.Model - } - return nil -} - -// Request message for the ListModels method. -type ListModelsRequest struct { - // Required. The name of the project whose models are to be listed. - // - // Authorization: requires `Viewer` role on the specified project. - Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` - // Optional. A page token to request the next page of results. - // - // You get the token from the `next_page_token` field of the response from - // the previous call. - PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` - // Optional. The number of models to retrieve per "page" of results. If there - // are more remaining results than this number, the response message will - // contain a valid value in the `next_page_token` field. - // - // The default value is 20, and the maximum page size is 100. - PageSize int32 `protobuf:"varint,5,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` -} - -func (m *ListModelsRequest) Reset() { *m = ListModelsRequest{} } -func (m *ListModelsRequest) String() string { return proto.CompactTextString(m) } -func (*ListModelsRequest) ProtoMessage() {} -func (*ListModelsRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } - -func (m *ListModelsRequest) GetParent() string { - if m != nil { - return m.Parent - } - return "" -} - -func (m *ListModelsRequest) GetPageToken() string { - if m != nil { - return m.PageToken - } - return "" -} - -func (m *ListModelsRequest) GetPageSize() int32 { - if m != nil { - return m.PageSize - } - return 0 -} - -// Response message for the ListModels method. -type ListModelsResponse struct { - // The list of models. - Models []*Model `protobuf:"bytes,1,rep,name=models" json:"models,omitempty"` - // Optional. Pass this token as the `page_token` field of the request for a - // subsequent call. 
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` -} - -func (m *ListModelsResponse) Reset() { *m = ListModelsResponse{} } -func (m *ListModelsResponse) String() string { return proto.CompactTextString(m) } -func (*ListModelsResponse) ProtoMessage() {} -func (*ListModelsResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } - -func (m *ListModelsResponse) GetModels() []*Model { - if m != nil { - return m.Models - } - return nil -} - -func (m *ListModelsResponse) GetNextPageToken() string { - if m != nil { - return m.NextPageToken - } - return "" -} - -// Request message for the GetModel method. -type GetModelRequest struct { - // Required. The name of the model. - // - // Authorization: requires `Viewer` role on the parent project. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` -} - -func (m *GetModelRequest) Reset() { *m = GetModelRequest{} } -func (m *GetModelRequest) String() string { return proto.CompactTextString(m) } -func (*GetModelRequest) ProtoMessage() {} -func (*GetModelRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } - -func (m *GetModelRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -// Request message for the DeleteModel method. -type DeleteModelRequest struct { - // Required. The name of the model. - // - // Authorization: requires `Editor` role on the parent project. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` -} - -func (m *DeleteModelRequest) Reset() { *m = DeleteModelRequest{} } -func (m *DeleteModelRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteModelRequest) ProtoMessage() {} -func (*DeleteModelRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } - -func (m *DeleteModelRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -// Uploads the provided trained model version to Cloud Machine Learning. -type CreateVersionRequest struct { - // Required. The name of the model. - // - // Authorization: requires `Editor` role on the parent project. - Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` - // Required. The version details. - Version *Version `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` -} - -func (m *CreateVersionRequest) Reset() { *m = CreateVersionRequest{} } -func (m *CreateVersionRequest) String() string { return proto.CompactTextString(m) } -func (*CreateVersionRequest) ProtoMessage() {} -func (*CreateVersionRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} } - -func (m *CreateVersionRequest) GetParent() string { - if m != nil { - return m.Parent - } - return "" -} - -func (m *CreateVersionRequest) GetVersion() *Version { - if m != nil { - return m.Version - } - return nil -} - -// Request message for the ListVersions method. -type ListVersionsRequest struct { - // Required. The name of the model for which to list the version. - // - // Authorization: requires `Viewer` role on the parent project. - Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` - // Optional. A page token to request the next page of results. - // - // You get the token from the `next_page_token` field of the response from - // the previous call. - PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` - // Optional. The number of versions to retrieve per "page" of results. 
If - // there are more remaining results than this number, the response message - // will contain a valid value in the `next_page_token` field. - // - // The default value is 20, and the maximum page size is 100. - PageSize int32 `protobuf:"varint,5,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` -} - -func (m *ListVersionsRequest) Reset() { *m = ListVersionsRequest{} } -func (m *ListVersionsRequest) String() string { return proto.CompactTextString(m) } -func (*ListVersionsRequest) ProtoMessage() {} -func (*ListVersionsRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9} } - -func (m *ListVersionsRequest) GetParent() string { - if m != nil { - return m.Parent - } - return "" -} - -func (m *ListVersionsRequest) GetPageToken() string { - if m != nil { - return m.PageToken - } - return "" -} - -func (m *ListVersionsRequest) GetPageSize() int32 { - if m != nil { - return m.PageSize - } - return 0 -} - -// Response message for the ListVersions method. -type ListVersionsResponse struct { - // The list of versions. - Versions []*Version `protobuf:"bytes,1,rep,name=versions" json:"versions,omitempty"` - // Optional. Pass this token as the `page_token` field of the request for a - // subsequent call. - NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` -} - -func (m *ListVersionsResponse) Reset() { *m = ListVersionsResponse{} } -func (m *ListVersionsResponse) String() string { return proto.CompactTextString(m) } -func (*ListVersionsResponse) ProtoMessage() {} -func (*ListVersionsResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10} } - -func (m *ListVersionsResponse) GetVersions() []*Version { - if m != nil { - return m.Versions - } - return nil -} - -func (m *ListVersionsResponse) GetNextPageToken() string { - if m != nil { - return m.NextPageToken - } - return "" -} - -// Request message for the GetVersion method. -type GetVersionRequest struct { - // Required. The name of the version. - // - // Authorization: requires `Viewer` role on the parent project. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` -} - -func (m *GetVersionRequest) Reset() { *m = GetVersionRequest{} } -func (m *GetVersionRequest) String() string { return proto.CompactTextString(m) } -func (*GetVersionRequest) ProtoMessage() {} -func (*GetVersionRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{11} } - -func (m *GetVersionRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -// Request message for the DeleteVerionRequest method. -type DeleteVersionRequest struct { - // Required. The name of the version. You can get the names of all the - // versions of a model by calling - // [projects.models.versions.list](/ml/reference/rest/v1beta1/projects.models.versions/list). - // - // Authorization: requires `Editor` role on the parent project. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` -} - -func (m *DeleteVersionRequest) Reset() { *m = DeleteVersionRequest{} } -func (m *DeleteVersionRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteVersionRequest) ProtoMessage() {} -func (*DeleteVersionRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{12} } - -func (m *DeleteVersionRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -// Request message for the SetDefaultVersion request. -type SetDefaultVersionRequest struct { - // Required. 
The name of the version to make the default for the model. You - // can get the names of all the versions of a model by calling - // [projects.models.versions.list](/ml/reference/rest/v1beta1/projects.models.versions/list). - // - // Authorization: requires `Editor` role on the parent project. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` -} - -func (m *SetDefaultVersionRequest) Reset() { *m = SetDefaultVersionRequest{} } -func (m *SetDefaultVersionRequest) String() string { return proto.CompactTextString(m) } -func (*SetDefaultVersionRequest) ProtoMessage() {} -func (*SetDefaultVersionRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{13} } - -func (m *SetDefaultVersionRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func init() { - proto.RegisterType((*Model)(nil), "google.cloud.ml.v1beta1.Model") - proto.RegisterType((*Version)(nil), "google.cloud.ml.v1beta1.Version") - proto.RegisterType((*ManualScaling)(nil), "google.cloud.ml.v1beta1.ManualScaling") - proto.RegisterType((*CreateModelRequest)(nil), "google.cloud.ml.v1beta1.CreateModelRequest") - proto.RegisterType((*ListModelsRequest)(nil), "google.cloud.ml.v1beta1.ListModelsRequest") - proto.RegisterType((*ListModelsResponse)(nil), "google.cloud.ml.v1beta1.ListModelsResponse") - proto.RegisterType((*GetModelRequest)(nil), "google.cloud.ml.v1beta1.GetModelRequest") - proto.RegisterType((*DeleteModelRequest)(nil), "google.cloud.ml.v1beta1.DeleteModelRequest") - proto.RegisterType((*CreateVersionRequest)(nil), "google.cloud.ml.v1beta1.CreateVersionRequest") - proto.RegisterType((*ListVersionsRequest)(nil), "google.cloud.ml.v1beta1.ListVersionsRequest") - proto.RegisterType((*ListVersionsResponse)(nil), "google.cloud.ml.v1beta1.ListVersionsResponse") - proto.RegisterType((*GetVersionRequest)(nil), "google.cloud.ml.v1beta1.GetVersionRequest") - proto.RegisterType((*DeleteVersionRequest)(nil), "google.cloud.ml.v1beta1.DeleteVersionRequest") - proto.RegisterType((*SetDefaultVersionRequest)(nil), "google.cloud.ml.v1beta1.SetDefaultVersionRequest") -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for ModelService service - -type ModelServiceClient interface { - // Creates a model which will later contain one or more versions. - // - // You must add at least one version before you can request predictions from - // the model. Add versions by calling - // [projects.models.versions.create](/ml/reference/rest/v1beta1/projects.models.versions/create). - CreateModel(ctx context.Context, in *CreateModelRequest, opts ...grpc.CallOption) (*Model, error) - // Lists the models in a project. - // - // Each project can contain multiple models, and each model can have multiple - // versions. - ListModels(ctx context.Context, in *ListModelsRequest, opts ...grpc.CallOption) (*ListModelsResponse, error) - // Gets information about a model, including its name, the description (if - // set), and the default version (if at least one version of the model has - // been deployed). - GetModel(ctx context.Context, in *GetModelRequest, opts ...grpc.CallOption) (*Model, error) - // Deletes a model. - // - // You can only delete a model if there are no versions in it. 
You can delete - // versions by calling - // [projects.models.versions.delete](/ml/reference/rest/v1beta1/projects.models.versions/delete). - DeleteModel(ctx context.Context, in *DeleteModelRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) - // Creates a new version of a model from a trained TensorFlow model. - // - // If the version created in the cloud by this call is the first deployed - // version of the specified model, it will be made the default version of the - // model. When you add a version to a model that already has one or more - // versions, the default version does not automatically change. If you want a - // new version to be the default, you must call - // [projects.models.versions.setDefault](/ml/reference/rest/v1beta1/projects.models.versions/setDefault). - CreateVersion(ctx context.Context, in *CreateVersionRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) - // Gets basic information about all the versions of a model. - // - // If you expect that a model has a lot of versions, or if you need to handle - // only a limited number of results at a time, you can request that the list - // be retrieved in batches (called pages): - ListVersions(ctx context.Context, in *ListVersionsRequest, opts ...grpc.CallOption) (*ListVersionsResponse, error) - // Gets information about a model version. - // - // Models can have multiple versions. You can call - // [projects.models.versions.list](/ml/reference/rest/v1beta1/projects.models.versions/list) - // to get the same information that this method returns for all of the - // versions of a model. - GetVersion(ctx context.Context, in *GetVersionRequest, opts ...grpc.CallOption) (*Version, error) - // Deletes a model version. - // - // Each model can have multiple versions deployed and in use at any given - // time. Use this method to remove a single version. - // - // Note: You cannot delete the version that is set as the default version - // of the model unless it is the only remaining version. - DeleteVersion(ctx context.Context, in *DeleteVersionRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) - // Designates a version to be the default for the model. - // - // The default version is used for prediction requests made against the model - // that don't specify a version. - // - // The first version to be created for a model is automatically set as the - // default. You must make any subsequent changes to the default version - // setting manually using this method. - SetDefaultVersion(ctx context.Context, in *SetDefaultVersionRequest, opts ...grpc.CallOption) (*Version, error) -} - -type modelServiceClient struct { - cc *grpc.ClientConn -} - -func NewModelServiceClient(cc *grpc.ClientConn) ModelServiceClient { - return &modelServiceClient{cc} -} - -func (c *modelServiceClient) CreateModel(ctx context.Context, in *CreateModelRequest, opts ...grpc.CallOption) (*Model, error) { - out := new(Model) - err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.ModelService/CreateModel", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *modelServiceClient) ListModels(ctx context.Context, in *ListModelsRequest, opts ...grpc.CallOption) (*ListModelsResponse, error) { - out := new(ListModelsResponse) - err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.ModelService/ListModels", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *modelServiceClient) GetModel(ctx context.Context, in *GetModelRequest, opts ...grpc.CallOption) (*Model, error) { - out := new(Model) - err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.ModelService/GetModel", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *modelServiceClient) DeleteModel(ctx context.Context, in *DeleteModelRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { - out := new(google_longrunning.Operation) - err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.ModelService/DeleteModel", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *modelServiceClient) CreateVersion(ctx context.Context, in *CreateVersionRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { - out := new(google_longrunning.Operation) - err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.ModelService/CreateVersion", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *modelServiceClient) ListVersions(ctx context.Context, in *ListVersionsRequest, opts ...grpc.CallOption) (*ListVersionsResponse, error) { - out := new(ListVersionsResponse) - err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.ModelService/ListVersions", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *modelServiceClient) GetVersion(ctx context.Context, in *GetVersionRequest, opts ...grpc.CallOption) (*Version, error) { - out := new(Version) - err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.ModelService/GetVersion", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *modelServiceClient) DeleteVersion(ctx context.Context, in *DeleteVersionRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { - out := new(google_longrunning.Operation) - err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.ModelService/DeleteVersion", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *modelServiceClient) SetDefaultVersion(ctx context.Context, in *SetDefaultVersionRequest, opts ...grpc.CallOption) (*Version, error) { - out := new(Version) - err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.ModelService/SetDefaultVersion", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for ModelService service - -type ModelServiceServer interface { - // Creates a model which will later contain one or more versions. - // - // You must add at least one version before you can request predictions from - // the model. Add versions by calling - // [projects.models.versions.create](/ml/reference/rest/v1beta1/projects.models.versions/create). - CreateModel(context.Context, *CreateModelRequest) (*Model, error) - // Lists the models in a project. - // - // Each project can contain multiple models, and each model can have multiple - // versions. - ListModels(context.Context, *ListModelsRequest) (*ListModelsResponse, error) - // Gets information about a model, including its name, the description (if - // set), and the default version (if at least one version of the model has - // been deployed). - GetModel(context.Context, *GetModelRequest) (*Model, error) - // Deletes a model. - // - // You can only delete a model if there are no versions in it. 
You can delete - // versions by calling - // [projects.models.versions.delete](/ml/reference/rest/v1beta1/projects.models.versions/delete). - DeleteModel(context.Context, *DeleteModelRequest) (*google_longrunning.Operation, error) - // Creates a new version of a model from a trained TensorFlow model. - // - // If the version created in the cloud by this call is the first deployed - // version of the specified model, it will be made the default version of the - // model. When you add a version to a model that already has one or more - // versions, the default version does not automatically change. If you want a - // new version to be the default, you must call - // [projects.models.versions.setDefault](/ml/reference/rest/v1beta1/projects.models.versions/setDefault). - CreateVersion(context.Context, *CreateVersionRequest) (*google_longrunning.Operation, error) - // Gets basic information about all the versions of a model. - // - // If you expect that a model has a lot of versions, or if you need to handle - // only a limited number of results at a time, you can request that the list - // be retrieved in batches (called pages): - ListVersions(context.Context, *ListVersionsRequest) (*ListVersionsResponse, error) - // Gets information about a model version. - // - // Models can have multiple versions. You can call - // [projects.models.versions.list](/ml/reference/rest/v1beta1/projects.models.versions/list) - // to get the same information that this method returns for all of the - // versions of a model. - GetVersion(context.Context, *GetVersionRequest) (*Version, error) - // Deletes a model version. - // - // Each model can have multiple versions deployed and in use at any given - // time. Use this method to remove a single version. - // - // Note: You cannot delete the version that is set as the default version - // of the model unless it is the only remaining version. - DeleteVersion(context.Context, *DeleteVersionRequest) (*google_longrunning.Operation, error) - // Designates a version to be the default for the model. - // - // The default version is used for prediction requests made against the model - // that don't specify a version. - // - // The first version to be created for a model is automatically set as the - // default. You must make any subsequent changes to the default version - // setting manually using this method. 
- SetDefaultVersion(context.Context, *SetDefaultVersionRequest) (*Version, error) -} - -func RegisterModelServiceServer(s *grpc.Server, srv ModelServiceServer) { - s.RegisterService(&_ModelService_serviceDesc, srv) -} - -func _ModelService_CreateModel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateModelRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ModelServiceServer).CreateModel(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.cloud.ml.v1beta1.ModelService/CreateModel", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ModelServiceServer).CreateModel(ctx, req.(*CreateModelRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ModelService_ListModels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListModelsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ModelServiceServer).ListModels(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.cloud.ml.v1beta1.ModelService/ListModels", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ModelServiceServer).ListModels(ctx, req.(*ListModelsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ModelService_GetModel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetModelRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ModelServiceServer).GetModel(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.cloud.ml.v1beta1.ModelService/GetModel", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ModelServiceServer).GetModel(ctx, req.(*GetModelRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ModelService_DeleteModel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteModelRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ModelServiceServer).DeleteModel(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.cloud.ml.v1beta1.ModelService/DeleteModel", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ModelServiceServer).DeleteModel(ctx, req.(*DeleteModelRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ModelService_CreateVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateVersionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ModelServiceServer).CreateVersion(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.cloud.ml.v1beta1.ModelService/CreateVersion", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ModelServiceServer).CreateVersion(ctx, req.(*CreateVersionRequest)) - } - return interceptor(ctx, 
in, info, handler) -} - -func _ModelService_ListVersions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListVersionsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ModelServiceServer).ListVersions(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.cloud.ml.v1beta1.ModelService/ListVersions", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ModelServiceServer).ListVersions(ctx, req.(*ListVersionsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ModelService_GetVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetVersionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ModelServiceServer).GetVersion(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.cloud.ml.v1beta1.ModelService/GetVersion", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ModelServiceServer).GetVersion(ctx, req.(*GetVersionRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ModelService_DeleteVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteVersionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ModelServiceServer).DeleteVersion(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.cloud.ml.v1beta1.ModelService/DeleteVersion", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ModelServiceServer).DeleteVersion(ctx, req.(*DeleteVersionRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ModelService_SetDefaultVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetDefaultVersionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ModelServiceServer).SetDefaultVersion(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.cloud.ml.v1beta1.ModelService/SetDefaultVersion", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ModelServiceServer).SetDefaultVersion(ctx, req.(*SetDefaultVersionRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _ModelService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "google.cloud.ml.v1beta1.ModelService", - HandlerType: (*ModelServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CreateModel", - Handler: _ModelService_CreateModel_Handler, - }, - { - MethodName: "ListModels", - Handler: _ModelService_ListModels_Handler, - }, - { - MethodName: "GetModel", - Handler: _ModelService_GetModel_Handler, - }, - { - MethodName: "DeleteModel", - Handler: _ModelService_DeleteModel_Handler, - }, - { - MethodName: "CreateVersion", - Handler: _ModelService_CreateVersion_Handler, - }, - { - MethodName: "ListVersions", - Handler: _ModelService_ListVersions_Handler, - }, - { - MethodName: "GetVersion", - Handler: _ModelService_GetVersion_Handler, - }, - { - MethodName: 
"DeleteVersion", - Handler: _ModelService_DeleteVersion_Handler, - }, - { - MethodName: "SetDefaultVersion", - Handler: _ModelService_SetDefaultVersion_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "google/cloud/ml/v1beta1/model_service.proto", -} - -func init() { proto.RegisterFile("google/cloud/ml/v1beta1/model_service.proto", fileDescriptor1) } - -var fileDescriptor1 = []byte{ - // 1013 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xcf, 0x6f, 0x1b, 0x45, - 0x14, 0xd6, 0x26, 0x71, 0x62, 0x3f, 0xd7, 0x89, 0x32, 0x04, 0x6a, 0x0c, 0xa1, 0xd6, 0x56, 0x69, - 0x2d, 0xa7, 0xdd, 0x25, 0x06, 0x55, 0x8a, 0x0b, 0x45, 0x2a, 0x91, 0x2a, 0xa4, 0x46, 0x44, 0x9b, - 0x96, 0x03, 0x97, 0xd5, 0xc6, 0x9e, 0x2e, 0x53, 0x76, 0x67, 0xb6, 0x3b, 0xb3, 0x11, 0x14, 0x7a, - 0x80, 0x03, 0x47, 0x0e, 0x20, 0xae, 0x5c, 0xb8, 0xf3, 0xcf, 0x70, 0xe7, 0x84, 0xf8, 0x23, 0x38, - 0xa1, 0xf9, 0xb1, 0xce, 0x3a, 0xfe, 0xb1, 0x1b, 0x24, 0x6e, 0x9e, 0x37, 0xdf, 0x9b, 0xf7, 0xcd, - 0xfb, 0xde, 0x7b, 0x3b, 0x86, 0xfd, 0x90, 0xb1, 0x30, 0xc2, 0xee, 0x28, 0x62, 0xd9, 0xd8, 0x8d, - 0x23, 0xf7, 0xfc, 0xe0, 0x0c, 0x8b, 0xe0, 0xc0, 0x8d, 0xd9, 0x18, 0x47, 0x3e, 0xc7, 0xe9, 0x39, - 0x19, 0x61, 0x27, 0x49, 0x99, 0x60, 0xe8, 0xba, 0x06, 0x3b, 0x0a, 0xec, 0xc4, 0x91, 0x63, 0xc0, - 0x9d, 0xb7, 0xcd, 0x29, 0x41, 0x42, 0xdc, 0x80, 0x52, 0x26, 0x02, 0x41, 0x18, 0xe5, 0xda, 0xad, - 0xf3, 0x7a, 0x71, 0x37, 0x13, 0x5f, 0x18, 0xf3, 0x4d, 0x63, 0x8e, 0x18, 0x0d, 0xd3, 0x8c, 0x52, - 0x42, 0x43, 0x97, 0x25, 0x38, 0x9d, 0xf2, 0xbd, 0x61, 0x40, 0x6a, 0x75, 0x96, 0x3d, 0x73, 0x05, - 0x89, 0x31, 0x17, 0x41, 0x9c, 0x68, 0x80, 0xfd, 0xa7, 0x05, 0xb5, 0x63, 0xc9, 0x15, 0x21, 0x58, - 0xa3, 0x41, 0x8c, 0xdb, 0x56, 0xd7, 0xea, 0x35, 0x3c, 0xf5, 0x1b, 0x75, 0xa1, 0x39, 0xc6, 0x7c, - 0x94, 0x92, 0x44, 0x1e, 0xda, 0x5e, 0x51, 0x5b, 0x45, 0x13, 0xfa, 0x04, 0xb6, 0xc6, 0xf8, 0x59, - 0x90, 0x45, 0xc2, 0x3f, 0xc7, 0x29, 0x97, 0xa8, 0xd5, 0xae, 0xd5, 0x6b, 0x0e, 0xba, 0xce, 0x82, - 0xdb, 0x3a, 0x9f, 0x69, 0x9c, 0xb7, 0x69, 0x1c, 0xcd, 0x1a, 0xb5, 0x61, 0x23, 0xc5, 0xa1, 0x24, - 0xdf, 0x5e, 0xeb, 0xae, 0xf6, 0x1a, 0x5e, 0xbe, 0x44, 0x43, 0x78, 0x93, 0xd1, 0x88, 0x50, 0xec, - 0x27, 0x29, 0x1e, 0x93, 0x91, 0x8c, 0xec, 0x47, 0x2c, 0x0c, 0x09, 0x0d, 0xdb, 0xb5, 0xae, 0xd5, - 0xab, 0x7b, 0xd7, 0x35, 0xe0, 0x64, 0xb2, 0xff, 0x58, 0x6f, 0xdb, 0xff, 0xac, 0xc0, 0x46, 0x1e, - 0xe1, 0xbf, 0x5d, 0x71, 0x17, 0x80, 0x70, 0xdf, 0x90, 0x55, 0xb7, 0xab, 0x7b, 0x0d, 0xc2, 0x8f, - 0xb4, 0x01, 0xed, 0xc1, 0xe6, 0x18, 0x27, 0x11, 0xfb, 0x3a, 0xc6, 0x54, 0xf8, 0x59, 0x4a, 0xda, - 0x6b, 0xea, 0x8c, 0xd6, 0x85, 0xf5, 0x69, 0x4a, 0xd0, 0x7d, 0x68, 0x8e, 0x52, 0x1c, 0x08, 0xec, - 0x4b, 0x09, 0x14, 0xeb, 0xe6, 0xa0, 0x93, 0x27, 0x29, 0xd7, 0xc7, 0x79, 0x92, 0xeb, 0xe3, 0x81, - 0x86, 0x4b, 0x03, 0x7a, 0x00, 0xad, 0x28, 0xe0, 0xc2, 0xcf, 0xb8, 0x71, 0x5f, 0x2f, 0x75, 0x6f, - 0x4a, 0x87, 0xa7, 0x5c, 0xfb, 0xdf, 0x86, 0xad, 0x34, 0xa3, 0xd2, 0x73, 0xa2, 0x52, 0x5d, 0x91, - 0xdc, 0x34, 0xe6, 0x3c, 0x43, 0xc7, 0xb0, 0x19, 0x07, 0x34, 0x0b, 0x22, 0x9f, 0x8f, 0x82, 0x48, - 0xa6, 0xb7, 0xa1, 0x22, 0xdd, 0x5a, 0xa8, 0xe6, 0xb1, 0x82, 0x9f, 0x6a, 0xb4, 0xd7, 0x8a, 0x8b, - 0x4b, 0x7b, 0x0f, 0x5a, 0x53, 0xfb, 0x68, 0x07, 0x6a, 0x94, 0x8d, 0x31, 0x57, 0x12, 0xd4, 0x3c, - 0xbd, 0xb0, 0xcf, 0x00, 0x7d, 0xac, 0x2e, 0xab, 0x2a, 0xd1, 0xc3, 0x2f, 0x32, 0xcc, 0x05, 0x7a, - 0x03, 0xd6, 0x93, 0x20, 0xc5, 0x54, 0x18, 0xbd, 0xcc, 0x0a, 0xbd, 0x0f, 0x35, 0xd5, 0x5d, 0x4a, - 0xab, 0xe6, 0xe0, 0x9d, 0xc5, 0xd4, 0xd4, 0x69, 0x1a, 0x6c, 0x87, 
0xb0, 0xfd, 0x98, 0x70, 0xa1, - 0x6c, 0xbc, 0x2c, 0xc4, 0x2e, 0x40, 0x12, 0x84, 0xd8, 0x17, 0xec, 0x4b, 0x4c, 0x8d, 0x9e, 0x0d, - 0x69, 0x79, 0x22, 0x0d, 0xe8, 0x2d, 0x50, 0x0b, 0x9f, 0x93, 0x97, 0x5a, 0xc9, 0x9a, 0x57, 0x97, - 0x86, 0x53, 0xf2, 0x12, 0xdb, 0x02, 0x50, 0x31, 0x10, 0x4f, 0x18, 0xe5, 0x18, 0xdd, 0x83, 0x75, - 0xc5, 0x43, 0xde, 0x7c, 0xb5, 0x02, 0x6b, 0x83, 0x46, 0xb7, 0x60, 0x8b, 0xe2, 0xaf, 0x84, 0x5f, - 0xa0, 0xa3, 0x4b, 0xb4, 0x25, 0xcd, 0x27, 0x39, 0x25, 0x7b, 0x0f, 0xb6, 0x1e, 0x61, 0x31, 0x95, - 0xbf, 0x39, 0xd5, 0x6e, 0xf7, 0x00, 0x1d, 0xe1, 0x08, 0x5f, 0xca, 0xf4, 0x3c, 0xe4, 0x73, 0xd8, - 0xd1, 0x9a, 0xe4, 0xed, 0x5a, 0x92, 0xb2, 0x21, 0x6c, 0xe4, 0xa5, 0xb5, 0x52, 0x71, 0x00, 0xe4, - 0x0e, 0x36, 0x81, 0xd7, 0x64, 0xca, 0x8c, 0xfd, 0x7f, 0x55, 0xe7, 0x5b, 0xd8, 0x99, 0x0e, 0x65, - 0xf4, 0xf9, 0x00, 0xea, 0x86, 0x4d, 0xae, 0x50, 0x39, 0xff, 0x89, 0x47, 0x65, 0x95, 0x6e, 0xc3, - 0xf6, 0x23, 0x2c, 0x2e, 0x65, 0x74, 0x5e, 0xf6, 0xfb, 0xb0, 0xa3, 0x75, 0xaa, 0x80, 0x75, 0xa0, - 0x7d, 0x8a, 0xc5, 0xd1, 0xd4, 0x30, 0x5d, 0x82, 0x1f, 0xfc, 0x0d, 0x70, 0x4d, 0xc9, 0x7f, 0xaa, - 0xbf, 0x4e, 0xe8, 0x47, 0x0b, 0x9a, 0x85, 0xfe, 0x43, 0xfb, 0x0b, 0x6f, 0x3e, 0xdb, 0xa5, 0x9d, - 0x92, 0x42, 0xb6, 0x07, 0xdf, 0xff, 0xf1, 0xd7, 0xcf, 0x2b, 0x77, 0xec, 0x9b, 0x93, 0x4f, 0xe3, - 0x37, 0x5a, 0xc6, 0x0f, 0x93, 0x94, 0x3d, 0xc7, 0x23, 0xc1, 0xdd, 0xfe, 0x2b, 0xfd, 0xb9, 0xe4, - 0x43, 0xdd, 0xab, 0xe8, 0x27, 0x0b, 0xe0, 0xa2, 0x87, 0x50, 0x7f, 0x61, 0x88, 0x99, 0x8e, 0xee, - 0xec, 0x57, 0xc2, 0x6a, 0xd1, 0xed, 0x7d, 0xc5, 0x6d, 0x0f, 0x55, 0xe1, 0x86, 0xbe, 0xb3, 0xa0, - 0x9e, 0xb7, 0x18, 0xea, 0x2d, 0x0c, 0x73, 0xa9, 0x0b, 0x4b, 0xf3, 0x33, 0x87, 0x83, 0x54, 0xa9, - 0xc0, 0xc0, 0x10, 0x70, 0xfb, 0xaf, 0xd0, 0x0f, 0x16, 0x34, 0x0b, 0xfd, 0xbb, 0x44, 0xa9, 0xd9, - 0x2e, 0xef, 0xec, 0xe6, 0xe0, 0xc2, 0x8b, 0xc1, 0xf9, 0x34, 0x7f, 0x31, 0xe4, 0x44, 0xfa, 0x95, - 0x88, 0xfc, 0x6a, 0x41, 0x6b, 0x6a, 0x3c, 0xa0, 0xbb, 0x25, 0x45, 0x33, 0x5d, 0x98, 0x65, 0x64, - 0x3e, 0x52, 0x64, 0x0e, 0x6d, 0x67, 0x89, 0x32, 0x17, 0x74, 0xdc, 0xbc, 0x11, 0x87, 0xf9, 0x48, - 0x41, 0xbf, 0x59, 0x70, 0xad, 0xd8, 0xe8, 0xe8, 0xce, 0xd2, 0xc2, 0xb8, 0x34, 0x7a, 0x3a, 0x77, - 0x2b, 0xa2, 0x4d, 0x21, 0xdd, 0x53, 0x74, 0xdf, 0x45, 0x57, 0xa4, 0xab, 0x0a, 0xfd, 0x62, 0x20, - 0x2c, 0x29, 0xf4, 0x99, 0xa9, 0xd1, 0x29, 0x1d, 0x4f, 0xf3, 0x48, 0x2d, 0x12, 0x74, 0xc2, 0x48, - 0x6a, 0xfb, 0x8b, 0x05, 0xad, 0xa9, 0xe1, 0xb3, 0x44, 0xdb, 0x79, 0x43, 0xaa, 0x4c, 0x5b, 0xc3, - 0xab, 0x7f, 0x55, 0x5e, 0xbf, 0x5b, 0xb0, 0x3d, 0x33, 0xe8, 0xd0, 0xc1, 0x42, 0x6e, 0x8b, 0x86, - 0x62, 0x85, 0xd4, 0x1d, 0x29, 0x8a, 0x0f, 0xec, 0xc3, 0xab, 0x51, 0x1c, 0xf2, 0x49, 0xc8, 0xa1, - 0xd5, 0x7f, 0xf8, 0x02, 0x6e, 0x8c, 0x58, 0x3c, 0x13, 0x2c, 0x48, 0x48, 0x1e, 0xf0, 0xe1, 0x76, - 0x71, 0x10, 0x9f, 0xc8, 0x57, 0xdc, 0x89, 0xf5, 0xf9, 0xa1, 0xf1, 0x08, 0x59, 0x14, 0xd0, 0xd0, - 0x61, 0x69, 0xe8, 0x86, 0x98, 0xaa, 0x37, 0x9e, 0xab, 0xb7, 0x82, 0x84, 0xf0, 0x99, 0xff, 0x1c, - 0xf7, 0xe3, 0xe8, 0x6c, 0x5d, 0xa1, 0xde, 0xfb, 0x37, 0x00, 0x00, 0xff, 0xff, 0x04, 0x39, 0xff, - 0x08, 0x98, 0x0c, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/operation_metadata.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/operation_metadata.pb.go deleted file mode 100644 index 293077731..000000000 --- a/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/operation_metadata.pb.go +++ /dev/null @@ -1,161 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: google/cloud/ml/v1beta1/operation_metadata.proto - -package ml - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "google.golang.org/genproto/googleapis/api/annotations" -import google_protobuf2 "github.com/golang/protobuf/ptypes/timestamp" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// The operation type. -type OperationMetadata_OperationType int32 - -const ( - // Unspecified operation type. - OperationMetadata_OPERATION_TYPE_UNSPECIFIED OperationMetadata_OperationType = 0 - // An operation to create a new version. - OperationMetadata_CREATE_VERSION OperationMetadata_OperationType = 1 - // An operation to delete an existing version. - OperationMetadata_DELETE_VERSION OperationMetadata_OperationType = 2 - // An operation to delete an existing model. - OperationMetadata_DELETE_MODEL OperationMetadata_OperationType = 3 -) - -var OperationMetadata_OperationType_name = map[int32]string{ - 0: "OPERATION_TYPE_UNSPECIFIED", - 1: "CREATE_VERSION", - 2: "DELETE_VERSION", - 3: "DELETE_MODEL", -} -var OperationMetadata_OperationType_value = map[string]int32{ - "OPERATION_TYPE_UNSPECIFIED": 0, - "CREATE_VERSION": 1, - "DELETE_VERSION": 2, - "DELETE_MODEL": 3, -} - -func (x OperationMetadata_OperationType) String() string { - return proto.EnumName(OperationMetadata_OperationType_name, int32(x)) -} -func (OperationMetadata_OperationType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor2, []int{0, 0} -} - -// Represents the metadata of the long-running operation. -type OperationMetadata struct { - // The time the operation was submitted. - CreateTime *google_protobuf2.Timestamp `protobuf:"bytes,1,opt,name=create_time,json=createTime" json:"create_time,omitempty"` - // The time operation processing started. - StartTime *google_protobuf2.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"` - // The time operation processing completed. - EndTime *google_protobuf2.Timestamp `protobuf:"bytes,3,opt,name=end_time,json=endTime" json:"end_time,omitempty"` - // Indicates whether a request to cancel this operation has been made. - IsCancellationRequested bool `protobuf:"varint,4,opt,name=is_cancellation_requested,json=isCancellationRequested" json:"is_cancellation_requested,omitempty"` - // The operation type. - OperationType OperationMetadata_OperationType `protobuf:"varint,5,opt,name=operation_type,json=operationType,enum=google.cloud.ml.v1beta1.OperationMetadata_OperationType" json:"operation_type,omitempty"` - // Contains the name of the model associated with the operation. - ModelName string `protobuf:"bytes,6,opt,name=model_name,json=modelName" json:"model_name,omitempty"` - // Contains the version associated with the operation. 
- Version *Version `protobuf:"bytes,7,opt,name=version" json:"version,omitempty"` -} - -func (m *OperationMetadata) Reset() { *m = OperationMetadata{} } -func (m *OperationMetadata) String() string { return proto.CompactTextString(m) } -func (*OperationMetadata) ProtoMessage() {} -func (*OperationMetadata) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } - -func (m *OperationMetadata) GetCreateTime() *google_protobuf2.Timestamp { - if m != nil { - return m.CreateTime - } - return nil -} - -func (m *OperationMetadata) GetStartTime() *google_protobuf2.Timestamp { - if m != nil { - return m.StartTime - } - return nil -} - -func (m *OperationMetadata) GetEndTime() *google_protobuf2.Timestamp { - if m != nil { - return m.EndTime - } - return nil -} - -func (m *OperationMetadata) GetIsCancellationRequested() bool { - if m != nil { - return m.IsCancellationRequested - } - return false -} - -func (m *OperationMetadata) GetOperationType() OperationMetadata_OperationType { - if m != nil { - return m.OperationType - } - return OperationMetadata_OPERATION_TYPE_UNSPECIFIED -} - -func (m *OperationMetadata) GetModelName() string { - if m != nil { - return m.ModelName - } - return "" -} - -func (m *OperationMetadata) GetVersion() *Version { - if m != nil { - return m.Version - } - return nil -} - -func init() { - proto.RegisterType((*OperationMetadata)(nil), "google.cloud.ml.v1beta1.OperationMetadata") - proto.RegisterEnum("google.cloud.ml.v1beta1.OperationMetadata_OperationType", OperationMetadata_OperationType_name, OperationMetadata_OperationType_value) -} - -func init() { proto.RegisterFile("google/cloud/ml/v1beta1/operation_metadata.proto", fileDescriptor2) } - -var fileDescriptor2 = []byte{ - // 457 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x5f, 0x6b, 0xdb, 0x30, - 0x14, 0xc5, 0xe7, 0xb6, 0x6b, 0x1b, 0x75, 0x0d, 0x99, 0x1f, 0x56, 0x2f, 0x6c, 0xab, 0xe9, 0x53, - 0x60, 0x60, 0xaf, 0x1d, 0x83, 0x75, 0x7d, 0x6a, 0x13, 0x0d, 0x02, 0x6d, 0x6c, 0x54, 0xaf, 0xb0, - 0xbd, 0x18, 0xc5, 0xbe, 0x33, 0x02, 0xfd, 0xf1, 0x2c, 0x25, 0xd0, 0x0f, 0xb4, 0xef, 0x39, 0x22, - 0xd9, 0x34, 0x23, 0x84, 0x3e, 0xea, 0xdc, 0xf3, 0xbb, 0xf7, 0xf8, 0x5e, 0xa3, 0x4f, 0x95, 0x52, - 0x15, 0x87, 0xb8, 0xe0, 0x6a, 0x51, 0xc6, 0x82, 0xc7, 0xcb, 0xf3, 0x39, 0x18, 0x7a, 0x1e, 0xab, - 0x1a, 0x1a, 0x6a, 0x98, 0x92, 0xb9, 0x00, 0x43, 0x4b, 0x6a, 0x68, 0x54, 0x37, 0xca, 0x28, 0xff, - 0xc4, 0x11, 0x91, 0x25, 0x22, 0xc1, 0xa3, 0x96, 0x18, 0xbe, 0x6b, 0x5b, 0xd1, 0x9a, 0xc5, 0x54, - 0x4a, 0x65, 0x2c, 0xae, 0x1d, 0x36, 0xfc, 0xb8, 0x6d, 0x90, 0x50, 0x25, 0xf0, 0x5c, 0x43, 0xb3, - 0x64, 0x05, 0xb4, 0xe6, 0xd3, 0xd6, 0x6c, 0x5f, 0xf3, 0xc5, 0xef, 0xd8, 0x30, 0x01, 0xda, 0x50, - 0x51, 0x3b, 0xc3, 0xd9, 0xdf, 0x3d, 0xf4, 0x3a, 0xe9, 0x12, 0xde, 0xb5, 0x01, 0xfd, 0x2b, 0x74, - 0x54, 0x34, 0x40, 0x0d, 0xe4, 0x2b, 0x7f, 0xe0, 0x85, 0xde, 0xe8, 0xe8, 0x62, 0x18, 0xb5, 0x81, - 0xbb, 0x66, 0x51, 0xd6, 0x35, 0x23, 0xc8, 0xd9, 0x57, 0x82, 0x7f, 0x89, 0x90, 0x36, 0xb4, 0x31, - 0x8e, 0xdd, 0x79, 0x96, 0xed, 0x59, 0xb7, 0x45, 0xbf, 0xa0, 0x43, 0x90, 0xa5, 0x03, 0x77, 0x9f, - 0x05, 0x0f, 0x40, 0x96, 0x16, 0xfb, 0x86, 0xde, 0x32, 0x9d, 0x17, 0x54, 0x16, 0xc0, 0xb9, 0xdb, - 0x75, 0x03, 0x7f, 0x16, 0xa0, 0x0d, 0x94, 0xc1, 0x5e, 0xe8, 0x8d, 0x0e, 0xc9, 0x09, 0xd3, 0xe3, - 0xb5, 0x3a, 0xe9, 0xca, 0x7e, 0x8e, 0xfa, 0x4f, 0x17, 0x32, 0x8f, 0x35, 0x04, 0x2f, 0x43, 0x6f, - 0xd4, 0xbf, 0xf8, 0x1a, 0x6d, 0x39, 0x4f, 0xb4, 0xb1, 0xae, 0x27, 0x25, 0x7b, 0xac, 0x81, 0x1c, - 0xab, 0xf5, 
0xa7, 0xff, 0x1e, 0x21, 0x77, 0x19, 0x49, 0x05, 0x04, 0xfb, 0xa1, 0x37, 0xea, 0x91, - 0x9e, 0x55, 0x66, 0xd4, 0x66, 0x3f, 0x58, 0x42, 0xa3, 0x99, 0x92, 0xc1, 0x81, 0xfd, 0xe2, 0x70, - 0xeb, 0xe0, 0x07, 0xe7, 0x23, 0x1d, 0x70, 0xc6, 0xd0, 0xf1, 0x7f, 0xa3, 0xfd, 0x0f, 0x68, 0x98, - 0xa4, 0x98, 0x5c, 0x67, 0xd3, 0x64, 0x96, 0x67, 0x3f, 0x53, 0x9c, 0xff, 0x98, 0xdd, 0xa7, 0x78, - 0x3c, 0xfd, 0x3e, 0xc5, 0x93, 0xc1, 0x0b, 0xdf, 0x47, 0xfd, 0x31, 0xc1, 0xd7, 0x19, 0xce, 0x1f, - 0x30, 0xb9, 0x9f, 0x26, 0xb3, 0x81, 0xb7, 0xd2, 0x26, 0xf8, 0x16, 0xaf, 0x69, 0x3b, 0xfe, 0x00, - 0xbd, 0x6a, 0xb5, 0xbb, 0x64, 0x82, 0x6f, 0x07, 0xbb, 0x37, 0x4b, 0x74, 0x5a, 0x28, 0xb1, 0x11, - 0x8d, 0xd6, 0xac, 0x8b, 0x77, 0xf3, 0x66, 0x63, 0x31, 0xe9, 0xea, 0x66, 0xa9, 0xf7, 0xeb, 0xb2, - 0xc5, 0x2a, 0xc5, 0xa9, 0xac, 0x22, 0xd5, 0x54, 0x71, 0x05, 0xd2, 0x5e, 0x34, 0x76, 0x25, 0x5a, - 0x33, 0xbd, 0xf1, 0x47, 0x5f, 0x09, 0x3e, 0xdf, 0xb7, 0xae, 0xcf, 0xff, 0x02, 0x00, 0x00, 0xff, - 0xff, 0x89, 0xf8, 0x21, 0xa7, 0x5f, 0x03, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/prediction_service.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/prediction_service.pb.go deleted file mode 100644 index 7d6a8ada9..000000000 --- a/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/prediction_service.pb.go +++ /dev/null @@ -1,343 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/cloud/ml/v1beta1/prediction_service.proto - -package ml - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "google.golang.org/genproto/googleapis/api/annotations" -import google_api3 "google.golang.org/genproto/googleapis/api/httpbody" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// Request for predictions to be issued against a trained model. -// -// The body of the request is a single JSON object with a single top-level -// field: -// -//
-// <dl>
-//   <dt>instances</dt>
-//   <dd>A JSON array containing values representing the instances to use for
-//       prediction.</dd>
-// </dl>
-//
-// The structure of each element of the instances list is determined by your
-// model's input definition. Instances can include named inputs or can contain
-// only unlabeled values.
-//
-// Not all data includes named inputs. Some instances will be simple
-// JSON values (boolean, number, or string). However, instances are often lists
-// of simple values, or complex nested lists. Here are some examples of request
-// bodies:
-//
-// CSV data with each row encoded as a string value:
-// <pre>
-// {"instances": ["1.0,true,\\"x\\"", "-2.0,false,\\"y\\""]}
-// </pre>
-// Plain text:
-// <pre>
-// {"instances": ["the quick brown fox", "la bruja le dio"]}
-// </pre>
-// Sentences encoded as lists of words (vectors of strings):
-// <pre>
-// {
-//   "instances": [
-//     ["the","quick","brown"],
-//     ["la","bruja","le"],
-//     ...
-//   ]
-// }
-// </pre>
-// Floating point scalar values:
-// <pre>
-// {"instances": [0.0, 1.1, 2.2]}
-// </pre>
-// Vectors of integers:
-// <pre>
-// {
-//   "instances": [
-//     [0, 1, 2],
-//     [3, 4, 5],
-//     ...
-//   ]
-// }
-// </pre>
-// Tensors (in this case, two-dimensional tensors):
-// <pre>
-// {
-//   "instances": [
-//     [
-//       [0, 1, 2],
-//       [3, 4, 5]
-//     ],
-//     ...
-//   ]
-// }
-// </pre>
-// Images can be represented different ways. In this encoding scheme the first
-// two dimensions represent the rows and columns of the image, and the third
-// contains lists (vectors) of the R, G, and B values for each pixel.
-// <pre>
-// {
-//   "instances": [
-//     [
-//       [
-//         [138, 30, 66],
-//         [130, 20, 56],
-//         ...
-//       ],
-//       [
-//         [126, 38, 61],
-//         [122, 24, 57],
-//         ...
-//       ],
-//       ...
-//     ],
-//     ...
-//   ]
-// }
-// </pre>
-// JSON strings must be encoded as UTF-8. To send binary data, you must
-// base64-encode the data and mark it as binary. To mark a JSON string
-// as binary, replace it with a JSON object with a single attribute named `b64`:
-// <pre>{"b64": "..."} </pre>
-// For example:
-//
-// Two Serialized tf.Examples (fake data, for illustrative purposes only):
-// <pre>
-// {"instances": [{"b64": "X5ad6u"}, {"b64": "IA9j4nx"}]}
-// </pre>
-// Two JPEG image byte strings (fake data, for illustrative purposes only):
-// <pre>
-// {"instances": [{"b64": "ASa8asdf"}, {"b64": "JLK7ljk3"}]}
-// </pre>
-// If your data includes named references, format each instance as a JSON object
-// with the named references as the keys:
-//
-// JSON input data to be preprocessed:
-// <pre>
-// {
-//   "instances": [
-//     {
-//       "a": 1.0,
-//       "b": true,
-//       "c": "x"
-//     },
-//     {
-//       "a": -2.0,
-//       "b": false,
-//       "c": "y"
-//     }
-//   ]
-// }
-// </pre>
-// Some models have an underlying TensorFlow graph that accepts multiple input
-// tensors. In this case, you should use the names of JSON name/value pairs to
-// identify the input tensors, as shown in the following examples:
-//
-// For a graph with input tensor aliases "tag" (string) and "image"
-// (base64-encoded string):
-// <pre>
-// {
-//   "instances": [
-//     {
-//       "tag": "beach",
-//       "image": {"b64": "ASa8asdf"}
-//     },
-//     {
-//       "tag": "car",
-//       "image": {"b64": "JLK7ljk3"}
-//     }
-//   ]
-// }
-// </pre>
-// For a graph with input tensor aliases "tag" (string) and "image"
-// (3-dimensional array of 8-bit ints):
-// <pre>
-// {
-//   "instances": [
-//     {
-//       "tag": "beach",
-//       "image": [
-//         [
-//           [138, 30, 66],
-//           [130, 20, 56],
-//           ...
-//         ],
-//         [
-//           [126, 38, 61],
-//           [122, 24, 57],
-//           ...
-//         ],
-//         ...
-//       ]
-//     },
-//     {
-//       "tag": "car",
-//       "image": [
-//         [
-//           [255, 0, 102],
-//           [255, 0, 97],
-//           ...
-//         ],
-//         [
-//           [254, 1, 101],
-//           [254, 2, 93],
-//           ...
-//         ],
-//         ...
-//       ]
-//     },
-//     ...
-//   ]
-// }
-// </pre>
-// </pre>
-// If the call is successful, the response body will contain one prediction
-// entry per instance in the request body. If prediction fails for any
-// instance, the response body will contain no predictions and will contain
-// a single error entry instead.
-type PredictRequest struct {
-	// Required. The resource name of a model or a version.
-	//
-	// Authorization: requires `Viewer` role on the parent project.
-	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	//
-	// Required. The prediction request body.
-	HttpBody *google_api3.HttpBody `protobuf:"bytes,2,opt,name=http_body,json=httpBody" json:"http_body,omitempty"`
-}
-
-func (m *PredictRequest) Reset()                    { *m = PredictRequest{} }
-func (m *PredictRequest) String() string            { return proto.CompactTextString(m) }
-func (*PredictRequest) ProtoMessage()               {}
-func (*PredictRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} }
-
-func (m *PredictRequest) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *PredictRequest) GetHttpBody() *google_api3.HttpBody {
-	if m != nil {
-		return m.HttpBody
-	}
-	return nil
-}
-
-func init() {
-	proto.RegisterType((*PredictRequest)(nil), "google.cloud.ml.v1beta1.PredictRequest")
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// Client API for OnlinePredictionService service
-
-type OnlinePredictionServiceClient interface {
-	// Performs prediction on the data in the request.
-	//
-	// **** REMOVE FROM GENERATED DOCUMENTATION
-	Predict(ctx context.Context, in *PredictRequest, opts ...grpc.CallOption) (*google_api3.HttpBody, error)
-}
-
-type onlinePredictionServiceClient struct {
-	cc *grpc.ClientConn
-}
-
-func NewOnlinePredictionServiceClient(cc *grpc.ClientConn) OnlinePredictionServiceClient {
-	return &onlinePredictionServiceClient{cc}
-}
-
-func (c *onlinePredictionServiceClient) Predict(ctx context.Context, in *PredictRequest, opts ...grpc.CallOption) (*google_api3.HttpBody, error) {
-	out := new(google_api3.HttpBody)
-	err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.OnlinePredictionService/Predict", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
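The deleted comment above documents the `instances`/`b64` JSON protocol, and the generated client below it follows that protocol. As context for what this PR removes, a minimal sketch of driving that client in Go; the endpoint, model resource name, image bytes, and the insecure connection are hypothetical placeholders for illustration only, and real calls would need TLS and OAuth credentials.

// Hedged sketch, not part of the diff: exercising the removed
// OnlinePredictionService client with a b64-encoded instance.
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"

	httpbody "google.golang.org/genproto/googleapis/api/httpbody"
	ml "google.golang.org/genproto/googleapis/cloud/ml/v1beta1"
)

func main() {
	// Build the JSON body described in the comment above: each instance
	// carries a named "tag" input plus a base64-encoded ("b64") image.
	img := []byte{0x89, 0x50, 0x4e, 0x47} // hypothetical image bytes
	body, err := json.Marshal(map[string]interface{}{
		"instances": []interface{}{
			map[string]interface{}{
				"tag":   "beach",
				"image": map[string]string{"b64": base64.StdEncoding.EncodeToString(img)},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	conn, err := grpc.Dial("ml.googleapis.com:443", grpc.WithInsecure()) // real use needs TLS + auth
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := ml.NewOnlinePredictionServiceClient(conn)
	resp, err := client.Predict(context.Background(), &ml.PredictRequest{
		Name:     "projects/my-project/models/my-model", // hypothetical resource name
		HttpBody: &httpbody.HttpBody{ContentType: "application/json", Data: body},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", resp.Data) // on success: one prediction entry per instance
}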
-
-// Server API for OnlinePredictionService service
-
-type OnlinePredictionServiceServer interface {
-	// Performs prediction on the data in the request.
-	//
-	// **** REMOVE FROM GENERATED DOCUMENTATION
-	Predict(context.Context, *PredictRequest) (*google_api3.HttpBody, error)
-}
-
-func RegisterOnlinePredictionServiceServer(s *grpc.Server, srv OnlinePredictionServiceServer) {
-	s.RegisterService(&_OnlinePredictionService_serviceDesc, srv)
-}
-
-func _OnlinePredictionService_Predict_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(PredictRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(OnlinePredictionServiceServer).Predict(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/google.cloud.ml.v1beta1.OnlinePredictionService/Predict",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(OnlinePredictionServiceServer).Predict(ctx, req.(*PredictRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-var _OnlinePredictionService_serviceDesc = grpc.ServiceDesc{
-	ServiceName: "google.cloud.ml.v1beta1.OnlinePredictionService",
-	HandlerType: (*OnlinePredictionServiceServer)(nil),
-	Methods: []grpc.MethodDesc{
-		{
-			MethodName: "Predict",
-			Handler:    _OnlinePredictionService_Predict_Handler,
-		},
-	},
-	Streams:  []grpc.StreamDesc{},
-	Metadata: "google/cloud/ml/v1beta1/prediction_service.proto",
-}
-
-func init() { proto.RegisterFile("google/cloud/ml/v1beta1/prediction_service.proto", fileDescriptor3) }
-
-var fileDescriptor3 = []byte{
-	// 312 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4a, 0x03, 0x31,
-	0x14, 0xc7, 0x49, 0x11, 0xb5, 0x11, 0x5c, 0x04, 0xb1, 0xb5, 0x08, 0x96, 0xba, 0xb0, 0x74, 0x91,
-	0xd8, 0xba, 0xb2, 0xe2, 0xa6, 0x2b, 0x77, 0x0e, 0x75, 0x21, 0xb8, 0x29, 0xe9, 0x4c, 0x48, 0x23,
-	0x99, 0xbc, 0x38, 0x93, 0x16, 0x8b, 0xb8, 0xf1, 0x0a, 0x3d, 0x9a, 0x57, 0xf0, 0x20, 0x92, 0x49,
-	0x28, 0xca, 0xe8, 0xee, 0x31, 0x6f, 0x7e, 0xef, 0xff, 0x11, 0x7c, 0x29, 0x01, 0xa4, 0x16, 0x2c,
-	0xd5, 0xb0, 0xcc, 0x58, 0xae, 0xd9, 0x6a, 0x38, 0x17, 0x8e, 0x0f, 0x99, 0x2d, 0x44, 0xa6, 0x52,
-	0xa7, 0xc0, 0xcc, 0x4a, 0x51, 0xac, 0x54, 0x2a, 0xa8, 0x2d, 0xc0, 0x01, 0x69, 0x05, 0x82, 0x56,
-	0x04, 0xcd, 0x35, 0x8d, 0x44, 0xe7, 0x34, 0x9e, 0xe2, 0x56, 0x31, 0x6e, 0x0c, 0x38, 0xee, 0xe9,
-	0x32, 0x60, 0x9d, 0x93, 0x1f, 0xdb, 0x85, 0x73, 0x76, 0x0e, 0xd9, 0x3a, 0xac, 0x7a, 0x8f, 0xf8,
-	0x30, 0x09, 0x6a, 0x53, 0xf1, 0xb2, 0x14, 0xa5, 0x23, 0x04, 0xef, 0x18, 0x9e, 0x8b, 0x36, 0xea,
-	0xa2, 0x7e, 0x73, 0x5a, 0xcd, 0x64, 0x88, 0x9b, 0x9e, 0x9b, 0x79, 0xb0, 0xdd, 0xe8, 0xa2, 0xfe,
-	0xc1, 0xe8, 0x88, 0x46, 0x2f, 0xdc, 0x2a, 0x7a, 0xe7, 0x9c, 0x9d, 0x40, 0xb6, 0x9e, 0xee, 0x2f,
-	0xe2, 0x34, 0xda, 0x20, 0xdc, 0xba, 0x37, 0x5a, 0x19, 0x91, 0x6c, 0xd3, 0x3c, 0x84, 0x30, 0xe4,
-	0x15, 0xef, 0xc5, 0x8f, 0xe4, 0x82, 0xfe, 0x13, 0x89, 0xfe, 0xb6, 0xd5, 0xf9, 0x53, 0xaf, 0x47,
-	0x3f, 0x3e, 0xbf, 0x36, 0x8d, 0x7e, 0xef, 0x7c, 0xdb, 0xdd, 0x9b, 0x37, 0x7c, 0x6b, 0x0b, 0x78,
-	0x16, 0xa9, 0x2b, 0xd9, 0x60, 0xf0, 0x3e, 0x8e, 0x75, 0x8e, 0xd1, 0x60, 0xb2, 0xc2, 0x67, 0x29,
-	0xe4, 0x35, 0x4d, 0x7f, 0x33, 0x1e, 0x98, 0x1c, 0xd7, 0xfc, 0x26, 0xbe, 0xa9, 0x04, 0x3d, 0x5d,
-	0x47, 0x4c, 0x82, 0xe6, 0x46, 0x52, 0x28, 0x24, 0x93, 0xc2, 0x54, 0x3d, 0xb2, 0xb0, 0xe2, 0x56,
-	0x95, 0xb5, 0xe7, 0xbc, 0xc9, 0xf5, 0x7c, 0xb7, 0xfa, 0xeb, 0xea, 0x3b, 0x00, 0x00, 0xff, 0xff,
-	0x00, 0x26, 0x25, 0x67, 0xf3, 0x01, 0x00, 0x00,
-}
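On the server side, the Register/handler plumbing deleted above was wired up along these lines. A hedged sketch only: the echo backend and listen address are hypothetical stand-ins, not anything this repository actually ran.

// Hedged sketch, not part of the diff: implementing and registering the
// removed OnlinePredictionServiceServer interface.
package main

import (
	"log"
	"net"

	"golang.org/x/net/context"
	"google.golang.org/grpc"

	httpbody "google.golang.org/genproto/googleapis/api/httpbody"
	ml "google.golang.org/genproto/googleapis/cloud/ml/v1beta1"
)

// echoPredictor satisfies the deleted OnlinePredictionServiceServer interface.
type echoPredictor struct{}

func (echoPredictor) Predict(ctx context.Context, req *ml.PredictRequest) (*httpbody.HttpBody, error) {
	// A real backend would decode req.HttpBody.Data, run the model named by
	// req.Name, and return encoded predictions; this stub echoes the body.
	return req.HttpBody, nil
}

func main() {
	lis, err := net.Listen("tcp", ":8080") // hypothetical address
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	ml.RegisterOnlinePredictionServiceServer(srv, echoPredictor{})
	log.Fatal(srv.Serve(lis))
}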
diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/project_service.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/project_service.pb.go
deleted file mode 100644
index cdf5bbc57..000000000
--- a/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/project_service.pb.go
+++ /dev/null
@@ -1,178 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/cloud/ml/v1beta1/project_service.proto
-
-package ml
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import _ "google.golang.org/genproto/googleapis/api/annotations"
-
-import (
-	context "golang.org/x/net/context"
-	grpc "google.golang.org/grpc"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// Requests service account information associated with a project.
-type GetConfigRequest struct {
-	// Required. The project name.
-	//
-	// Authorization: requires `Viewer` role on the specified project.
-	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-}
-
-func (m *GetConfigRequest) Reset()                    { *m = GetConfigRequest{} }
-func (m *GetConfigRequest) String() string            { return proto.CompactTextString(m) }
-func (*GetConfigRequest) ProtoMessage()               {}
-func (*GetConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} }
-
-func (m *GetConfigRequest) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-// Returns service account information associated with a project.
-type GetConfigResponse struct {
-	// The service account Cloud ML uses to access resources in the project.
-	ServiceAccount string `protobuf:"bytes,1,opt,name=service_account,json=serviceAccount" json:"service_account,omitempty"`
-	// The project number for `service_account`.
-	ServiceAccountProject int64 `protobuf:"varint,2,opt,name=service_account_project,json=serviceAccountProject" json:"service_account_project,omitempty"`
-}
-
-func (m *GetConfigResponse) Reset()                    { *m = GetConfigResponse{} }
-func (m *GetConfigResponse) String() string            { return proto.CompactTextString(m) }
-func (*GetConfigResponse) ProtoMessage()               {}
-func (*GetConfigResponse) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{1} }
-
-func (m *GetConfigResponse) GetServiceAccount() string {
-	if m != nil {
-		return m.ServiceAccount
-	}
-	return ""
-}
-
-func (m *GetConfigResponse) GetServiceAccountProject() int64 {
-	if m != nil {
-		return m.ServiceAccountProject
-	}
-	return 0
-}
-
-func init() {
-	proto.RegisterType((*GetConfigRequest)(nil), "google.cloud.ml.v1beta1.GetConfigRequest")
-	proto.RegisterType((*GetConfigResponse)(nil), "google.cloud.ml.v1beta1.GetConfigResponse")
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// Client API for ProjectManagementService service
-
-type ProjectManagementServiceClient interface {
-	// Get the service account information associated with your project. You need
-	// this information in order to grant the service account permissions for
-	// the Google Cloud Storage location where you put your model training code
-	// for training the model with Google Cloud Machine Learning.
- GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) -} - -type projectManagementServiceClient struct { - cc *grpc.ClientConn -} - -func NewProjectManagementServiceClient(cc *grpc.ClientConn) ProjectManagementServiceClient { - return &projectManagementServiceClient{cc} -} - -func (c *projectManagementServiceClient) GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) { - out := new(GetConfigResponse) - err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.ProjectManagementService/GetConfig", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for ProjectManagementService service - -type ProjectManagementServiceServer interface { - // Get the service account information associated with your project. You need - // this information in order to grant the service account persmissions for - // the Google Cloud Storage location where you put your model training code - // for training the model with Google Cloud Machine Learning. - GetConfig(context.Context, *GetConfigRequest) (*GetConfigResponse, error) -} - -func RegisterProjectManagementServiceServer(s *grpc.Server, srv ProjectManagementServiceServer) { - s.RegisterService(&_ProjectManagementService_serviceDesc, srv) -} - -func _ProjectManagementService_GetConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetConfigRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProjectManagementServiceServer).GetConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.cloud.ml.v1beta1.ProjectManagementService/GetConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProjectManagementServiceServer).GetConfig(ctx, req.(*GetConfigRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _ProjectManagementService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "google.cloud.ml.v1beta1.ProjectManagementService", - HandlerType: (*ProjectManagementServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetConfig", - Handler: _ProjectManagementService_GetConfig_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "google/cloud/ml/v1beta1/project_service.proto", -} - -func init() { proto.RegisterFile("google/cloud/ml/v1beta1/project_service.proto", fileDescriptor4) } - -var fileDescriptor4 = []byte{ - // 327 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x4f, 0x4a, 0x43, 0x31, - 0x10, 0xc6, 0x79, 0x55, 0x84, 0x66, 0xe1, 0x9f, 0x88, 0xb4, 0x14, 0xc1, 0x52, 0xa4, 0xd6, 0xa2, - 0x09, 0x55, 0x10, 0x54, 0x5c, 0x58, 0x17, 0xae, 0x84, 0x52, 0x77, 0x6e, 0x4a, 0xfa, 0x1c, 0xc3, - 0x93, 0x24, 0x13, 0x5f, 0xd2, 0x6e, 0xc4, 0x8d, 0x27, 0x10, 0x3c, 0x87, 0xa7, 0xf1, 0x0a, 0x1e, - 0x44, 0xfa, 0x92, 0x16, 0x6d, 0x11, 0xdc, 0x0d, 0x33, 0xbf, 0x6f, 0x32, 0xdf, 0x4c, 0xc8, 0xa1, - 0x44, 0x94, 0x0a, 0x78, 0xaa, 0x70, 0x74, 0xcf, 0xb5, 0xe2, 0xe3, 0xce, 0x10, 0xbc, 0xe8, 0x70, - 0x9b, 0xe3, 0x23, 0xa4, 0x7e, 0xe0, 0x20, 0x1f, 0x67, 0x29, 0x30, 0x9b, 0xa3, 0x47, 0x5a, 0x09, - 0x38, 0x2b, 0x70, 0xa6, 0x15, 0x8b, 0x78, 0x6d, 0x3b, 0xf6, 0x11, 0x36, 0xe3, 0xc2, 0x18, 0xf4, - 0xc2, 0x67, 0x68, 0x5c, 0x90, 0x35, 0x9a, 0x64, 0xfd, 0x1a, 0xfc, 0x15, 0x9a, 0x87, 0x4c, 0xf6, - 0xe1, 0x69, 0x04, 0xce, 
0x53, 0x4a, 0x96, 0x8d, 0xd0, 0x50, 0x4d, 0xea, 0x49, 0xab, 0xdc, 0x2f, - 0xe2, 0x86, 0x27, 0x1b, 0x3f, 0x38, 0x67, 0xd1, 0x38, 0xa0, 0x7b, 0x64, 0x2d, 0x0e, 0x31, 0x10, - 0x69, 0x8a, 0x23, 0xe3, 0xa3, 0x66, 0x35, 0xa6, 0x2f, 0x43, 0x96, 0x9e, 0x90, 0xca, 0x1c, 0x38, - 0x88, 0x2e, 0xaa, 0xa5, 0x7a, 0xd2, 0x5a, 0xea, 0x6f, 0xfd, 0x16, 0xf4, 0x42, 0xf1, 0xe8, 0x23, - 0x21, 0xd5, 0x18, 0xdf, 0x08, 0x23, 0x24, 0x68, 0x30, 0xfe, 0x36, 0xa0, 0xf4, 0x2d, 0x21, 0xe5, - 0xd9, 0x4c, 0x74, 0x9f, 0xfd, 0xb1, 0x00, 0x36, 0xef, 0xaf, 0xd6, 0xfe, 0x0f, 0x1a, 0x2c, 0x36, - 0x0e, 0x5e, 0x3f, 0xbf, 0xde, 0x4b, 0x4d, 0xba, 0x3b, 0x5b, 0xff, 0xf3, 0x64, 0x1f, 0x17, 0x71, - 0x7c, 0xc7, 0xdb, 0x2f, 0x67, 0x72, 0xaa, 0xea, 0x3a, 0xb2, 0x93, 0xa2, 0x5e, 0x68, 0x2f, 0x6c, - 0x36, 0x7d, 0xa2, 0xbb, 0x19, 0xfd, 0x44, 0x17, 0xbd, 0xc9, 0x15, 0x7a, 0xc9, 0xdd, 0x69, 0xd4, - 0x48, 0x54, 0xc2, 0x48, 0x86, 0xb9, 0xe4, 0x12, 0x4c, 0x71, 0x23, 0x1e, 0x4a, 0xc2, 0x66, 0x6e, - 0xe1, 0x33, 0x9c, 0x6b, 0x35, 0x5c, 0x29, 0xa8, 0xe3, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x76, - 0x59, 0xc4, 0x91, 0x31, 0x02, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/speech/v1p1beta1/cloud_speech.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/speech/v1p1beta1/cloud_speech.pb.go index 1a65d2eb4..7b82cffab 100644 --- a/vendor/google.golang.org/genproto/googleapis/cloud/speech/v1p1beta1/cloud_speech.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/cloud/speech/v1p1beta1/cloud_speech.pb.go @@ -1,11 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/cloud/speech/v1_1beta1/cloud_speech.proto +// source: google/cloud/speech/v1p1beta1/cloud_speech.proto /* Package speech is a generated protocol buffer package. It is generated from these files: - google/cloud/speech/v1_1beta1/cloud_speech.proto + google/cloud/speech/v1p1beta1/cloud_speech.proto It has these top-level messages: RecognizeRequest @@ -13,7 +13,6 @@ It has these top-level messages: StreamingRecognizeRequest StreamingRecognitionConfig RecognitionConfig - RecognitionMetadata SpeechContext RecognitionAudio RecognizeResponse @@ -32,6 +31,7 @@ import fmt "fmt" import math "math" import _ "google.golang.org/genproto/googleapis/api/annotations" import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import _ "github.com/golang/protobuf/ptypes/any" import google_protobuf3 "github.com/golang/protobuf/ptypes/duration" import google_protobuf4 "github.com/golang/protobuf/ptypes/timestamp" import google_rpc "google.golang.org/genproto/googleapis/rpc/status" @@ -52,20 +52,29 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package -// Audio encoding of the data sent in the audio message. All encodings support -// only 1 channel (mono) audio. Only `FLAC` includes a header that describes -// the bytes of audio that follow the header. The other encodings are raw -// audio bytes with no header. +// The encoding of the audio data sent in the request. +// +// All encodings support only 1 channel (mono) audio. +// +// If you send a `FLAC` or `WAV` audio file format in the request, +// then if you specify an encoding in `AudioEncoding`, it must match the +// encoding described in the audio header. If it does not match, then the +// request returns an +// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT] error code. 
You can request +// recognition for `WAV` files that contain either `LINEAR16` or `MULAW` +// encoded audio. +// For audio file formats other than `FLAC` or `WAV`, you must +// specify the audio encoding in your `RecognitionConfig`. // // For best results, the audio source should be captured and transmitted using -// a lossless encoding (`FLAC` or `LINEAR16`). Recognition accuracy may be -// reduced if lossy codecs, which include the other codecs listed in -// this section, are used to capture or transmit the audio, particularly if -// background noise is present. +// a lossless encoding (`FLAC` or `LINEAR16`). The accuracy of the speech +// recognition can be reduced if lossy codecs, which include the other codecs +// listed in this section, are used to capture or transmit the audio, +// particularly if background noise is present. type RecognitionConfig_AudioEncoding int32 const ( - // Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. + // Not specified. RecognitionConfig_ENCODING_UNSPECIFIED RecognitionConfig_AudioEncoding = 0 // Uncompressed 16-bit signed little-endian samples (Linear PCM). RecognitionConfig_LINEAR16 RecognitionConfig_AudioEncoding = 1 @@ -84,7 +93,7 @@ const ( RecognitionConfig_AMR_WB RecognitionConfig_AudioEncoding = 5 // Opus encoded audio frames in Ogg container // ([OggOpus](https://wiki.xiph.org/OggOpus)). - // `sample_rate_hertz` must be 16000. + // `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000. RecognitionConfig_OGG_OPUS RecognitionConfig_AudioEncoding = 6 // Although the use of lossy encodings is not recommended, if a very low // bitrate encoding is required, `OGG_OPUS` is highly preferred over @@ -130,215 +139,6 @@ func (RecognitionConfig_AudioEncoding) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} } -// Use case categories that the audio recognition request can be described -// by. -type RecognitionMetadata_InteractionType int32 - -const ( - // Use case is either unknown or is something other than one of the other - // values below. - RecognitionMetadata_INTERACTION_TYPE_UNSPECIFIED RecognitionMetadata_InteractionType = 0 - // Multiple people in a conversation or discussion. For example in a - // meeting with two or more people actively participating. Typically - // all the primary people speaking would be in the same room (if not, - // see PHONE_CALL) - RecognitionMetadata_DISCUSSION RecognitionMetadata_InteractionType = 1 - // One or more persons lecturing or presenting to others, mostly - // uninterrupted. - RecognitionMetadata_PRESENTATION RecognitionMetadata_InteractionType = 2 - // A phone-call or video-conference in which two or more people, who are - // not in the same room, are actively participating. - RecognitionMetadata_PHONE_CALL RecognitionMetadata_InteractionType = 3 - // A recorded message intended for another person to listen to. - RecognitionMetadata_VOICEMAIL RecognitionMetadata_InteractionType = 4 - // Professionally produced audio (eg. TV Show, Podcast). - RecognitionMetadata_PROFESSIONALLY_PRODUCED RecognitionMetadata_InteractionType = 5 - // Transcribe spoken questions and queries into text. - RecognitionMetadata_VOICE_SEARCH RecognitionMetadata_InteractionType = 6 - // Transcribe voice commands, such as for controlling a device. - RecognitionMetadata_VOICE_COMMAND RecognitionMetadata_InteractionType = 7 - // Transcribe speech to text to create a written document, such as a - // text-message, email or report. 
- RecognitionMetadata_DICTATION RecognitionMetadata_InteractionType = 8 -) - -var RecognitionMetadata_InteractionType_name = map[int32]string{ - 0: "INTERACTION_TYPE_UNSPECIFIED", - 1: "DISCUSSION", - 2: "PRESENTATION", - 3: "PHONE_CALL", - 4: "VOICEMAIL", - 5: "PROFESSIONALLY_PRODUCED", - 6: "VOICE_SEARCH", - 7: "VOICE_COMMAND", - 8: "DICTATION", -} -var RecognitionMetadata_InteractionType_value = map[string]int32{ - "INTERACTION_TYPE_UNSPECIFIED": 0, - "DISCUSSION": 1, - "PRESENTATION": 2, - "PHONE_CALL": 3, - "VOICEMAIL": 4, - "PROFESSIONALLY_PRODUCED": 5, - "VOICE_SEARCH": 6, - "VOICE_COMMAND": 7, - "DICTATION": 8, -} - -func (x RecognitionMetadata_InteractionType) String() string { - return proto.EnumName(RecognitionMetadata_InteractionType_name, int32(x)) -} -func (RecognitionMetadata_InteractionType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{5, 0} -} - -// Enumerates the types of capture settings describing an audio file. -type RecognitionMetadata_MicrophoneDistance int32 - -const ( - // Audio type is not known. - RecognitionMetadata_MICROPHONE_DISTANCE_UNSPECIFIED RecognitionMetadata_MicrophoneDistance = 0 - // The audio was captured from a closely placed microphone. Eg. phone, - // dictaphone, or handheld microphone. Generally if there speaker is within - // 1 meter of the microphone. - RecognitionMetadata_NEARFIELD RecognitionMetadata_MicrophoneDistance = 1 - // The speaker if within 3 meters of the microphone. - RecognitionMetadata_MIDFIELD RecognitionMetadata_MicrophoneDistance = 2 - // The speaker is more than 3 meters away from the microphone. - RecognitionMetadata_FARFIELD RecognitionMetadata_MicrophoneDistance = 3 -) - -var RecognitionMetadata_MicrophoneDistance_name = map[int32]string{ - 0: "MICROPHONE_DISTANCE_UNSPECIFIED", - 1: "NEARFIELD", - 2: "MIDFIELD", - 3: "FARFIELD", -} -var RecognitionMetadata_MicrophoneDistance_value = map[string]int32{ - "MICROPHONE_DISTANCE_UNSPECIFIED": 0, - "NEARFIELD": 1, - "MIDFIELD": 2, - "FARFIELD": 3, -} - -func (x RecognitionMetadata_MicrophoneDistance) String() string { - return proto.EnumName(RecognitionMetadata_MicrophoneDistance_name, int32(x)) -} -func (RecognitionMetadata_MicrophoneDistance) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{5, 1} -} - -// The original media the speech was recorded on. -type RecognitionMetadata_OriginalMediaType int32 - -const ( - // Unknown original media type. - RecognitionMetadata_ORIGINAL_MEDIA_TYPE_UNSPECIFIED RecognitionMetadata_OriginalMediaType = 0 - // The speech data is an audio recording. - RecognitionMetadata_AUDIO RecognitionMetadata_OriginalMediaType = 1 - // The speech data originally recorded on a video. - RecognitionMetadata_VIDEO RecognitionMetadata_OriginalMediaType = 2 -) - -var RecognitionMetadata_OriginalMediaType_name = map[int32]string{ - 0: "ORIGINAL_MEDIA_TYPE_UNSPECIFIED", - 1: "AUDIO", - 2: "VIDEO", -} -var RecognitionMetadata_OriginalMediaType_value = map[string]int32{ - "ORIGINAL_MEDIA_TYPE_UNSPECIFIED": 0, - "AUDIO": 1, - "VIDEO": 2, -} - -func (x RecognitionMetadata_OriginalMediaType) String() string { - return proto.EnumName(RecognitionMetadata_OriginalMediaType_name, int32(x)) -} -func (RecognitionMetadata_OriginalMediaType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{5, 2} -} - -// How many speakers expected in the speech to be recognized. -type RecognitionMetadata_NumberOfSpeakers int32 - -const ( - // Unknown number of persons speaking. 
- RecognitionMetadata_NUMBER_OF_SPEAKERS_UNSPECIFIED RecognitionMetadata_NumberOfSpeakers = 0 - // Only one person is the prominent speaker (ignore background voices). - RecognitionMetadata_ONE_SPEAKER RecognitionMetadata_NumberOfSpeakers = 1 - // Two people are the prominent speakers (transcript should focus - // on the two most prominent speakers). - RecognitionMetadata_TWO_SPEAKERS RecognitionMetadata_NumberOfSpeakers = 2 - // Transcribe all voices. - RecognitionMetadata_MULTIPLE_SPEAKERS RecognitionMetadata_NumberOfSpeakers = 3 -) - -var RecognitionMetadata_NumberOfSpeakers_name = map[int32]string{ - 0: "NUMBER_OF_SPEAKERS_UNSPECIFIED", - 1: "ONE_SPEAKER", - 2: "TWO_SPEAKERS", - 3: "MULTIPLE_SPEAKERS", -} -var RecognitionMetadata_NumberOfSpeakers_value = map[string]int32{ - "NUMBER_OF_SPEAKERS_UNSPECIFIED": 0, - "ONE_SPEAKER": 1, - "TWO_SPEAKERS": 2, - "MULTIPLE_SPEAKERS": 3, -} - -func (x RecognitionMetadata_NumberOfSpeakers) String() string { - return proto.EnumName(RecognitionMetadata_NumberOfSpeakers_name, int32(x)) -} -func (RecognitionMetadata_NumberOfSpeakers) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{5, 3} -} - -// The type of device the speech was recorded with. -type RecognitionMetadata_RecordingDeviceType int32 - -const ( - // The recording device is unknown. - RecognitionMetadata_RECORDING_DEVICE_TYPE_UNSPECIFIED RecognitionMetadata_RecordingDeviceType = 0 - // Speech was recorded on a smartphone. - RecognitionMetadata_SMARTPHONE RecognitionMetadata_RecordingDeviceType = 1 - // Speech was recorded using a personal computer or tablet. - RecognitionMetadata_PC RecognitionMetadata_RecordingDeviceType = 2 - // Speech was recorded over a phone line. - RecognitionMetadata_PHONE_LINE RecognitionMetadata_RecordingDeviceType = 3 - // Speech was recorded in a vehicle. - RecognitionMetadata_VEHICLE RecognitionMetadata_RecordingDeviceType = 4 - // Speech was recorded outdoors. - RecognitionMetadata_OTHER_OUTDOOR_DEVICE RecognitionMetadata_RecordingDeviceType = 5 - // Speech was recorded indoors. - RecognitionMetadata_OTHER_INDOOR_DEVICE RecognitionMetadata_RecordingDeviceType = 6 -) - -var RecognitionMetadata_RecordingDeviceType_name = map[int32]string{ - 0: "RECORDING_DEVICE_TYPE_UNSPECIFIED", - 1: "SMARTPHONE", - 2: "PC", - 3: "PHONE_LINE", - 4: "VEHICLE", - 5: "OTHER_OUTDOOR_DEVICE", - 6: "OTHER_INDOOR_DEVICE", -} -var RecognitionMetadata_RecordingDeviceType_value = map[string]int32{ - "RECORDING_DEVICE_TYPE_UNSPECIFIED": 0, - "SMARTPHONE": 1, - "PC": 2, - "PHONE_LINE": 3, - "VEHICLE": 4, - "OTHER_OUTDOOR_DEVICE": 5, - "OTHER_INDOOR_DEVICE": 6, -} - -func (x RecognitionMetadata_RecordingDeviceType) String() string { - return proto.EnumName(RecognitionMetadata_RecordingDeviceType_name, int32(x)) -} -func (RecognitionMetadata_RecordingDeviceType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{5, 4} -} - // Indicates the type of speech event. type StreamingRecognizeResponse_SpeechEventType int32 @@ -368,7 +168,7 @@ func (x StreamingRecognizeResponse_SpeechEventType) String() string { return proto.EnumName(StreamingRecognizeResponse_SpeechEventType_name, int32(x)) } func (StreamingRecognizeResponse_SpeechEventType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{11, 0} + return fileDescriptor0, []int{10, 0} } // The top-level message sent by the client for the `Recognize` method. @@ -639,16 +439,11 @@ type RecognitionConfig struct { // `false`, no word-level time offset information is returned. The default is // `false`. 
EnableWordTimeOffsets bool `protobuf:"varint,8,opt,name=enable_word_time_offsets,json=enableWordTimeOffsets" json:"enable_word_time_offsets,omitempty"` - // *Optional* If 'true', adds punctuation to recognition result hypotheses. - // This feature is only available in select languages. Setting this for - // requests in other languages has no effect at all. - // The default 'false' value does not add punctuation to result hypotheses. - // NOTE: "This is currently offered as an experimental service, complimentary - // to all users. In the future this may be exclusively available as a - // premium feature." - EnableAutomaticPunctuation bool `protobuf:"varint,11,opt,name=enable_automatic_punctuation,json=enableAutomaticPunctuation" json:"enable_automatic_punctuation,omitempty"` - // *Optional* Metadata regarding this request. - Metadata *RecognitionMetadata `protobuf:"bytes,9,opt,name=metadata" json:"metadata,omitempty"` + // *Optional* Which model to select for the given request. Select the model + // best suited to your domain to get best results. If a model is not + // explicitly specified, then we auto-select a model based on the parameters + // in the RecognitionConfig. + Model string `protobuf:"bytes,13,opt,name=model" json:"model,omitempty"` } func (m *RecognitionConfig) Reset() { *m = RecognitionConfig{} } @@ -705,126 +500,9 @@ func (m *RecognitionConfig) GetEnableWordTimeOffsets() bool { return false } -func (m *RecognitionConfig) GetEnableAutomaticPunctuation() bool { +func (m *RecognitionConfig) GetModel() string { if m != nil { - return m.EnableAutomaticPunctuation - } - return false -} - -func (m *RecognitionConfig) GetMetadata() *RecognitionMetadata { - if m != nil { - return m.Metadata - } - return nil -} - -// Description of audio data to be recognized. -type RecognitionMetadata struct { - // The use case most closely describing the audio content to be recognized. - InteractionType RecognitionMetadata_InteractionType `protobuf:"varint,1,opt,name=interaction_type,json=interactionType,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_InteractionType" json:"interaction_type,omitempty"` - // The industry vertical to which this speech recognition request most - // closely applies. This is most indicative of the topics contained - // in the audio. Use the 6-digit NAICS code to identify the industry - // vertical - see https://www.naics.com/search/. - IndustryNaicsCodeOfAudio uint32 `protobuf:"varint,3,opt,name=industry_naics_code_of_audio,json=industryNaicsCodeOfAudio" json:"industry_naics_code_of_audio,omitempty"` - // The audio type that most closely describes the audio being recognized. - MicrophoneDistance RecognitionMetadata_MicrophoneDistance `protobuf:"varint,4,opt,name=microphone_distance,json=microphoneDistance,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_MicrophoneDistance" json:"microphone_distance,omitempty"` - // The original media the speech was recorded on. - OriginalMediaType RecognitionMetadata_OriginalMediaType `protobuf:"varint,5,opt,name=original_media_type,json=originalMediaType,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_OriginalMediaType" json:"original_media_type,omitempty"` - // How many people are speaking prominently in the audio and expected to be - // recognized. 
- NumberOfSpeakers RecognitionMetadata_NumberOfSpeakers `protobuf:"varint,6,opt,name=number_of_speakers,json=numberOfSpeakers,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_NumberOfSpeakers" json:"number_of_speakers,omitempty"` - // The type of device the speech was recorded with. - RecordingDeviceType RecognitionMetadata_RecordingDeviceType `protobuf:"varint,7,opt,name=recording_device_type,json=recordingDeviceType,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_RecordingDeviceType" json:"recording_device_type,omitempty"` - // The device used to make the recording. Examples 'Nexus 5X' or - // 'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or - // 'Cardioid Microphone'. - RecordingDeviceName string `protobuf:"bytes,8,opt,name=recording_device_name,json=recordingDeviceName" json:"recording_device_name,omitempty"` - // Mime type of the original audio file. For example `audio/m4a`, - // `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`. - // A list of possible audio mime types is maintained at - // http://www.iana.org/assignments/media-types/media-types.xhtml#audio - OriginalMimeType string `protobuf:"bytes,9,opt,name=original_mime_type,json=originalMimeType" json:"original_mime_type,omitempty"` - // Obfuscated (privacy-protected) ID of the user, to identify number of - // unique users using the service. - ObfuscatedId int64 `protobuf:"varint,10,opt,name=obfuscated_id,json=obfuscatedId" json:"obfuscated_id,omitempty"` - // Description of the content. Eg. "Recordings of federal supreme court - // hearings from 2012". - AudioTopic string `protobuf:"bytes,11,opt,name=audio_topic,json=audioTopic" json:"audio_topic,omitempty"` -} - -func (m *RecognitionMetadata) Reset() { *m = RecognitionMetadata{} } -func (m *RecognitionMetadata) String() string { return proto.CompactTextString(m) } -func (*RecognitionMetadata) ProtoMessage() {} -func (*RecognitionMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } - -func (m *RecognitionMetadata) GetInteractionType() RecognitionMetadata_InteractionType { - if m != nil { - return m.InteractionType - } - return RecognitionMetadata_INTERACTION_TYPE_UNSPECIFIED -} - -func (m *RecognitionMetadata) GetIndustryNaicsCodeOfAudio() uint32 { - if m != nil { - return m.IndustryNaicsCodeOfAudio - } - return 0 -} - -func (m *RecognitionMetadata) GetMicrophoneDistance() RecognitionMetadata_MicrophoneDistance { - if m != nil { - return m.MicrophoneDistance - } - return RecognitionMetadata_MICROPHONE_DISTANCE_UNSPECIFIED -} - -func (m *RecognitionMetadata) GetOriginalMediaType() RecognitionMetadata_OriginalMediaType { - if m != nil { - return m.OriginalMediaType - } - return RecognitionMetadata_ORIGINAL_MEDIA_TYPE_UNSPECIFIED -} - -func (m *RecognitionMetadata) GetNumberOfSpeakers() RecognitionMetadata_NumberOfSpeakers { - if m != nil { - return m.NumberOfSpeakers - } - return RecognitionMetadata_NUMBER_OF_SPEAKERS_UNSPECIFIED -} - -func (m *RecognitionMetadata) GetRecordingDeviceType() RecognitionMetadata_RecordingDeviceType { - if m != nil { - return m.RecordingDeviceType - } - return RecognitionMetadata_RECORDING_DEVICE_TYPE_UNSPECIFIED -} - -func (m *RecognitionMetadata) GetRecordingDeviceName() string { - if m != nil { - return m.RecordingDeviceName - } - return "" -} - -func (m *RecognitionMetadata) GetOriginalMimeType() string { - if m != nil { - return m.OriginalMimeType - } - return "" -} - -func (m *RecognitionMetadata) GetObfuscatedId() int64 { - if m != nil { - return m.ObfuscatedId - } - return 0 -} - -func (m 
*RecognitionMetadata) GetAudioTopic() string { - if m != nil { - return m.AudioTopic + return m.Model } return "" } @@ -844,7 +522,7 @@ type SpeechContext struct { func (m *SpeechContext) Reset() { *m = SpeechContext{} } func (m *SpeechContext) String() string { return proto.CompactTextString(m) } func (*SpeechContext) ProtoMessage() {} -func (*SpeechContext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (*SpeechContext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } func (m *SpeechContext) GetPhrases() []string { if m != nil { @@ -858,7 +536,8 @@ func (m *SpeechContext) GetPhrases() []string { // returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. See // [audio limits](https://cloud.google.com/speech/limits#content). type RecognitionAudio struct { - // The audio source, which is either inline content or a GCS uri. + // The audio source, which is either inline content or a Google Cloud + // Storage uri. // // Types that are valid to be assigned to AudioSource: // *RecognitionAudio_Content @@ -869,7 +548,7 @@ type RecognitionAudio struct { func (m *RecognitionAudio) Reset() { *m = RecognitionAudio{} } func (m *RecognitionAudio) String() string { return proto.CompactTextString(m) } func (*RecognitionAudio) ProtoMessage() {} -func (*RecognitionAudio) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (*RecognitionAudio) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } type isRecognitionAudio_AudioSource interface { isRecognitionAudio_AudioSource() @@ -984,7 +663,7 @@ type RecognizeResponse struct { func (m *RecognizeResponse) Reset() { *m = RecognizeResponse{} } func (m *RecognizeResponse) String() string { return proto.CompactTextString(m) } func (*RecognizeResponse) ProtoMessage() {} -func (*RecognizeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (*RecognizeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } func (m *RecognizeResponse) GetResults() []*SpeechRecognitionResult { if m != nil { @@ -1007,7 +686,7 @@ type LongRunningRecognizeResponse struct { func (m *LongRunningRecognizeResponse) Reset() { *m = LongRunningRecognizeResponse{} } func (m *LongRunningRecognizeResponse) String() string { return proto.CompactTextString(m) } func (*LongRunningRecognizeResponse) ProtoMessage() {} -func (*LongRunningRecognizeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +func (*LongRunningRecognizeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } func (m *LongRunningRecognizeResponse) GetResults() []*SpeechRecognitionResult { if m != nil { @@ -1032,7 +711,7 @@ type LongRunningRecognizeMetadata struct { func (m *LongRunningRecognizeMetadata) Reset() { *m = LongRunningRecognizeMetadata{} } func (m *LongRunningRecognizeMetadata) String() string { return proto.CompactTextString(m) } func (*LongRunningRecognizeMetadata) ProtoMessage() {} -func (*LongRunningRecognizeMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +func (*LongRunningRecognizeMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } func (m *LongRunningRecognizeMetadata) GetProgressPercent() int32 { if m != nil { @@ -1110,8 +789,8 @@ type StreamingRecognizeResponse struct { Error *google_rpc.Status `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` // *Output-only* This repeated list contains zero or more results that // correspond to consecutive portions of the audio 
currently being processed. - // It contains zero or more `is_final=false` results followed by zero or one - // `is_final=true` result (the newly settled portion). + // It contains zero or one `is_final=true` result (the newly settled portion), + // followed by zero or more `is_final=false` results (the interim results). Results []*StreamingRecognitionResult `protobuf:"bytes,2,rep,name=results" json:"results,omitempty"` // *Output-only* Indicates the type of speech event. SpeechEventType StreamingRecognizeResponse_SpeechEventType `protobuf:"varint,4,opt,name=speech_event_type,json=speechEventType,enum=google.cloud.speech.v1p1beta1.StreamingRecognizeResponse_SpeechEventType" json:"speech_event_type,omitempty"` @@ -1120,7 +799,7 @@ type StreamingRecognizeResponse struct { func (m *StreamingRecognizeResponse) Reset() { *m = StreamingRecognizeResponse{} } func (m *StreamingRecognizeResponse) String() string { return proto.CompactTextString(m) } func (*StreamingRecognizeResponse) ProtoMessage() {} -func (*StreamingRecognizeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func (*StreamingRecognizeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } func (m *StreamingRecognizeResponse) GetError() *google_rpc.Status { if m != nil { @@ -1148,6 +827,8 @@ func (m *StreamingRecognizeResponse) GetSpeechEventType() StreamingRecognizeResp type StreamingRecognitionResult struct { // *Output-only* May contain one or more recognition hypotheses (up to the // maximum specified in `max_alternatives`). + // These alternatives are ordered in terms of accuracy, with the top (first) + // alternative being the most probable, as ranked by the recognizer. Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives" json:"alternatives,omitempty"` // *Output-only* If `false`, this `StreamingRecognitionResult` represents an // interim result that may change. If `true`, this is the final time the @@ -1166,7 +847,7 @@ type StreamingRecognitionResult struct { func (m *StreamingRecognitionResult) Reset() { *m = StreamingRecognitionResult{} } func (m *StreamingRecognitionResult) String() string { return proto.CompactTextString(m) } func (*StreamingRecognitionResult) ProtoMessage() {} -func (*StreamingRecognitionResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } +func (*StreamingRecognitionResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } func (m *StreamingRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative { if m != nil { @@ -1201,7 +882,7 @@ type SpeechRecognitionResult struct { func (m *SpeechRecognitionResult) Reset() { *m = SpeechRecognitionResult{} } func (m *SpeechRecognitionResult) String() string { return proto.CompactTextString(m) } func (*SpeechRecognitionResult) ProtoMessage() {} -func (*SpeechRecognitionResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } +func (*SpeechRecognitionResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } func (m *SpeechRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative { if m != nil { @@ -1216,10 +897,10 @@ type SpeechRecognitionAlternative struct { Transcript string `protobuf:"bytes,1,opt,name=transcript" json:"transcript,omitempty"` // *Output-only* The confidence estimate between 0.0 and 1.0. A higher number // indicates an estimated greater likelihood that the recognized words are - // correct. 
This field is typically provided only for the top hypothesis, and - // only for `is_final=true` results. Clients should not rely on the - // `confidence` field as it is not guaranteed to be accurate, or even set, in - // any of the results. + // correct. This field is set only for the top alternative of a non-streaming + // result or, of a streaming result where `is_final=true`. + // This field is not guaranteed to be accurate and users should not rely on it + // to be always provided. // The default of 0.0 is a sentinel value indicating `confidence` was not set. Confidence float32 `protobuf:"fixed32,2,opt,name=confidence" json:"confidence,omitempty"` // *Output-only* A list of word-specific information for each recognized word. @@ -1229,7 +910,7 @@ type SpeechRecognitionAlternative struct { func (m *SpeechRecognitionAlternative) Reset() { *m = SpeechRecognitionAlternative{} } func (m *SpeechRecognitionAlternative) String() string { return proto.CompactTextString(m) } func (*SpeechRecognitionAlternative) ProtoMessage() {} -func (*SpeechRecognitionAlternative) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (*SpeechRecognitionAlternative) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } func (m *SpeechRecognitionAlternative) GetTranscript() string { if m != nil { @@ -1252,9 +933,7 @@ func (m *SpeechRecognitionAlternative) GetWords() []*WordInfo { return nil } -// Word-specific information for recognized words. Word information is only -// included in the response when certain request parameters are set, such -// as `enable_word_time_offsets`. +// Word-specific information for recognized words. type WordInfo struct { // *Output-only* Time offset relative to the beginning of the audio, // and corresponding to the start of the spoken word. 
@@ -1277,7 +956,7 @@ type WordInfo struct { func (m *WordInfo) Reset() { *m = WordInfo{} } func (m *WordInfo) String() string { return proto.CompactTextString(m) } func (*WordInfo) ProtoMessage() {} -func (*WordInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (*WordInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } func (m *WordInfo) GetStartTime() *google_protobuf3.Duration { if m != nil { @@ -1306,7 +985,6 @@ func init() { proto.RegisterType((*StreamingRecognizeRequest)(nil), "google.cloud.speech.v1p1beta1.StreamingRecognizeRequest") proto.RegisterType((*StreamingRecognitionConfig)(nil), "google.cloud.speech.v1p1beta1.StreamingRecognitionConfig") proto.RegisterType((*RecognitionConfig)(nil), "google.cloud.speech.v1p1beta1.RecognitionConfig") - proto.RegisterType((*RecognitionMetadata)(nil), "google.cloud.speech.v1p1beta1.RecognitionMetadata") proto.RegisterType((*SpeechContext)(nil), "google.cloud.speech.v1p1beta1.SpeechContext") proto.RegisterType((*RecognitionAudio)(nil), "google.cloud.speech.v1p1beta1.RecognitionAudio") proto.RegisterType((*RecognizeResponse)(nil), "google.cloud.speech.v1p1beta1.RecognizeResponse") @@ -1318,11 +996,6 @@ func init() { proto.RegisterType((*SpeechRecognitionAlternative)(nil), "google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative") proto.RegisterType((*WordInfo)(nil), "google.cloud.speech.v1p1beta1.WordInfo") proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionConfig_AudioEncoding", RecognitionConfig_AudioEncoding_name, RecognitionConfig_AudioEncoding_value) - proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_InteractionType", RecognitionMetadata_InteractionType_name, RecognitionMetadata_InteractionType_value) - proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_MicrophoneDistance", RecognitionMetadata_MicrophoneDistance_name, RecognitionMetadata_MicrophoneDistance_value) - proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_OriginalMediaType", RecognitionMetadata_OriginalMediaType_name, RecognitionMetadata_OriginalMediaType_value) - proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_NumberOfSpeakers", RecognitionMetadata_NumberOfSpeakers_name, RecognitionMetadata_NumberOfSpeakers_value) - proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_RecordingDeviceType", RecognitionMetadata_RecordingDeviceType_name, RecognitionMetadata_RecordingDeviceType_value) proto.RegisterEnum("google.cloud.speech.v1p1beta1.StreamingRecognizeResponse_SpeechEventType", StreamingRecognizeResponse_SpeechEventType_name, StreamingRecognizeResponse_SpeechEventType_value) } @@ -1510,137 +1183,95 @@ var _Speech_serviceDesc = grpc.ServiceDesc{ ClientStreams: true, }, }, - Metadata: "google/cloud/speech/v1_1beta1/cloud_speech.proto", + Metadata: "google/cloud/speech/v1p1beta1/cloud_speech.proto", } -func init() { proto.RegisterFile("google/cloud/speech/v1_1beta1/cloud_speech.proto", fileDescriptor0) } +func init() { proto.RegisterFile("google/cloud/speech/v1p1beta1/cloud_speech.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 2005 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xbf, 0x73, 0xdb, 0xc8, - 0xf5, 0x37, 0x48, 0x51, 0x12, 0x9f, 0x7e, 0x41, 0xab, 0xf3, 0xd7, 0xb4, 0xac, 0xb3, 0x75, 0xf0, - 0xdc, 0x9d, 0xee, 0xbe, 0x37, 0xa4, 0xa5, 0x64, 0x2e, 0x67, 0xdf, 0xe4, 0x26, 0x10, 0x00, 0x99, - 0x98, 0x90, 0x00, 0x67, 0x49, 0xda, 0xf1, 0x35, 0x3b, 
0x10, 0xb8, 0xa4, 0x31, 0x21, 0x01, 0x1c, - 0xb0, 0x50, 0x6c, 0x97, 0x69, 0x33, 0x49, 0x93, 0x99, 0x74, 0xa9, 0x72, 0x75, 0xca, 0x14, 0x69, - 0x52, 0x25, 0x45, 0xda, 0x34, 0x29, 0xaf, 0xc8, 0x1f, 0x91, 0x32, 0xb3, 0xbb, 0x00, 0x45, 0x91, - 0xb2, 0x2d, 0x6b, 0x72, 0x33, 0xe9, 0x88, 0xcf, 0xbe, 0xf7, 0x79, 0x6f, 0xdf, 0xbe, 0x7d, 0x7c, - 0x6f, 0xe1, 0xc1, 0x28, 0x8a, 0x46, 0x63, 0xda, 0xf0, 0xc7, 0x51, 0x36, 0x68, 0xa4, 0x31, 0xa5, - 0xfe, 0xf3, 0xc6, 0xd9, 0x21, 0x39, 0x3c, 0xa5, 0xcc, 0x3b, 0x94, 0x30, 0x91, 0x70, 0x3d, 0x4e, - 0x22, 0x16, 0xa1, 0xf7, 0xa5, 0x46, 0x5d, 0x2c, 0xd5, 0xf3, 0xa5, 0xb3, 0xc3, 0x58, 0x6a, 0xec, - 0xee, 0xe5, 0x84, 0x5e, 0x1c, 0x34, 0xbc, 0x30, 0x8c, 0x98, 0xc7, 0x82, 0x28, 0x4c, 0xa5, 0xf2, - 0xee, 0xfd, 0x7c, 0x75, 0x1c, 0x85, 0xa3, 0x24, 0x0b, 0xc3, 0x20, 0x1c, 0x35, 0xa2, 0x98, 0x26, - 0x17, 0x84, 0xee, 0xe6, 0x42, 0xe2, 0xeb, 0x34, 0x1b, 0x36, 0x06, 0x99, 0x14, 0xc8, 0xd7, 0xef, - 0xcd, 0xaf, 0xb3, 0x60, 0x42, 0x53, 0xe6, 0x4d, 0xe2, 0x5c, 0xe0, 0x56, 0x2e, 0x90, 0xc4, 0x7e, - 0x23, 0x65, 0x1e, 0xcb, 0x72, 0x66, 0xed, 0x0f, 0x0a, 0xa8, 0x98, 0xfa, 0xd1, 0x28, 0x0c, 0x5e, - 0x51, 0x4c, 0xbf, 0xc9, 0x68, 0xca, 0x50, 0x13, 0x96, 0xfd, 0x28, 0x1c, 0x06, 0xa3, 0x9a, 0xb2, - 0xaf, 0x1c, 0xac, 0x1d, 0x3d, 0xa8, 0xbf, 0x71, 0x87, 0xf5, 0x9c, 0x80, 0x3b, 0x64, 0x08, 0x3d, - 0x9c, 0xeb, 0x23, 0x0b, 0x2a, 0x5e, 0x36, 0x08, 0xa2, 0x5a, 0x49, 0x10, 0x35, 0xae, 0x4e, 0xa4, - 0x73, 0x35, 0x2c, 0xb5, 0xb5, 0x3f, 0x2a, 0x70, 0xa7, 0x15, 0x85, 0x23, 0x2c, 0x03, 0xf4, 0xbf, - 0xef, 0xf0, 0x5f, 0x14, 0xb8, 0xdd, 0x65, 0x09, 0xf5, 0x26, 0x97, 0xb9, 0x3b, 0x04, 0x35, 0x2d, - 0x16, 0xc9, 0x05, 0xc7, 0x1f, 0xbe, 0xc5, 0xde, 0x3c, 0xe7, 0xf9, 0x0e, 0x9a, 0x37, 0xf0, 0xd6, - 0x94, 0x54, 0x42, 0xe8, 0x43, 0xd8, 0x10, 0xee, 0x70, 0x1b, 0x8c, 0x86, 0x4c, 0x6c, 0x6a, 0xbd, - 0x79, 0x03, 0xaf, 0x0b, 0xd8, 0x90, 0xe8, 0xf1, 0x0e, 0x6c, 0x9f, 0xbb, 0x93, 0x48, 0x1f, 0xb5, - 0x3f, 0x2b, 0xb0, 0xfb, 0x7a, 0x6b, 0xff, 0xc5, 0x88, 0x7f, 0x02, 0x6a, 0x1a, 0x84, 0xa3, 0x31, - 0x25, 0x19, 0x63, 0x34, 0xf1, 0x42, 0x9f, 0x0a, 0x3f, 0x57, 0xf1, 0x96, 0xc4, 0xfb, 0x05, 0x8c, - 0x3e, 0x86, 0xad, 0x20, 0x64, 0x34, 0x09, 0x26, 0x24, 0xa1, 0x69, 0x36, 0x66, 0x69, 0xad, 0x2c, - 0x24, 0x37, 0x73, 0x18, 0x4b, 0x54, 0xfb, 0x5b, 0x05, 0xb6, 0x17, 0x7d, 0xfe, 0x1a, 0x56, 0x69, - 0xe8, 0x47, 0x83, 0x20, 0x94, 0x5e, 0x6f, 0x1e, 0x7d, 0xf5, 0xae, 0x5e, 0xd7, 0xc5, 0x29, 0x5b, - 0x39, 0x0b, 0x9e, 0xf2, 0xa1, 0x4f, 0x61, 0x3b, 0xf5, 0x26, 0xf1, 0x98, 0x92, 0xc4, 0x63, 0x94, - 0x3c, 0xa7, 0x09, 0x7b, 0x25, 0xb6, 0x51, 0xc1, 0x5b, 0x72, 0x01, 0x7b, 0x8c, 0x36, 0x39, 0x8c, - 0xee, 0xc3, 0xc6, 0xd8, 0x0b, 0x47, 0x99, 0x37, 0xa2, 0xc4, 0x8f, 0x06, 0x54, 0x6c, 0xa2, 0x8a, - 0xd7, 0x0b, 0xd0, 0x88, 0x06, 0x94, 0x87, 0x65, 0xe2, 0xbd, 0x20, 0xde, 0x98, 0xd1, 0x24, 0xf4, - 0x58, 0x70, 0x46, 0xd3, 0xda, 0x92, 0xe4, 0x9b, 0x78, 0x2f, 0xf4, 0x19, 0x98, 0x8b, 0xc6, 0x49, - 0x34, 0xf4, 0xc2, 0x80, 0xbd, 0x24, 0xc3, 0x80, 0x2f, 0xd5, 0x2a, 0x32, 0x82, 0x53, 0xfc, 0x44, - 0xc0, 0xa8, 0x0f, 0x5b, 0x72, 0x93, 0x32, 0x25, 0x5e, 0xb0, 0xb4, 0xb6, 0xbc, 0x5f, 0x3e, 0x58, - 0x3b, 0xfa, 0xec, 0x6d, 0x89, 0x27, 0x00, 0x43, 0x2a, 0xe1, 0xcd, 0x74, 0xf6, 0x33, 0x45, 0x3f, - 0x82, 0x1a, 0x0d, 0xbd, 0xd3, 0x31, 0x25, 0xbf, 0x88, 0x92, 0x01, 0xe1, 0xd5, 0x87, 0x44, 0xc3, - 0x61, 0x4a, 0x59, 0x5a, 0x5b, 0x15, 0x9e, 0xdc, 0x94, 0xeb, 0x4f, 0xa3, 0x64, 0xd0, 0x0b, 0x26, - 0xd4, 0x95, 0x8b, 0xe8, 0x27, 0xb0, 0x97, 0x2b, 0x7a, 0x19, 0x8b, 0x26, 0x1e, 0x0b, 0x7c, 0x12, - 0x67, 0xa1, 0xcf, 0x32, 0x51, 0xde, 0x6a, 0x6b, 0x42, 0x79, 0x57, 0xca, 0xe8, 
-	// (remainder of the old gzipped FileDescriptorProto bytes elided)
+	// 1343 bytes of a gzipped FileDescriptorProto
+	// (regenerated gzipped FileDescriptorProto bytes elided)
}
diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/texttospeech/v1beta1/cloud_tts.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/texttospeech/v1beta1/cloud_tts.pb.go
new file mode 100644
index 000000000..aefcc3d4e
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/cloud/texttospeech/v1beta1/cloud_tts.pb.go
@@ -0,0 +1,672 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/cloud/texttospeech/v1beta1/cloud_tts.proto
+
+/*
+Package texttospeech is a generated protocol buffer package.
+ +It is generated from these files: + google/cloud/texttospeech/v1beta1/cloud_tts.proto + +It has these top-level messages: + ListVoicesRequest + ListVoicesResponse + Voice + SynthesizeSpeechRequest + SynthesisInput + VoiceSelectionParams + AudioConfig + SynthesizeSpeechResponse +*/ +package texttospeech + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Gender of the voice as described in +// [SSML voice element](https://www.w3.org/TR/speech-synthesis11/#edef_voice). +type SsmlVoiceGender int32 + +const ( + // An unspecified gender. + // In VoiceSelectionParams, this means that the client doesn't care which + // gender the selected voice will have. In the Voice field of + // ListVoicesResponse, this may mean that the voice doesn't fit any of the + // other categories in this enum, or that the gender of the voice isn't known. + SsmlVoiceGender_SSML_VOICE_GENDER_UNSPECIFIED SsmlVoiceGender = 0 + // A male voice. + SsmlVoiceGender_MALE SsmlVoiceGender = 1 + // A female voice. + SsmlVoiceGender_FEMALE SsmlVoiceGender = 2 + // A gender-neutral voice. + SsmlVoiceGender_NEUTRAL SsmlVoiceGender = 3 +) + +var SsmlVoiceGender_name = map[int32]string{ + 0: "SSML_VOICE_GENDER_UNSPECIFIED", + 1: "MALE", + 2: "FEMALE", + 3: "NEUTRAL", +} +var SsmlVoiceGender_value = map[string]int32{ + "SSML_VOICE_GENDER_UNSPECIFIED": 0, + "MALE": 1, + "FEMALE": 2, + "NEUTRAL": 3, +} + +func (x SsmlVoiceGender) String() string { + return proto.EnumName(SsmlVoiceGender_name, int32(x)) +} +func (SsmlVoiceGender) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// Configuration to set up audio encoder. The encoding determines the output +// audio format that we'd like. +type AudioEncoding int32 + +const ( + // Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][]. + AudioEncoding_AUDIO_ENCODING_UNSPECIFIED AudioEncoding = 0 + // Uncompressed 16-bit signed little-endian samples (Linear PCM). + // Audio content returned as LINEAR16 also contains a WAV header. + AudioEncoding_LINEAR16 AudioEncoding = 1 + // MP3 audio. + AudioEncoding_MP3 AudioEncoding = 2 + // Opus encoded audio wrapped in an ogg container. The result will be a + // file which can be played natively on Android, and in browsers (at least + // Chrome and Firefox). The quality of the encoding is considerably higher + // than MP3 while using approximately the same bitrate. 
+ AudioEncoding_OGG_OPUS AudioEncoding = 3 +) + +var AudioEncoding_name = map[int32]string{ + 0: "AUDIO_ENCODING_UNSPECIFIED", + 1: "LINEAR16", + 2: "MP3", + 3: "OGG_OPUS", +} +var AudioEncoding_value = map[string]int32{ + "AUDIO_ENCODING_UNSPECIFIED": 0, + "LINEAR16": 1, + "MP3": 2, + "OGG_OPUS": 3, +} + +func (x AudioEncoding) String() string { + return proto.EnumName(AudioEncoding_name, int32(x)) +} +func (AudioEncoding) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +// The top-level message sent by the client for the `ListVoices` method. +type ListVoicesRequest struct { +} + +func (m *ListVoicesRequest) Reset() { *m = ListVoicesRequest{} } +func (m *ListVoicesRequest) String() string { return proto.CompactTextString(m) } +func (*ListVoicesRequest) ProtoMessage() {} +func (*ListVoicesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// The message returned to the client by the `ListVoices` method. +type ListVoicesResponse struct { + // The list of voices. + Voices []*Voice `protobuf:"bytes,1,rep,name=voices" json:"voices,omitempty"` +} + +func (m *ListVoicesResponse) Reset() { *m = ListVoicesResponse{} } +func (m *ListVoicesResponse) String() string { return proto.CompactTextString(m) } +func (*ListVoicesResponse) ProtoMessage() {} +func (*ListVoicesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *ListVoicesResponse) GetVoices() []*Voice { + if m != nil { + return m.Voices + } + return nil +} + +// Description of a voice supported by the TTS service. +type Voice struct { + // The languages that this voice supports, expressed as + // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags (e.g. + // "en-US", "es-419", "cmn-tw"). + LanguageCodes []string `protobuf:"bytes,1,rep,name=language_codes,json=languageCodes" json:"language_codes,omitempty"` + // The name of this voice. Each distinct voice has a unique name. + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + // The gender of this voice. + SsmlGender SsmlVoiceGender `protobuf:"varint,3,opt,name=ssml_gender,json=ssmlGender,enum=google.cloud.texttospeech.v1beta1.SsmlVoiceGender" json:"ssml_gender,omitempty"` + // The natural sample rate (in hertz) for this voice. + NaturalSampleRateHertz int32 `protobuf:"varint,4,opt,name=natural_sample_rate_hertz,json=naturalSampleRateHertz" json:"natural_sample_rate_hertz,omitempty"` +} + +func (m *Voice) Reset() { *m = Voice{} } +func (m *Voice) String() string { return proto.CompactTextString(m) } +func (*Voice) ProtoMessage() {} +func (*Voice) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *Voice) GetLanguageCodes() []string { + if m != nil { + return m.LanguageCodes + } + return nil +} + +func (m *Voice) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Voice) GetSsmlGender() SsmlVoiceGender { + if m != nil { + return m.SsmlGender + } + return SsmlVoiceGender_SSML_VOICE_GENDER_UNSPECIFIED +} + +func (m *Voice) GetNaturalSampleRateHertz() int32 { + if m != nil { + return m.NaturalSampleRateHertz + } + return 0 +} + +// The top-level message sent by the client for the `SynthesizeSpeech` method. +type SynthesizeSpeechRequest struct { + // Required. The Synthesizer requires either plain text or SSML as input. + Input *SynthesisInput `protobuf:"bytes,1,opt,name=input" json:"input,omitempty"` + // Required. The desired voice of the synthesized audio. 
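// (Illustrative sketch, not part of the generated file: one way to pick a
// concrete voice is to scan a ListVoicesResponse for a matching BCP-47 tag.
// The ctx and client variables, and the "en-US" tag, are assumptions.)
//
//	resp, _ := client.ListVoices(ctx, &ListVoicesRequest{})
//	var voiceName string
//	for _, v := range resp.GetVoices() {
//		for _, tag := range v.GetLanguageCodes() {
//			if tag == "en-US" {
//				voiceName = v.GetName() // e.g. feed into VoiceSelectionParams.Name
//			}
//		}
//	}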
+ Voice *VoiceSelectionParams `protobuf:"bytes,2,opt,name=voice" json:"voice,omitempty"` + // Required. The configuration of the synthesized audio. + AudioConfig *AudioConfig `protobuf:"bytes,3,opt,name=audio_config,json=audioConfig" json:"audio_config,omitempty"` +} + +func (m *SynthesizeSpeechRequest) Reset() { *m = SynthesizeSpeechRequest{} } +func (m *SynthesizeSpeechRequest) String() string { return proto.CompactTextString(m) } +func (*SynthesizeSpeechRequest) ProtoMessage() {} +func (*SynthesizeSpeechRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *SynthesizeSpeechRequest) GetInput() *SynthesisInput { + if m != nil { + return m.Input + } + return nil +} + +func (m *SynthesizeSpeechRequest) GetVoice() *VoiceSelectionParams { + if m != nil { + return m.Voice + } + return nil +} + +func (m *SynthesizeSpeechRequest) GetAudioConfig() *AudioConfig { + if m != nil { + return m.AudioConfig + } + return nil +} + +// Contains text input to be synthesized. Either `text` or `ssml` must be +// supplied. Supplying both or neither returns +// [google.rpc.Code.INVALID_ARGUMENT][]. The input size is limited to 5000 +// characters. +type SynthesisInput struct { + // The input source, which is either plain text or SSML. + // + // Types that are valid to be assigned to InputSource: + // *SynthesisInput_Text + // *SynthesisInput_Ssml + InputSource isSynthesisInput_InputSource `protobuf_oneof:"input_source"` +} + +func (m *SynthesisInput) Reset() { *m = SynthesisInput{} } +func (m *SynthesisInput) String() string { return proto.CompactTextString(m) } +func (*SynthesisInput) ProtoMessage() {} +func (*SynthesisInput) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +type isSynthesisInput_InputSource interface { + isSynthesisInput_InputSource() +} + +type SynthesisInput_Text struct { + Text string `protobuf:"bytes,1,opt,name=text,oneof"` +} +type SynthesisInput_Ssml struct { + Ssml string `protobuf:"bytes,2,opt,name=ssml,oneof"` +} + +func (*SynthesisInput_Text) isSynthesisInput_InputSource() {} +func (*SynthesisInput_Ssml) isSynthesisInput_InputSource() {} + +func (m *SynthesisInput) GetInputSource() isSynthesisInput_InputSource { + if m != nil { + return m.InputSource + } + return nil +} + +func (m *SynthesisInput) GetText() string { + if x, ok := m.GetInputSource().(*SynthesisInput_Text); ok { + return x.Text + } + return "" +} + +func (m *SynthesisInput) GetSsml() string { + if x, ok := m.GetInputSource().(*SynthesisInput_Ssml); ok { + return x.Ssml + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
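// (Illustrative sketch, not part of the generated file: the input_source
// oneof is set by choosing exactly one wrapper type, so supplying both text
// and SSML is impossible by construction; the 5000-character limit still
// applies to whichever is set.)
//
//	in := &SynthesisInput{InputSource: &SynthesisInput_Text{Text: "Hello, world"}}
//	// or, for SSML input:
//	in = &SynthesisInput{InputSource: &SynthesisInput_Ssml{Ssml: "<speak>Hello</speak>"}}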
+func (*SynthesisInput) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SynthesisInput_OneofMarshaler, _SynthesisInput_OneofUnmarshaler, _SynthesisInput_OneofSizer, []interface{}{ + (*SynthesisInput_Text)(nil), + (*SynthesisInput_Ssml)(nil), + } +} + +func _SynthesisInput_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SynthesisInput) + // input_source + switch x := m.InputSource.(type) { + case *SynthesisInput_Text: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Text) + case *SynthesisInput_Ssml: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Ssml) + case nil: + default: + return fmt.Errorf("SynthesisInput.InputSource has unexpected type %T", x) + } + return nil +} + +func _SynthesisInput_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SynthesisInput) + switch tag { + case 1: // input_source.text + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.InputSource = &SynthesisInput_Text{x} + return true, err + case 2: // input_source.ssml + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.InputSource = &SynthesisInput_Ssml{x} + return true, err + default: + return false, nil + } +} + +func _SynthesisInput_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SynthesisInput) + // input_source + switch x := m.InputSource.(type) { + case *SynthesisInput_Text: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Text))) + n += len(x.Text) + case *SynthesisInput_Ssml: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Ssml))) + n += len(x.Ssml) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Description of which voice to use for a synthesis request. +type VoiceSelectionParams struct { + // The language (and optionally also the region) of the voice expressed as a + // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g. + // "en-US". Required. This should not include a script tag (e.g. use + // "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred + // from the input provided in the SynthesisInput. The TTS service + // will use this parameter to help choose an appropriate voice. Note that + // the TTS service may choose a voice with a slightly different language code + // than the one selected; it may substitute a different region + // (e.g. using en-US rather than en-CA if there isn't a Canadian voice + // available), or even a different language, e.g. using "nb" (Norwegian + // Bokmal) instead of "no" (Norwegian)". + LanguageCode string `protobuf:"bytes,1,opt,name=language_code,json=languageCode" json:"language_code,omitempty"` + // The name of the voice. Optional; if not set, the service will choose a + // voice based on the other parameters such as language_code and gender. + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + // The preferred gender of the voice. Optional; if not set, the service will + // choose a voice based on the other parameters such as language_code and + // name. 
Note that this is only a preference, not requirement; if a + // voice of the appropriate gender is not available, the synthesizer should + // substitute a voice with a different gender rather than failing the request. + SsmlGender SsmlVoiceGender `protobuf:"varint,3,opt,name=ssml_gender,json=ssmlGender,enum=google.cloud.texttospeech.v1beta1.SsmlVoiceGender" json:"ssml_gender,omitempty"` +} + +func (m *VoiceSelectionParams) Reset() { *m = VoiceSelectionParams{} } +func (m *VoiceSelectionParams) String() string { return proto.CompactTextString(m) } +func (*VoiceSelectionParams) ProtoMessage() {} +func (*VoiceSelectionParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *VoiceSelectionParams) GetLanguageCode() string { + if m != nil { + return m.LanguageCode + } + return "" +} + +func (m *VoiceSelectionParams) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *VoiceSelectionParams) GetSsmlGender() SsmlVoiceGender { + if m != nil { + return m.SsmlGender + } + return SsmlVoiceGender_SSML_VOICE_GENDER_UNSPECIFIED +} + +// Description of audio data to be synthesized. +type AudioConfig struct { + // Required. The format of the requested audio byte stream. + AudioEncoding AudioEncoding `protobuf:"varint,1,opt,name=audio_encoding,json=audioEncoding,enum=google.cloud.texttospeech.v1beta1.AudioEncoding" json:"audio_encoding,omitempty"` + // Optional speaking rate/speed, in the range [0.25, 4.0]. 1.0 is the normal + // native speed supported by the specific voice. 2.0 is twice as fast, and + // 0.5 is half as fast. If unset(0.0), defaults to the native 1.0 speed. Any + // other values < 0.25 or > 4.0 will return an error. + SpeakingRate float64 `protobuf:"fixed64,2,opt,name=speaking_rate,json=speakingRate" json:"speaking_rate,omitempty"` + // Optional speaking pitch, in the range [-20.0, 20.0]. 20 means increase 20 + // semitones from the original pitch. -20 means decrease 20 semitones from the + // original pitch. + Pitch float64 `protobuf:"fixed64,3,opt,name=pitch" json:"pitch,omitempty"` + // Optional volume gain (in dB) of the normal native volume supported by the + // specific voice, in the range [-96.0, 16.0]. If unset, or set to a value of + // 0.0 (dB), will play at normal native signal amplitude. A value of -6.0 (dB) + // will play at approximately half the amplitude of the normal native signal + // amplitude. A value of +6.0 (dB) will play at approximately twice the + // amplitude of the normal native signal amplitude. Strongly recommend not to + // exceed +10 (dB) as there's usually no effective increase in loudness for + // any value greater than that. + VolumeGainDb float64 `protobuf:"fixed64,4,opt,name=volume_gain_db,json=volumeGainDb" json:"volume_gain_db,omitempty"` + // The synthesis sample rate (in hertz) for this audio. Optional. If this is + // different from the voice's natural sample rate, then the synthesizer will + // honor this request by converting to the desired sample rate (which might + // result in worse audio quality), unless the specified sample rate is not + // supported for the encoding chosen, in which case it will fail the request + // and return [google.rpc.Code.INVALID_ARGUMENT][]. 
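// (Illustrative sketch, not part of the generated file: an AudioConfig
// asking for 24 kHz MP3 output at a slightly faster-than-native rate. The
// concrete values are assumptions chosen within the documented ranges.)
//
//	cfg := &AudioConfig{
//		AudioEncoding:   AudioEncoding_MP3,
//		SpeakingRate:    1.25,  // allowed range is [0.25, 4.0]
//		SampleRateHertz: 24000, // omit to keep the voice's natural rate
//	}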
+ SampleRateHertz int32 `protobuf:"varint,5,opt,name=sample_rate_hertz,json=sampleRateHertz" json:"sample_rate_hertz,omitempty"` +} + +func (m *AudioConfig) Reset() { *m = AudioConfig{} } +func (m *AudioConfig) String() string { return proto.CompactTextString(m) } +func (*AudioConfig) ProtoMessage() {} +func (*AudioConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *AudioConfig) GetAudioEncoding() AudioEncoding { + if m != nil { + return m.AudioEncoding + } + return AudioEncoding_AUDIO_ENCODING_UNSPECIFIED +} + +func (m *AudioConfig) GetSpeakingRate() float64 { + if m != nil { + return m.SpeakingRate + } + return 0 +} + +func (m *AudioConfig) GetPitch() float64 { + if m != nil { + return m.Pitch + } + return 0 +} + +func (m *AudioConfig) GetVolumeGainDb() float64 { + if m != nil { + return m.VolumeGainDb + } + return 0 +} + +func (m *AudioConfig) GetSampleRateHertz() int32 { + if m != nil { + return m.SampleRateHertz + } + return 0 +} + +// The message returned to the client by the `SynthesizeSpeech` method. +type SynthesizeSpeechResponse struct { + // The audio data bytes encoded as specified in the request, including the + // header (For LINEAR16 audio, we include the WAV header). Note: as + // with all bytes fields, protobuffers use a pure binary representation, + // whereas JSON representations use base64. + AudioContent []byte `protobuf:"bytes,1,opt,name=audio_content,json=audioContent,proto3" json:"audio_content,omitempty"` +} + +func (m *SynthesizeSpeechResponse) Reset() { *m = SynthesizeSpeechResponse{} } +func (m *SynthesizeSpeechResponse) String() string { return proto.CompactTextString(m) } +func (*SynthesizeSpeechResponse) ProtoMessage() {} +func (*SynthesizeSpeechResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *SynthesizeSpeechResponse) GetAudioContent() []byte { + if m != nil { + return m.AudioContent + } + return nil +} + +func init() { + proto.RegisterType((*ListVoicesRequest)(nil), "google.cloud.texttospeech.v1beta1.ListVoicesRequest") + proto.RegisterType((*ListVoicesResponse)(nil), "google.cloud.texttospeech.v1beta1.ListVoicesResponse") + proto.RegisterType((*Voice)(nil), "google.cloud.texttospeech.v1beta1.Voice") + proto.RegisterType((*SynthesizeSpeechRequest)(nil), "google.cloud.texttospeech.v1beta1.SynthesizeSpeechRequest") + proto.RegisterType((*SynthesisInput)(nil), "google.cloud.texttospeech.v1beta1.SynthesisInput") + proto.RegisterType((*VoiceSelectionParams)(nil), "google.cloud.texttospeech.v1beta1.VoiceSelectionParams") + proto.RegisterType((*AudioConfig)(nil), "google.cloud.texttospeech.v1beta1.AudioConfig") + proto.RegisterType((*SynthesizeSpeechResponse)(nil), "google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse") + proto.RegisterEnum("google.cloud.texttospeech.v1beta1.SsmlVoiceGender", SsmlVoiceGender_name, SsmlVoiceGender_value) + proto.RegisterEnum("google.cloud.texttospeech.v1beta1.AudioEncoding", AudioEncoding_name, AudioEncoding_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for TextToSpeech service + +type TextToSpeechClient interface { + // Returns a list of [Voice][google.cloud.texttospeech.v1beta1.Voice] + // supported for synthesis. 
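// (Illustrative sketch, not part of the generated file: end-to-end use of
// the generated client. A dialed *grpc.ClientConn named conn and a ctx are
// assumptions.)
//
//	c := NewTextToSpeechClient(conn)
//	resp, err := c.SynthesizeSpeech(ctx, &SynthesizeSpeechRequest{
//		Input:       &SynthesisInput{InputSource: &SynthesisInput_Text{Text: "Hi"}},
//		Voice:       &VoiceSelectionParams{LanguageCode: "en-US"},
//		AudioConfig: &AudioConfig{AudioEncoding: AudioEncoding_LINEAR16},
//	})
//	// On success (err == nil), resp.GetAudioContent() holds WAV-framed LINEAR16 bytes.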
+ ListVoices(ctx context.Context, in *ListVoicesRequest, opts ...grpc.CallOption) (*ListVoicesResponse, error) + // Synthesizes speech synchronously: receive results after all text input + // has been processed. + SynthesizeSpeech(ctx context.Context, in *SynthesizeSpeechRequest, opts ...grpc.CallOption) (*SynthesizeSpeechResponse, error) +} + +type textToSpeechClient struct { + cc *grpc.ClientConn +} + +func NewTextToSpeechClient(cc *grpc.ClientConn) TextToSpeechClient { + return &textToSpeechClient{cc} +} + +func (c *textToSpeechClient) ListVoices(ctx context.Context, in *ListVoicesRequest, opts ...grpc.CallOption) (*ListVoicesResponse, error) { + out := new(ListVoicesResponse) + err := grpc.Invoke(ctx, "/google.cloud.texttospeech.v1beta1.TextToSpeech/ListVoices", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *textToSpeechClient) SynthesizeSpeech(ctx context.Context, in *SynthesizeSpeechRequest, opts ...grpc.CallOption) (*SynthesizeSpeechResponse, error) { + out := new(SynthesizeSpeechResponse) + err := grpc.Invoke(ctx, "/google.cloud.texttospeech.v1beta1.TextToSpeech/SynthesizeSpeech", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for TextToSpeech service + +type TextToSpeechServer interface { + // Returns a list of [Voice][google.cloud.texttospeech.v1beta1.Voice] + // supported for synthesis. + ListVoices(context.Context, *ListVoicesRequest) (*ListVoicesResponse, error) + // Synthesizes speech synchronously: receive results after all text input + // has been processed. + SynthesizeSpeech(context.Context, *SynthesizeSpeechRequest) (*SynthesizeSpeechResponse, error) +} + +func RegisterTextToSpeechServer(s *grpc.Server, srv TextToSpeechServer) { + s.RegisterService(&_TextToSpeech_serviceDesc, srv) +} + +func _TextToSpeech_ListVoices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListVoicesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TextToSpeechServer).ListVoices(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.texttospeech.v1beta1.TextToSpeech/ListVoices", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TextToSpeechServer).ListVoices(ctx, req.(*ListVoicesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TextToSpeech_SynthesizeSpeech_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SynthesizeSpeechRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TextToSpeechServer).SynthesizeSpeech(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.texttospeech.v1beta1.TextToSpeech/SynthesizeSpeech", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TextToSpeechServer).SynthesizeSpeech(ctx, req.(*SynthesizeSpeechRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _TextToSpeech_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.texttospeech.v1beta1.TextToSpeech", + HandlerType: (*TextToSpeechServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListVoices", + Handler: _TextToSpeech_ListVoices_Handler, + }, + { + MethodName: "SynthesizeSpeech", + Handler: 
+			_TextToSpeech_SynthesizeSpeech_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "google/cloud/texttospeech/v1beta1/cloud_tts.proto",
+}
+
+func init() { proto.RegisterFile("google/cloud/texttospeech/v1beta1/cloud_tts.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+	// 844 bytes of a gzipped FileDescriptorProto
+	// (gzipped FileDescriptorProto bytes elided)
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1p2beta1/geometry.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1p2beta1/geometry.pb.go
new file mode 100644
index 000000000..470f3423f
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1p2beta1/geometry.pb.go
@@ -0,0 +1,183 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/cloud/vision/v1p2beta1/geometry.proto
+
+/*
+Package vision is a generated protocol buffer package.
+ +It is generated from these files: + google/cloud/vision/v1p2beta1/geometry.proto + google/cloud/vision/v1p2beta1/image_annotator.proto + google/cloud/vision/v1p2beta1/text_annotation.proto + google/cloud/vision/v1p2beta1/web_detection.proto + +It has these top-level messages: + Vertex + BoundingPoly + Position + Feature + ImageSource + Image + FaceAnnotation + LocationInfo + Property + EntityAnnotation + SafeSearchAnnotation + LatLongRect + ColorInfo + DominantColorsAnnotation + ImageProperties + CropHint + CropHintsAnnotation + CropHintsParams + WebDetectionParams + ImageContext + AnnotateImageRequest + ImageAnnotationContext + AnnotateImageResponse + BatchAnnotateImagesRequest + BatchAnnotateImagesResponse + AsyncAnnotateFileRequest + AsyncAnnotateFileResponse + AsyncBatchAnnotateFilesRequest + AsyncBatchAnnotateFilesResponse + InputConfig + OutputConfig + GcsSource + GcsDestination + OperationMetadata + TextAnnotation + Page + Block + Paragraph + Word + Symbol + WebDetection +*/ +package vision + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A vertex represents a 2D point in the image. +// NOTE: the vertex coordinates are in the same scale as the original image. +type Vertex struct { + // X coordinate. + X int32 `protobuf:"varint,1,opt,name=x" json:"x,omitempty"` + // Y coordinate. + Y int32 `protobuf:"varint,2,opt,name=y" json:"y,omitempty"` +} + +func (m *Vertex) Reset() { *m = Vertex{} } +func (m *Vertex) String() string { return proto.CompactTextString(m) } +func (*Vertex) ProtoMessage() {} +func (*Vertex) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Vertex) GetX() int32 { + if m != nil { + return m.X + } + return 0 +} + +func (m *Vertex) GetY() int32 { + if m != nil { + return m.Y + } + return 0 +} + +// A bounding polygon for the detected image annotation. +type BoundingPoly struct { + // The bounding polygon vertices. + Vertices []*Vertex `protobuf:"bytes,1,rep,name=vertices" json:"vertices,omitempty"` +} + +func (m *BoundingPoly) Reset() { *m = BoundingPoly{} } +func (m *BoundingPoly) String() string { return proto.CompactTextString(m) } +func (*BoundingPoly) ProtoMessage() {} +func (*BoundingPoly) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *BoundingPoly) GetVertices() []*Vertex { + if m != nil { + return m.Vertices + } + return nil +} + +// A 3D position in the image, used primarily for Face detection landmarks. +// A valid Position must have both x and y coordinates. +// The position coordinates are in the same scale as the original image. +type Position struct { + // X coordinate. + X float32 `protobuf:"fixed32,1,opt,name=x" json:"x,omitempty"` + // Y coordinate. + Y float32 `protobuf:"fixed32,2,opt,name=y" json:"y,omitempty"` + // Z coordinate (or depth). 
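// (Illustrative sketch, not part of the generated file: the geometry types
// compose directly, e.g. a rectangular bounding polygon built from four
// vertices in image coordinates.)
//
//	poly := &BoundingPoly{Vertices: []*Vertex{
//		{X: 0, Y: 0}, {X: 100, Y: 0}, {X: 100, Y: 50}, {X: 0, Y: 50},
//	}}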
+	Z float32 `protobuf:"fixed32,3,opt,name=z" json:"z,omitempty"`
+}
+
+func (m *Position) Reset()                    { *m = Position{} }
+func (m *Position) String() string            { return proto.CompactTextString(m) }
+func (*Position) ProtoMessage()               {}
+func (*Position) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+func (m *Position) GetX() float32 {
+	if m != nil {
+		return m.X
+	}
+	return 0
+}
+
+func (m *Position) GetY() float32 {
+	if m != nil {
+		return m.Y
+	}
+	return 0
+}
+
+func (m *Position) GetZ() float32 {
+	if m != nil {
+		return m.Z
+	}
+	return 0
+}
+
+func init() {
+	proto.RegisterType((*Vertex)(nil), "google.cloud.vision.v1p2beta1.Vertex")
+	proto.RegisterType((*BoundingPoly)(nil), "google.cloud.vision.v1p2beta1.BoundingPoly")
+	proto.RegisterType((*Position)(nil), "google.cloud.vision.v1p2beta1.Position")
+}
+
+func init() { proto.RegisterFile("google/cloud/vision/v1p2beta1/geometry.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+	// 243 bytes of a gzipped FileDescriptorProto
+	// (gzipped FileDescriptorProto bytes elided)
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1p2beta1/image_annotator.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1p2beta1/image_annotator.pb.go
new file mode 100644
index 000000000..e06fe9c7d
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1p2beta1/image_annotator.pb.go
@@ -0,0 +1,1933 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/cloud/vision/v1p2beta1/image_annotator.proto + +package vision + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import google_protobuf3 "github.com/golang/protobuf/ptypes/timestamp" +import google_rpc "google.golang.org/genproto/googleapis/rpc/status" +import google_type "google.golang.org/genproto/googleapis/type/color" +import google_type1 "google.golang.org/genproto/googleapis/type/latlng" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// A bucketized representation of likelihood, which is intended to give clients +// highly stable results across model upgrades. +type Likelihood int32 + +const ( + // Unknown likelihood. + Likelihood_UNKNOWN Likelihood = 0 + // It is very unlikely that the image belongs to the specified vertical. + Likelihood_VERY_UNLIKELY Likelihood = 1 + // It is unlikely that the image belongs to the specified vertical. + Likelihood_UNLIKELY Likelihood = 2 + // It is possible that the image belongs to the specified vertical. + Likelihood_POSSIBLE Likelihood = 3 + // It is likely that the image belongs to the specified vertical. + Likelihood_LIKELY Likelihood = 4 + // It is very likely that the image belongs to the specified vertical. + Likelihood_VERY_LIKELY Likelihood = 5 +) + +var Likelihood_name = map[int32]string{ + 0: "UNKNOWN", + 1: "VERY_UNLIKELY", + 2: "UNLIKELY", + 3: "POSSIBLE", + 4: "LIKELY", + 5: "VERY_LIKELY", +} +var Likelihood_value = map[string]int32{ + "UNKNOWN": 0, + "VERY_UNLIKELY": 1, + "UNLIKELY": 2, + "POSSIBLE": 3, + "LIKELY": 4, + "VERY_LIKELY": 5, +} + +func (x Likelihood) String() string { + return proto.EnumName(Likelihood_name, int32(x)) +} +func (Likelihood) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +// Type of Google Cloud Vision API feature to be extracted. +type Feature_Type int32 + +const ( + // Unspecified feature type. + Feature_TYPE_UNSPECIFIED Feature_Type = 0 + // Run face detection. + Feature_FACE_DETECTION Feature_Type = 1 + // Run landmark detection. + Feature_LANDMARK_DETECTION Feature_Type = 2 + // Run logo detection. + Feature_LOGO_DETECTION Feature_Type = 3 + // Run label detection. + Feature_LABEL_DETECTION Feature_Type = 4 + // Run text detection / optical character recognition (OCR). Text detection + // is optimized for areas of text within a larger image; if the image is + // a document, use `DOCUMENT_TEXT_DETECTION` instead. + Feature_TEXT_DETECTION Feature_Type = 5 + // Run dense text document OCR. Takes precedence when both + // `DOCUMENT_TEXT_DETECTION` and `TEXT_DETECTION` are present. + Feature_DOCUMENT_TEXT_DETECTION Feature_Type = 11 + // Run Safe Search to detect potentially unsafe + // or undesirable content. + Feature_SAFE_SEARCH_DETECTION Feature_Type = 6 + // Compute a set of image properties, such as the + // image's dominant colors. + Feature_IMAGE_PROPERTIES Feature_Type = 7 + // Run crop hints. + Feature_CROP_HINTS Feature_Type = 9 + // Run web detection. 
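// (Illustrative sketch, not part of the generated file: feature types are
// combined into a request's `features` list, each with an optional result
// cap via the Feature struct defined later in this file.)
//
//	feats := []*Feature{
//		{Type: Feature_LABEL_DETECTION, MaxResults: 10},
//		{Type: Feature_DOCUMENT_TEXT_DETECTION}, // MaxResults does not apply here
//	}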
+ Feature_WEB_DETECTION Feature_Type = 10 +) + +var Feature_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "FACE_DETECTION", + 2: "LANDMARK_DETECTION", + 3: "LOGO_DETECTION", + 4: "LABEL_DETECTION", + 5: "TEXT_DETECTION", + 11: "DOCUMENT_TEXT_DETECTION", + 6: "SAFE_SEARCH_DETECTION", + 7: "IMAGE_PROPERTIES", + 9: "CROP_HINTS", + 10: "WEB_DETECTION", +} +var Feature_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "FACE_DETECTION": 1, + "LANDMARK_DETECTION": 2, + "LOGO_DETECTION": 3, + "LABEL_DETECTION": 4, + "TEXT_DETECTION": 5, + "DOCUMENT_TEXT_DETECTION": 11, + "SAFE_SEARCH_DETECTION": 6, + "IMAGE_PROPERTIES": 7, + "CROP_HINTS": 9, + "WEB_DETECTION": 10, +} + +func (x Feature_Type) String() string { + return proto.EnumName(Feature_Type_name, int32(x)) +} +func (Feature_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{0, 0} } + +// Face landmark (feature) type. +// Left and right are defined from the vantage of the viewer of the image +// without considering mirror projections typical of photos. So, `LEFT_EYE`, +// typically, is the person's right eye. +type FaceAnnotation_Landmark_Type int32 + +const ( + // Unknown face landmark detected. Should not be filled. + FaceAnnotation_Landmark_UNKNOWN_LANDMARK FaceAnnotation_Landmark_Type = 0 + // Left eye. + FaceAnnotation_Landmark_LEFT_EYE FaceAnnotation_Landmark_Type = 1 + // Right eye. + FaceAnnotation_Landmark_RIGHT_EYE FaceAnnotation_Landmark_Type = 2 + // Left of left eyebrow. + FaceAnnotation_Landmark_LEFT_OF_LEFT_EYEBROW FaceAnnotation_Landmark_Type = 3 + // Right of left eyebrow. + FaceAnnotation_Landmark_RIGHT_OF_LEFT_EYEBROW FaceAnnotation_Landmark_Type = 4 + // Left of right eyebrow. + FaceAnnotation_Landmark_LEFT_OF_RIGHT_EYEBROW FaceAnnotation_Landmark_Type = 5 + // Right of right eyebrow. + FaceAnnotation_Landmark_RIGHT_OF_RIGHT_EYEBROW FaceAnnotation_Landmark_Type = 6 + // Midpoint between eyes. + FaceAnnotation_Landmark_MIDPOINT_BETWEEN_EYES FaceAnnotation_Landmark_Type = 7 + // Nose tip. + FaceAnnotation_Landmark_NOSE_TIP FaceAnnotation_Landmark_Type = 8 + // Upper lip. + FaceAnnotation_Landmark_UPPER_LIP FaceAnnotation_Landmark_Type = 9 + // Lower lip. + FaceAnnotation_Landmark_LOWER_LIP FaceAnnotation_Landmark_Type = 10 + // Mouth left. + FaceAnnotation_Landmark_MOUTH_LEFT FaceAnnotation_Landmark_Type = 11 + // Mouth right. + FaceAnnotation_Landmark_MOUTH_RIGHT FaceAnnotation_Landmark_Type = 12 + // Mouth center. + FaceAnnotation_Landmark_MOUTH_CENTER FaceAnnotation_Landmark_Type = 13 + // Nose, bottom right. + FaceAnnotation_Landmark_NOSE_BOTTOM_RIGHT FaceAnnotation_Landmark_Type = 14 + // Nose, bottom left. + FaceAnnotation_Landmark_NOSE_BOTTOM_LEFT FaceAnnotation_Landmark_Type = 15 + // Nose, bottom center. + FaceAnnotation_Landmark_NOSE_BOTTOM_CENTER FaceAnnotation_Landmark_Type = 16 + // Left eye, top boundary. + FaceAnnotation_Landmark_LEFT_EYE_TOP_BOUNDARY FaceAnnotation_Landmark_Type = 17 + // Left eye, right corner. + FaceAnnotation_Landmark_LEFT_EYE_RIGHT_CORNER FaceAnnotation_Landmark_Type = 18 + // Left eye, bottom boundary. + FaceAnnotation_Landmark_LEFT_EYE_BOTTOM_BOUNDARY FaceAnnotation_Landmark_Type = 19 + // Left eye, left corner. + FaceAnnotation_Landmark_LEFT_EYE_LEFT_CORNER FaceAnnotation_Landmark_Type = 20 + // Right eye, top boundary. + FaceAnnotation_Landmark_RIGHT_EYE_TOP_BOUNDARY FaceAnnotation_Landmark_Type = 21 + // Right eye, right corner. + FaceAnnotation_Landmark_RIGHT_EYE_RIGHT_CORNER FaceAnnotation_Landmark_Type = 22 + // Right eye, bottom boundary. 
+ FaceAnnotation_Landmark_RIGHT_EYE_BOTTOM_BOUNDARY FaceAnnotation_Landmark_Type = 23 + // Right eye, left corner. + FaceAnnotation_Landmark_RIGHT_EYE_LEFT_CORNER FaceAnnotation_Landmark_Type = 24 + // Left eyebrow, upper midpoint. + FaceAnnotation_Landmark_LEFT_EYEBROW_UPPER_MIDPOINT FaceAnnotation_Landmark_Type = 25 + // Right eyebrow, upper midpoint. + FaceAnnotation_Landmark_RIGHT_EYEBROW_UPPER_MIDPOINT FaceAnnotation_Landmark_Type = 26 + // Left ear tragion. + FaceAnnotation_Landmark_LEFT_EAR_TRAGION FaceAnnotation_Landmark_Type = 27 + // Right ear tragion. + FaceAnnotation_Landmark_RIGHT_EAR_TRAGION FaceAnnotation_Landmark_Type = 28 + // Left eye pupil. + FaceAnnotation_Landmark_LEFT_EYE_PUPIL FaceAnnotation_Landmark_Type = 29 + // Right eye pupil. + FaceAnnotation_Landmark_RIGHT_EYE_PUPIL FaceAnnotation_Landmark_Type = 30 + // Forehead glabella. + FaceAnnotation_Landmark_FOREHEAD_GLABELLA FaceAnnotation_Landmark_Type = 31 + // Chin gnathion. + FaceAnnotation_Landmark_CHIN_GNATHION FaceAnnotation_Landmark_Type = 32 + // Chin left gonion. + FaceAnnotation_Landmark_CHIN_LEFT_GONION FaceAnnotation_Landmark_Type = 33 + // Chin right gonion. + FaceAnnotation_Landmark_CHIN_RIGHT_GONION FaceAnnotation_Landmark_Type = 34 +) + +var FaceAnnotation_Landmark_Type_name = map[int32]string{ + 0: "UNKNOWN_LANDMARK", + 1: "LEFT_EYE", + 2: "RIGHT_EYE", + 3: "LEFT_OF_LEFT_EYEBROW", + 4: "RIGHT_OF_LEFT_EYEBROW", + 5: "LEFT_OF_RIGHT_EYEBROW", + 6: "RIGHT_OF_RIGHT_EYEBROW", + 7: "MIDPOINT_BETWEEN_EYES", + 8: "NOSE_TIP", + 9: "UPPER_LIP", + 10: "LOWER_LIP", + 11: "MOUTH_LEFT", + 12: "MOUTH_RIGHT", + 13: "MOUTH_CENTER", + 14: "NOSE_BOTTOM_RIGHT", + 15: "NOSE_BOTTOM_LEFT", + 16: "NOSE_BOTTOM_CENTER", + 17: "LEFT_EYE_TOP_BOUNDARY", + 18: "LEFT_EYE_RIGHT_CORNER", + 19: "LEFT_EYE_BOTTOM_BOUNDARY", + 20: "LEFT_EYE_LEFT_CORNER", + 21: "RIGHT_EYE_TOP_BOUNDARY", + 22: "RIGHT_EYE_RIGHT_CORNER", + 23: "RIGHT_EYE_BOTTOM_BOUNDARY", + 24: "RIGHT_EYE_LEFT_CORNER", + 25: "LEFT_EYEBROW_UPPER_MIDPOINT", + 26: "RIGHT_EYEBROW_UPPER_MIDPOINT", + 27: "LEFT_EAR_TRAGION", + 28: "RIGHT_EAR_TRAGION", + 29: "LEFT_EYE_PUPIL", + 30: "RIGHT_EYE_PUPIL", + 31: "FOREHEAD_GLABELLA", + 32: "CHIN_GNATHION", + 33: "CHIN_LEFT_GONION", + 34: "CHIN_RIGHT_GONION", +} +var FaceAnnotation_Landmark_Type_value = map[string]int32{ + "UNKNOWN_LANDMARK": 0, + "LEFT_EYE": 1, + "RIGHT_EYE": 2, + "LEFT_OF_LEFT_EYEBROW": 3, + "RIGHT_OF_LEFT_EYEBROW": 4, + "LEFT_OF_RIGHT_EYEBROW": 5, + "RIGHT_OF_RIGHT_EYEBROW": 6, + "MIDPOINT_BETWEEN_EYES": 7, + "NOSE_TIP": 8, + "UPPER_LIP": 9, + "LOWER_LIP": 10, + "MOUTH_LEFT": 11, + "MOUTH_RIGHT": 12, + "MOUTH_CENTER": 13, + "NOSE_BOTTOM_RIGHT": 14, + "NOSE_BOTTOM_LEFT": 15, + "NOSE_BOTTOM_CENTER": 16, + "LEFT_EYE_TOP_BOUNDARY": 17, + "LEFT_EYE_RIGHT_CORNER": 18, + "LEFT_EYE_BOTTOM_BOUNDARY": 19, + "LEFT_EYE_LEFT_CORNER": 20, + "RIGHT_EYE_TOP_BOUNDARY": 21, + "RIGHT_EYE_RIGHT_CORNER": 22, + "RIGHT_EYE_BOTTOM_BOUNDARY": 23, + "RIGHT_EYE_LEFT_CORNER": 24, + "LEFT_EYEBROW_UPPER_MIDPOINT": 25, + "RIGHT_EYEBROW_UPPER_MIDPOINT": 26, + "LEFT_EAR_TRAGION": 27, + "RIGHT_EAR_TRAGION": 28, + "LEFT_EYE_PUPIL": 29, + "RIGHT_EYE_PUPIL": 30, + "FOREHEAD_GLABELLA": 31, + "CHIN_GNATHION": 32, + "CHIN_LEFT_GONION": 33, + "CHIN_RIGHT_GONION": 34, +} + +func (x FaceAnnotation_Landmark_Type) String() string { + return proto.EnumName(FaceAnnotation_Landmark_Type_name, int32(x)) +} +func (FaceAnnotation_Landmark_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor1, []int{3, 0, 0} +} + +// Batch operation states. 
+type OperationMetadata_State int32 + +const ( + // Invalid. + OperationMetadata_STATE_UNSPECIFIED OperationMetadata_State = 0 + // Request is received. + OperationMetadata_CREATED OperationMetadata_State = 1 + // Request is actively being processed. + OperationMetadata_RUNNING OperationMetadata_State = 2 + // The batch processing is done. + OperationMetadata_DONE OperationMetadata_State = 3 + // The batch processing was cancelled. + OperationMetadata_CANCELLED OperationMetadata_State = 4 +) + +var OperationMetadata_State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "CREATED", + 2: "RUNNING", + 3: "DONE", + 4: "CANCELLED", +} +var OperationMetadata_State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "CREATED": 1, + "RUNNING": 2, + "DONE": 3, + "CANCELLED": 4, +} + +func (x OperationMetadata_State) String() string { + return proto.EnumName(OperationMetadata_State_name, int32(x)) +} +func (OperationMetadata_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{30, 0} } + +// The type of Google Cloud Vision API detection to perform, and the maximum +// number of results to return for that type. Multiple `Feature` objects can +// be specified in the `features` list. +type Feature struct { + // The feature type. + Type Feature_Type `protobuf:"varint,1,opt,name=type,enum=google.cloud.vision.v1p2beta1.Feature_Type" json:"type,omitempty"` + // Maximum number of results of this type. Does not apply to + // `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`. + MaxResults int32 `protobuf:"varint,2,opt,name=max_results,json=maxResults" json:"max_results,omitempty"` + // Model to use for the feature. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + Model string `protobuf:"bytes,3,opt,name=model" json:"model,omitempty"` +} + +func (m *Feature) Reset() { *m = Feature{} } +func (m *Feature) String() string { return proto.CompactTextString(m) } +func (*Feature) ProtoMessage() {} +func (*Feature) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *Feature) GetType() Feature_Type { + if m != nil { + return m.Type + } + return Feature_TYPE_UNSPECIFIED +} + +func (m *Feature) GetMaxResults() int32 { + if m != nil { + return m.MaxResults + } + return 0 +} + +func (m *Feature) GetModel() string { + if m != nil { + return m.Model + } + return "" +} + +// External image source (Google Cloud Storage or web URL image location). +type ImageSource struct { + // **Use `image_uri` instead.** + // + // The Google Cloud Storage URI of the form + // `gs://bucket_name/object_name`. Object versioning is not supported. See + // [Google Cloud Storage Request + // URIs](https://cloud.google.com/storage/docs/reference-uris) for more info. + GcsImageUri string `protobuf:"bytes,1,opt,name=gcs_image_uri,json=gcsImageUri" json:"gcs_image_uri,omitempty"` + // The URI of the source image. Can be either: + // + // 1. A Google Cloud Storage URI of the form + // `gs://bucket_name/object_name`. Object versioning is not supported. See + // [Google Cloud Storage Request + // URIs](https://cloud.google.com/storage/docs/reference-uris) for more + // info. + // + // 2. A publicly-accessible image HTTP/HTTPS URL. When fetching images from + // HTTP/HTTPS URLs, Google cannot guarantee that the request will be + // completed. Your request may fail if the specified host denies the + // request (e.g. due to request throttling or DOS prevention), or if Google + // throttles requests to the site for abuse prevention. 
You should not + // depend on externally-hosted images for production applications. + // + // When both `gcs_image_uri` and `image_uri` are specified, `image_uri` takes + // precedence. + ImageUri string `protobuf:"bytes,2,opt,name=image_uri,json=imageUri" json:"image_uri,omitempty"` +} + +func (m *ImageSource) Reset() { *m = ImageSource{} } +func (m *ImageSource) String() string { return proto.CompactTextString(m) } +func (*ImageSource) ProtoMessage() {} +func (*ImageSource) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *ImageSource) GetGcsImageUri() string { + if m != nil { + return m.GcsImageUri + } + return "" +} + +func (m *ImageSource) GetImageUri() string { + if m != nil { + return m.ImageUri + } + return "" +} + +// Client image to perform Google Cloud Vision API tasks over. +type Image struct { + // Image content, represented as a stream of bytes. + // Note: As with all `bytes` fields, protobuffers use a pure binary + // representation, whereas JSON representations use base64. + Content []byte `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` + // Google Cloud Storage image location, or publicly-accessible image + // URL. If both `content` and `source` are provided for an image, `content` + // takes precedence and is used to perform the image annotation request. + Source *ImageSource `protobuf:"bytes,2,opt,name=source" json:"source,omitempty"` +} + +func (m *Image) Reset() { *m = Image{} } +func (m *Image) String() string { return proto.CompactTextString(m) } +func (*Image) ProtoMessage() {} +func (*Image) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *Image) GetContent() []byte { + if m != nil { + return m.Content + } + return nil +} + +func (m *Image) GetSource() *ImageSource { + if m != nil { + return m.Source + } + return nil +} + +// A face annotation object contains the results of face detection. +type FaceAnnotation struct { + // The bounding polygon around the face. The coordinates of the bounding box + // are in the original image's scale, as returned in `ImageParams`. + // The bounding box is computed to "frame" the face in accordance with human + // expectations. It is based on the landmarker results. + // Note that one or more x and/or y coordinates may not be generated in the + // `BoundingPoly` (the polygon will be unbounded) if only a partial face + // appears in the image to be annotated. + BoundingPoly *BoundingPoly `protobuf:"bytes,1,opt,name=bounding_poly,json=boundingPoly" json:"bounding_poly,omitempty"` + // The `fd_bounding_poly` bounding polygon is tighter than the + // `boundingPoly`, and encloses only the skin part of the face. Typically, it + // is used to eliminate the face from any image analysis that detects the + // "amount of skin" visible in an image. It is not based on the + // landmarker results, only on the initial face detection, hence + // the fd (face detection) prefix. + FdBoundingPoly *BoundingPoly `protobuf:"bytes,2,opt,name=fd_bounding_poly,json=fdBoundingPoly" json:"fd_bounding_poly,omitempty"` + // Detected face landmarks. + Landmarks []*FaceAnnotation_Landmark `protobuf:"bytes,3,rep,name=landmarks" json:"landmarks,omitempty"` + // Roll angle, which indicates the amount of clockwise/anti-clockwise rotation + // of the face relative to the image vertical about the axis perpendicular to + // the face. Range [-180,180]. 
+ RollAngle float32 `protobuf:"fixed32,4,opt,name=roll_angle,json=rollAngle" json:"roll_angle,omitempty"` + // Yaw angle, which indicates the leftward/rightward angle that the face is + // pointing relative to the vertical plane perpendicular to the image. Range + // [-180,180]. + PanAngle float32 `protobuf:"fixed32,5,opt,name=pan_angle,json=panAngle" json:"pan_angle,omitempty"` + // Pitch angle, which indicates the upwards/downwards angle that the face is + // pointing relative to the image's horizontal plane. Range [-180,180]. + TiltAngle float32 `protobuf:"fixed32,6,opt,name=tilt_angle,json=tiltAngle" json:"tilt_angle,omitempty"` + // Detection confidence. Range [0, 1]. + DetectionConfidence float32 `protobuf:"fixed32,7,opt,name=detection_confidence,json=detectionConfidence" json:"detection_confidence,omitempty"` + // Face landmarking confidence. Range [0, 1]. + LandmarkingConfidence float32 `protobuf:"fixed32,8,opt,name=landmarking_confidence,json=landmarkingConfidence" json:"landmarking_confidence,omitempty"` + // Joy likelihood. + JoyLikelihood Likelihood `protobuf:"varint,9,opt,name=joy_likelihood,json=joyLikelihood,enum=google.cloud.vision.v1p2beta1.Likelihood" json:"joy_likelihood,omitempty"` + // Sorrow likelihood. + SorrowLikelihood Likelihood `protobuf:"varint,10,opt,name=sorrow_likelihood,json=sorrowLikelihood,enum=google.cloud.vision.v1p2beta1.Likelihood" json:"sorrow_likelihood,omitempty"` + // Anger likelihood. + AngerLikelihood Likelihood `protobuf:"varint,11,opt,name=anger_likelihood,json=angerLikelihood,enum=google.cloud.vision.v1p2beta1.Likelihood" json:"anger_likelihood,omitempty"` + // Surprise likelihood. + SurpriseLikelihood Likelihood `protobuf:"varint,12,opt,name=surprise_likelihood,json=surpriseLikelihood,enum=google.cloud.vision.v1p2beta1.Likelihood" json:"surprise_likelihood,omitempty"` + // Under-exposed likelihood. + UnderExposedLikelihood Likelihood `protobuf:"varint,13,opt,name=under_exposed_likelihood,json=underExposedLikelihood,enum=google.cloud.vision.v1p2beta1.Likelihood" json:"under_exposed_likelihood,omitempty"` + // Blurred likelihood. + BlurredLikelihood Likelihood `protobuf:"varint,14,opt,name=blurred_likelihood,json=blurredLikelihood,enum=google.cloud.vision.v1p2beta1.Likelihood" json:"blurred_likelihood,omitempty"` + // Headwear likelihood. 
+ HeadwearLikelihood Likelihood `protobuf:"varint,15,opt,name=headwear_likelihood,json=headwearLikelihood,enum=google.cloud.vision.v1p2beta1.Likelihood" json:"headwear_likelihood,omitempty"` +} + +func (m *FaceAnnotation) Reset() { *m = FaceAnnotation{} } +func (m *FaceAnnotation) String() string { return proto.CompactTextString(m) } +func (*FaceAnnotation) ProtoMessage() {} +func (*FaceAnnotation) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *FaceAnnotation) GetBoundingPoly() *BoundingPoly { + if m != nil { + return m.BoundingPoly + } + return nil +} + +func (m *FaceAnnotation) GetFdBoundingPoly() *BoundingPoly { + if m != nil { + return m.FdBoundingPoly + } + return nil +} + +func (m *FaceAnnotation) GetLandmarks() []*FaceAnnotation_Landmark { + if m != nil { + return m.Landmarks + } + return nil +} + +func (m *FaceAnnotation) GetRollAngle() float32 { + if m != nil { + return m.RollAngle + } + return 0 +} + +func (m *FaceAnnotation) GetPanAngle() float32 { + if m != nil { + return m.PanAngle + } + return 0 +} + +func (m *FaceAnnotation) GetTiltAngle() float32 { + if m != nil { + return m.TiltAngle + } + return 0 +} + +func (m *FaceAnnotation) GetDetectionConfidence() float32 { + if m != nil { + return m.DetectionConfidence + } + return 0 +} + +func (m *FaceAnnotation) GetLandmarkingConfidence() float32 { + if m != nil { + return m.LandmarkingConfidence + } + return 0 +} + +func (m *FaceAnnotation) GetJoyLikelihood() Likelihood { + if m != nil { + return m.JoyLikelihood + } + return Likelihood_UNKNOWN +} + +func (m *FaceAnnotation) GetSorrowLikelihood() Likelihood { + if m != nil { + return m.SorrowLikelihood + } + return Likelihood_UNKNOWN +} + +func (m *FaceAnnotation) GetAngerLikelihood() Likelihood { + if m != nil { + return m.AngerLikelihood + } + return Likelihood_UNKNOWN +} + +func (m *FaceAnnotation) GetSurpriseLikelihood() Likelihood { + if m != nil { + return m.SurpriseLikelihood + } + return Likelihood_UNKNOWN +} + +func (m *FaceAnnotation) GetUnderExposedLikelihood() Likelihood { + if m != nil { + return m.UnderExposedLikelihood + } + return Likelihood_UNKNOWN +} + +func (m *FaceAnnotation) GetBlurredLikelihood() Likelihood { + if m != nil { + return m.BlurredLikelihood + } + return Likelihood_UNKNOWN +} + +func (m *FaceAnnotation) GetHeadwearLikelihood() Likelihood { + if m != nil { + return m.HeadwearLikelihood + } + return Likelihood_UNKNOWN +} + +// A face-specific landmark (for example, a face feature). +type FaceAnnotation_Landmark struct { + // Face landmark type. + Type FaceAnnotation_Landmark_Type `protobuf:"varint,3,opt,name=type,enum=google.cloud.vision.v1p2beta1.FaceAnnotation_Landmark_Type" json:"type,omitempty"` + // Face landmark position. + Position *Position `protobuf:"bytes,4,opt,name=position" json:"position,omitempty"` +} + +func (m *FaceAnnotation_Landmark) Reset() { *m = FaceAnnotation_Landmark{} } +func (m *FaceAnnotation_Landmark) String() string { return proto.CompactTextString(m) } +func (*FaceAnnotation_Landmark) ProtoMessage() {} +func (*FaceAnnotation_Landmark) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3, 0} } + +func (m *FaceAnnotation_Landmark) GetType() FaceAnnotation_Landmark_Type { + if m != nil { + return m.Type + } + return FaceAnnotation_Landmark_UNKNOWN_LANDMARK +} + +func (m *FaceAnnotation_Landmark) GetPosition() *Position { + if m != nil { + return m.Position + } + return nil +} + +// Detected entity location information. +type LocationInfo struct { + // lat/long location coordinates. 
+ LatLng *google_type1.LatLng `protobuf:"bytes,1,opt,name=lat_lng,json=latLng" json:"lat_lng,omitempty"` +} + +func (m *LocationInfo) Reset() { *m = LocationInfo{} } +func (m *LocationInfo) String() string { return proto.CompactTextString(m) } +func (*LocationInfo) ProtoMessage() {} +func (*LocationInfo) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +func (m *LocationInfo) GetLatLng() *google_type1.LatLng { + if m != nil { + return m.LatLng + } + return nil +} + +// A `Property` consists of a user-supplied name/value pair. +type Property struct { + // Name of the property. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Value of the property. + Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + // Value of numeric properties. + Uint64Value uint64 `protobuf:"varint,3,opt,name=uint64_value,json=uint64Value" json:"uint64_value,omitempty"` +} + +func (m *Property) Reset() { *m = Property{} } +func (m *Property) String() string { return proto.CompactTextString(m) } +func (*Property) ProtoMessage() {} +func (*Property) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } + +func (m *Property) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Property) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (m *Property) GetUint64Value() uint64 { + if m != nil { + return m.Uint64Value + } + return 0 +} + +// Set of detected entity features. +type EntityAnnotation struct { + // Opaque entity ID. Some IDs may be available in + // [Google Knowledge Graph Search + // API](https://developers.google.com/knowledge-graph/). + Mid string `protobuf:"bytes,1,opt,name=mid" json:"mid,omitempty"` + // The language code for the locale in which the entity textual + // `description` is expressed. + Locale string `protobuf:"bytes,2,opt,name=locale" json:"locale,omitempty"` + // Entity textual description, expressed in its `locale` language. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // Overall score of the result. Range [0, 1]. + Score float32 `protobuf:"fixed32,4,opt,name=score" json:"score,omitempty"` + // **Deprecated. Use `score` instead.** + // The accuracy of the entity detection in an image. + // For example, for an image in which the "Eiffel Tower" entity is detected, + // this field represents the confidence that there is a tower in the query + // image. Range [0, 1]. + Confidence float32 `protobuf:"fixed32,5,opt,name=confidence" json:"confidence,omitempty"` + // The relevancy of the ICA (Image Content Annotation) label to the + // image. For example, the relevancy of "tower" is likely higher to an image + // containing the detected "Eiffel Tower" than to an image containing a + // detected distant towering building, even though the confidence that + // there is a tower in each image may be the same. Range [0, 1]. + Topicality float32 `protobuf:"fixed32,6,opt,name=topicality" json:"topicality,omitempty"` + // Image region to which this entity belongs. Not produced + // for `LABEL_DETECTION` features. + BoundingPoly *BoundingPoly `protobuf:"bytes,7,opt,name=bounding_poly,json=boundingPoly" json:"bounding_poly,omitempty"` + // The location information for the detected entity. Multiple + // `LocationInfo` elements can be present because one location may + // indicate the location of the scene in the image, and another location + // may indicate the location of the place where the image was taken. 
+	// Location information is usually present for landmarks.
+	Locations []*LocationInfo `protobuf:"bytes,8,rep,name=locations" json:"locations,omitempty"`
+	// Some entities may have optional user-supplied `Property` (name/value)
+	// fields, such as a score or string that qualifies the entity.
+	Properties []*Property `protobuf:"bytes,9,rep,name=properties" json:"properties,omitempty"`
+}
+
+func (m *EntityAnnotation) Reset() { *m = EntityAnnotation{} }
+func (m *EntityAnnotation) String() string { return proto.CompactTextString(m) }
+func (*EntityAnnotation) ProtoMessage() {}
+func (*EntityAnnotation) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} }
+
+func (m *EntityAnnotation) GetMid() string {
+	if m != nil {
+		return m.Mid
+	}
+	return ""
+}
+
+func (m *EntityAnnotation) GetLocale() string {
+	if m != nil {
+		return m.Locale
+	}
+	return ""
+}
+
+func (m *EntityAnnotation) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *EntityAnnotation) GetScore() float32 {
+	if m != nil {
+		return m.Score
+	}
+	return 0
+}
+
+func (m *EntityAnnotation) GetConfidence() float32 {
+	if m != nil {
+		return m.Confidence
+	}
+	return 0
+}
+
+func (m *EntityAnnotation) GetTopicality() float32 {
+	if m != nil {
+		return m.Topicality
+	}
+	return 0
+}
+
+func (m *EntityAnnotation) GetBoundingPoly() *BoundingPoly {
+	if m != nil {
+		return m.BoundingPoly
+	}
+	return nil
+}
+
+func (m *EntityAnnotation) GetLocations() []*LocationInfo {
+	if m != nil {
+		return m.Locations
+	}
+	return nil
+}
+
+func (m *EntityAnnotation) GetProperties() []*Property {
+	if m != nil {
+		return m.Properties
+	}
+	return nil
+}
+
+// Set of features pertaining to the image, computed by computer vision
+// methods over safe-search verticals (for example, adult, spoof, medical,
+// violence).
+type SafeSearchAnnotation struct {
+	// Represents the adult content likelihood for the image. Adult content may
+	// contain elements such as nudity, pornographic images or cartoons, or
+	// sexual activities.
+	Adult Likelihood `protobuf:"varint,1,opt,name=adult,enum=google.cloud.vision.v1p2beta1.Likelihood" json:"adult,omitempty"`
+	// Spoof likelihood. The likelihood that a modification
+	// was made to the image's canonical version to make it appear
+	// funny or offensive.
+	Spoof Likelihood `protobuf:"varint,2,opt,name=spoof,enum=google.cloud.vision.v1p2beta1.Likelihood" json:"spoof,omitempty"`
+	// Likelihood that this is a medical image.
+	Medical Likelihood `protobuf:"varint,3,opt,name=medical,enum=google.cloud.vision.v1p2beta1.Likelihood" json:"medical,omitempty"`
+	// Likelihood that this image contains violent content.
+	Violence Likelihood `protobuf:"varint,4,opt,name=violence,enum=google.cloud.vision.v1p2beta1.Likelihood" json:"violence,omitempty"`
+	// Likelihood that the request image contains racy content. Racy content may
+	// include (but is not limited to) skimpy or sheer clothing, strategically
+	// covered nudity, lewd or provocative poses, or close-ups of sensitive
+	// body areas.
+ Racy Likelihood `protobuf:"varint,9,opt,name=racy,enum=google.cloud.vision.v1p2beta1.Likelihood" json:"racy,omitempty"` +} + +func (m *SafeSearchAnnotation) Reset() { *m = SafeSearchAnnotation{} } +func (m *SafeSearchAnnotation) String() string { return proto.CompactTextString(m) } +func (*SafeSearchAnnotation) ProtoMessage() {} +func (*SafeSearchAnnotation) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } + +func (m *SafeSearchAnnotation) GetAdult() Likelihood { + if m != nil { + return m.Adult + } + return Likelihood_UNKNOWN +} + +func (m *SafeSearchAnnotation) GetSpoof() Likelihood { + if m != nil { + return m.Spoof + } + return Likelihood_UNKNOWN +} + +func (m *SafeSearchAnnotation) GetMedical() Likelihood { + if m != nil { + return m.Medical + } + return Likelihood_UNKNOWN +} + +func (m *SafeSearchAnnotation) GetViolence() Likelihood { + if m != nil { + return m.Violence + } + return Likelihood_UNKNOWN +} + +func (m *SafeSearchAnnotation) GetRacy() Likelihood { + if m != nil { + return m.Racy + } + return Likelihood_UNKNOWN +} + +// Rectangle determined by min and max `LatLng` pairs. +type LatLongRect struct { + // Min lat/long pair. + MinLatLng *google_type1.LatLng `protobuf:"bytes,1,opt,name=min_lat_lng,json=minLatLng" json:"min_lat_lng,omitempty"` + // Max lat/long pair. + MaxLatLng *google_type1.LatLng `protobuf:"bytes,2,opt,name=max_lat_lng,json=maxLatLng" json:"max_lat_lng,omitempty"` +} + +func (m *LatLongRect) Reset() { *m = LatLongRect{} } +func (m *LatLongRect) String() string { return proto.CompactTextString(m) } +func (*LatLongRect) ProtoMessage() {} +func (*LatLongRect) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} } + +func (m *LatLongRect) GetMinLatLng() *google_type1.LatLng { + if m != nil { + return m.MinLatLng + } + return nil +} + +func (m *LatLongRect) GetMaxLatLng() *google_type1.LatLng { + if m != nil { + return m.MaxLatLng + } + return nil +} + +// Color information consists of RGB channels, score, and the fraction of +// the image that the color occupies in the image. +type ColorInfo struct { + // RGB components of the color. + Color *google_type.Color `protobuf:"bytes,1,opt,name=color" json:"color,omitempty"` + // Image-specific score for this color. Value in range [0, 1]. + Score float32 `protobuf:"fixed32,2,opt,name=score" json:"score,omitempty"` + // The fraction of pixels the color occupies in the image. + // Value in range [0, 1]. + PixelFraction float32 `protobuf:"fixed32,3,opt,name=pixel_fraction,json=pixelFraction" json:"pixel_fraction,omitempty"` +} + +func (m *ColorInfo) Reset() { *m = ColorInfo{} } +func (m *ColorInfo) String() string { return proto.CompactTextString(m) } +func (*ColorInfo) ProtoMessage() {} +func (*ColorInfo) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9} } + +func (m *ColorInfo) GetColor() *google_type.Color { + if m != nil { + return m.Color + } + return nil +} + +func (m *ColorInfo) GetScore() float32 { + if m != nil { + return m.Score + } + return 0 +} + +func (m *ColorInfo) GetPixelFraction() float32 { + if m != nil { + return m.PixelFraction + } + return 0 +} + +// Set of dominant colors and their corresponding scores. +type DominantColorsAnnotation struct { + // RGB color values with their score and pixel fraction. 
+ Colors []*ColorInfo `protobuf:"bytes,1,rep,name=colors" json:"colors,omitempty"` +} + +func (m *DominantColorsAnnotation) Reset() { *m = DominantColorsAnnotation{} } +func (m *DominantColorsAnnotation) String() string { return proto.CompactTextString(m) } +func (*DominantColorsAnnotation) ProtoMessage() {} +func (*DominantColorsAnnotation) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10} } + +func (m *DominantColorsAnnotation) GetColors() []*ColorInfo { + if m != nil { + return m.Colors + } + return nil +} + +// Stores image properties, such as dominant colors. +type ImageProperties struct { + // If present, dominant colors completed successfully. + DominantColors *DominantColorsAnnotation `protobuf:"bytes,1,opt,name=dominant_colors,json=dominantColors" json:"dominant_colors,omitempty"` +} + +func (m *ImageProperties) Reset() { *m = ImageProperties{} } +func (m *ImageProperties) String() string { return proto.CompactTextString(m) } +func (*ImageProperties) ProtoMessage() {} +func (*ImageProperties) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{11} } + +func (m *ImageProperties) GetDominantColors() *DominantColorsAnnotation { + if m != nil { + return m.DominantColors + } + return nil +} + +// Single crop hint that is used to generate a new crop when serving an image. +type CropHint struct { + // The bounding polygon for the crop region. The coordinates of the bounding + // box are in the original image's scale, as returned in `ImageParams`. + BoundingPoly *BoundingPoly `protobuf:"bytes,1,opt,name=bounding_poly,json=boundingPoly" json:"bounding_poly,omitempty"` + // Confidence of this being a salient region. Range [0, 1]. + Confidence float32 `protobuf:"fixed32,2,opt,name=confidence" json:"confidence,omitempty"` + // Fraction of importance of this salient region with respect to the original + // image. + ImportanceFraction float32 `protobuf:"fixed32,3,opt,name=importance_fraction,json=importanceFraction" json:"importance_fraction,omitempty"` +} + +func (m *CropHint) Reset() { *m = CropHint{} } +func (m *CropHint) String() string { return proto.CompactTextString(m) } +func (*CropHint) ProtoMessage() {} +func (*CropHint) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{12} } + +func (m *CropHint) GetBoundingPoly() *BoundingPoly { + if m != nil { + return m.BoundingPoly + } + return nil +} + +func (m *CropHint) GetConfidence() float32 { + if m != nil { + return m.Confidence + } + return 0 +} + +func (m *CropHint) GetImportanceFraction() float32 { + if m != nil { + return m.ImportanceFraction + } + return 0 +} + +// Set of crop hints that are used to generate new crops when serving images. +type CropHintsAnnotation struct { + // Crop hint results. + CropHints []*CropHint `protobuf:"bytes,1,rep,name=crop_hints,json=cropHints" json:"crop_hints,omitempty"` +} + +func (m *CropHintsAnnotation) Reset() { *m = CropHintsAnnotation{} } +func (m *CropHintsAnnotation) String() string { return proto.CompactTextString(m) } +func (*CropHintsAnnotation) ProtoMessage() {} +func (*CropHintsAnnotation) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{13} } + +func (m *CropHintsAnnotation) GetCropHints() []*CropHint { + if m != nil { + return m.CropHints + } + return nil +} + +// Parameters for crop hints annotation request. +type CropHintsParams struct { + // Aspect ratios in floats, representing the ratio of the width to the height + // of the image. For example, if the desired aspect ratio is 4/3, the + // corresponding float value should be 1.33333. 
If not specified, the + // best possible crop is returned. The number of provided aspect ratios is + // limited to a maximum of 16; any aspect ratios provided after the 16th are + // ignored. + AspectRatios []float32 `protobuf:"fixed32,1,rep,packed,name=aspect_ratios,json=aspectRatios" json:"aspect_ratios,omitempty"` +} + +func (m *CropHintsParams) Reset() { *m = CropHintsParams{} } +func (m *CropHintsParams) String() string { return proto.CompactTextString(m) } +func (*CropHintsParams) ProtoMessage() {} +func (*CropHintsParams) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{14} } + +func (m *CropHintsParams) GetAspectRatios() []float32 { + if m != nil { + return m.AspectRatios + } + return nil +} + +// Parameters for web detection request. +type WebDetectionParams struct { + // Whether to include results derived from the geo information in the image. + IncludeGeoResults bool `protobuf:"varint,2,opt,name=include_geo_results,json=includeGeoResults" json:"include_geo_results,omitempty"` +} + +func (m *WebDetectionParams) Reset() { *m = WebDetectionParams{} } +func (m *WebDetectionParams) String() string { return proto.CompactTextString(m) } +func (*WebDetectionParams) ProtoMessage() {} +func (*WebDetectionParams) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{15} } + +func (m *WebDetectionParams) GetIncludeGeoResults() bool { + if m != nil { + return m.IncludeGeoResults + } + return false +} + +// Image context and/or feature-specific parameters. +type ImageContext struct { + // lat/long rectangle that specifies the location of the image. + LatLongRect *LatLongRect `protobuf:"bytes,1,opt,name=lat_long_rect,json=latLongRect" json:"lat_long_rect,omitempty"` + // List of languages to use for TEXT_DETECTION. In most cases, an empty value + // yields the best results since it enables automatic language detection. For + // languages based on the Latin alphabet, setting `language_hints` is not + // needed. In rare cases, when the language of the text in the image is known, + // setting a hint will help get better results (although it will be a + // significant hindrance if the hint is wrong). Text detection returns an + // error if one or more of the specified languages is not one of the + // [supported languages](/vision/docs/languages). + LanguageHints []string `protobuf:"bytes,2,rep,name=language_hints,json=languageHints" json:"language_hints,omitempty"` + // Parameters for crop hints annotation request. + CropHintsParams *CropHintsParams `protobuf:"bytes,4,opt,name=crop_hints_params,json=cropHintsParams" json:"crop_hints_params,omitempty"` + // Parameters for web detection. 
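// A sketch (not from the generated file) of assembling the context described
// above: OCR language hints plus a 4:3 crop-hint aspect ratio. Per the
// language_hints comment, hints are usually best left empty so the service
// auto-detects the language.
//
//	imgCtx := &ImageContext{
//		LanguageHints:      []string{"en"},
//		CropHintsParams:    &CropHintsParams{AspectRatios: []float32{1.33333}},
//		WebDetectionParams: &WebDetectionParams{IncludeGeoResults: true},
//	}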
+ WebDetectionParams *WebDetectionParams `protobuf:"bytes,6,opt,name=web_detection_params,json=webDetectionParams" json:"web_detection_params,omitempty"` +} + +func (m *ImageContext) Reset() { *m = ImageContext{} } +func (m *ImageContext) String() string { return proto.CompactTextString(m) } +func (*ImageContext) ProtoMessage() {} +func (*ImageContext) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{16} } + +func (m *ImageContext) GetLatLongRect() *LatLongRect { + if m != nil { + return m.LatLongRect + } + return nil +} + +func (m *ImageContext) GetLanguageHints() []string { + if m != nil { + return m.LanguageHints + } + return nil +} + +func (m *ImageContext) GetCropHintsParams() *CropHintsParams { + if m != nil { + return m.CropHintsParams + } + return nil +} + +func (m *ImageContext) GetWebDetectionParams() *WebDetectionParams { + if m != nil { + return m.WebDetectionParams + } + return nil +} + +// Request for performing Google Cloud Vision API tasks over a user-provided +// image, with user-requested features. +type AnnotateImageRequest struct { + // The image to be processed. + Image *Image `protobuf:"bytes,1,opt,name=image" json:"image,omitempty"` + // Requested features. + Features []*Feature `protobuf:"bytes,2,rep,name=features" json:"features,omitempty"` + // Additional context that may accompany the image. + ImageContext *ImageContext `protobuf:"bytes,3,opt,name=image_context,json=imageContext" json:"image_context,omitempty"` +} + +func (m *AnnotateImageRequest) Reset() { *m = AnnotateImageRequest{} } +func (m *AnnotateImageRequest) String() string { return proto.CompactTextString(m) } +func (*AnnotateImageRequest) ProtoMessage() {} +func (*AnnotateImageRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{17} } + +func (m *AnnotateImageRequest) GetImage() *Image { + if m != nil { + return m.Image + } + return nil +} + +func (m *AnnotateImageRequest) GetFeatures() []*Feature { + if m != nil { + return m.Features + } + return nil +} + +func (m *AnnotateImageRequest) GetImageContext() *ImageContext { + if m != nil { + return m.ImageContext + } + return nil +} + +// If an image was produced from a file (e.g. a PDF), this message gives +// information about the source of that image. +type ImageAnnotationContext struct { + // The URI of the file used to produce the image. + Uri string `protobuf:"bytes,1,opt,name=uri" json:"uri,omitempty"` + // If the file was a PDF or TIFF, this field gives the page number within + // the file used to produce the image. + PageNumber int32 `protobuf:"varint,2,opt,name=page_number,json=pageNumber" json:"page_number,omitempty"` +} + +func (m *ImageAnnotationContext) Reset() { *m = ImageAnnotationContext{} } +func (m *ImageAnnotationContext) String() string { return proto.CompactTextString(m) } +func (*ImageAnnotationContext) ProtoMessage() {} +func (*ImageAnnotationContext) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{18} } + +func (m *ImageAnnotationContext) GetUri() string { + if m != nil { + return m.Uri + } + return "" +} + +func (m *ImageAnnotationContext) GetPageNumber() int32 { + if m != nil { + return m.PageNumber + } + return 0 +} + +// Response to an image annotation request. +type AnnotateImageResponse struct { + // If present, face detection has completed successfully. + FaceAnnotations []*FaceAnnotation `protobuf:"bytes,1,rep,name=face_annotations,json=faceAnnotations" json:"face_annotations,omitempty"` + // If present, landmark detection has completed successfully. 
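// A sketch (not from the generated file) of a single request as defined above;
// the bucket is hypothetical, and Feature_LABEL_DETECTION is assumed to be
// among the Feature_Type constants generated earlier in this file.
//
//	req := &AnnotateImageRequest{
//		Image:    &Image{Source: &ImageSource{ImageUri: "gs://my-bucket/img.jpg"}},
//		Features: []*Feature{{Type: Feature_LABEL_DETECTION, MaxResults: 10}},
//	}
//
// If both Image.Content and Image.Source were set, Content would take
// precedence, as noted on Image above.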
+ LandmarkAnnotations []*EntityAnnotation `protobuf:"bytes,2,rep,name=landmark_annotations,json=landmarkAnnotations" json:"landmark_annotations,omitempty"` + // If present, logo detection has completed successfully. + LogoAnnotations []*EntityAnnotation `protobuf:"bytes,3,rep,name=logo_annotations,json=logoAnnotations" json:"logo_annotations,omitempty"` + // If present, label detection has completed successfully. + LabelAnnotations []*EntityAnnotation `protobuf:"bytes,4,rep,name=label_annotations,json=labelAnnotations" json:"label_annotations,omitempty"` + // If present, text (OCR) detection has completed successfully. + TextAnnotations []*EntityAnnotation `protobuf:"bytes,5,rep,name=text_annotations,json=textAnnotations" json:"text_annotations,omitempty"` + // If present, text (OCR) detection or document (OCR) text detection has + // completed successfully. + // This annotation provides the structural hierarchy for the OCR detected + // text. + FullTextAnnotation *TextAnnotation `protobuf:"bytes,12,opt,name=full_text_annotation,json=fullTextAnnotation" json:"full_text_annotation,omitempty"` + // If present, safe-search annotation has completed successfully. + SafeSearchAnnotation *SafeSearchAnnotation `protobuf:"bytes,6,opt,name=safe_search_annotation,json=safeSearchAnnotation" json:"safe_search_annotation,omitempty"` + // If present, image properties were extracted successfully. + ImagePropertiesAnnotation *ImageProperties `protobuf:"bytes,8,opt,name=image_properties_annotation,json=imagePropertiesAnnotation" json:"image_properties_annotation,omitempty"` + // If present, crop hints have completed successfully. + CropHintsAnnotation *CropHintsAnnotation `protobuf:"bytes,11,opt,name=crop_hints_annotation,json=cropHintsAnnotation" json:"crop_hints_annotation,omitempty"` + // If present, web detection has completed successfully. + WebDetection *WebDetection `protobuf:"bytes,13,opt,name=web_detection,json=webDetection" json:"web_detection,omitempty"` + // If set, represents the error message for the operation. + // Note that filled-in image annotations are guaranteed to be + // correct, even when `error` is set. + Error *google_rpc.Status `protobuf:"bytes,9,opt,name=error" json:"error,omitempty"` + // If present, contextual information is needed to understand where this image + // comes from. 
+ Context *ImageAnnotationContext `protobuf:"bytes,21,opt,name=context" json:"context,omitempty"` +} + +func (m *AnnotateImageResponse) Reset() { *m = AnnotateImageResponse{} } +func (m *AnnotateImageResponse) String() string { return proto.CompactTextString(m) } +func (*AnnotateImageResponse) ProtoMessage() {} +func (*AnnotateImageResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{19} } + +func (m *AnnotateImageResponse) GetFaceAnnotations() []*FaceAnnotation { + if m != nil { + return m.FaceAnnotations + } + return nil +} + +func (m *AnnotateImageResponse) GetLandmarkAnnotations() []*EntityAnnotation { + if m != nil { + return m.LandmarkAnnotations + } + return nil +} + +func (m *AnnotateImageResponse) GetLogoAnnotations() []*EntityAnnotation { + if m != nil { + return m.LogoAnnotations + } + return nil +} + +func (m *AnnotateImageResponse) GetLabelAnnotations() []*EntityAnnotation { + if m != nil { + return m.LabelAnnotations + } + return nil +} + +func (m *AnnotateImageResponse) GetTextAnnotations() []*EntityAnnotation { + if m != nil { + return m.TextAnnotations + } + return nil +} + +func (m *AnnotateImageResponse) GetFullTextAnnotation() *TextAnnotation { + if m != nil { + return m.FullTextAnnotation + } + return nil +} + +func (m *AnnotateImageResponse) GetSafeSearchAnnotation() *SafeSearchAnnotation { + if m != nil { + return m.SafeSearchAnnotation + } + return nil +} + +func (m *AnnotateImageResponse) GetImagePropertiesAnnotation() *ImageProperties { + if m != nil { + return m.ImagePropertiesAnnotation + } + return nil +} + +func (m *AnnotateImageResponse) GetCropHintsAnnotation() *CropHintsAnnotation { + if m != nil { + return m.CropHintsAnnotation + } + return nil +} + +func (m *AnnotateImageResponse) GetWebDetection() *WebDetection { + if m != nil { + return m.WebDetection + } + return nil +} + +func (m *AnnotateImageResponse) GetError() *google_rpc.Status { + if m != nil { + return m.Error + } + return nil +} + +func (m *AnnotateImageResponse) GetContext() *ImageAnnotationContext { + if m != nil { + return m.Context + } + return nil +} + +// Multiple image annotation requests are batched into a single service call. +type BatchAnnotateImagesRequest struct { + // Individual image annotation requests for this batch. + Requests []*AnnotateImageRequest `protobuf:"bytes,1,rep,name=requests" json:"requests,omitempty"` +} + +func (m *BatchAnnotateImagesRequest) Reset() { *m = BatchAnnotateImagesRequest{} } +func (m *BatchAnnotateImagesRequest) String() string { return proto.CompactTextString(m) } +func (*BatchAnnotateImagesRequest) ProtoMessage() {} +func (*BatchAnnotateImagesRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{20} } + +func (m *BatchAnnotateImagesRequest) GetRequests() []*AnnotateImageRequest { + if m != nil { + return m.Requests + } + return nil +} + +// Response to a batch image annotation request. +type BatchAnnotateImagesResponse struct { + // Individual responses to image annotation requests within the batch. 
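// A consumption sketch (not from the generated file); resp is a hypothetical
// *BatchAnnotateImagesResponse and a log import is assumed:
//
//	for _, r := range resp.GetResponses() {
//		if r.GetError() != nil {
//			log.Printf("annotation failed: %s", r.GetError().Message)
//			continue
//		}
//		for _, label := range r.GetLabelAnnotations() {
//			log.Printf("%s (score %.2f)", label.GetDescription(), label.GetScore())
//		}
//	}
//
// Per the Error comment on AnnotateImageResponse, any annotations that are
// filled in remain valid even when Error is set.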
+ Responses []*AnnotateImageResponse `protobuf:"bytes,1,rep,name=responses" json:"responses,omitempty"` +} + +func (m *BatchAnnotateImagesResponse) Reset() { *m = BatchAnnotateImagesResponse{} } +func (m *BatchAnnotateImagesResponse) String() string { return proto.CompactTextString(m) } +func (*BatchAnnotateImagesResponse) ProtoMessage() {} +func (*BatchAnnotateImagesResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{21} } + +func (m *BatchAnnotateImagesResponse) GetResponses() []*AnnotateImageResponse { + if m != nil { + return m.Responses + } + return nil +} + +// An offline file annotation request. +type AsyncAnnotateFileRequest struct { + // Required. Information about the input file. + InputConfig *InputConfig `protobuf:"bytes,1,opt,name=input_config,json=inputConfig" json:"input_config,omitempty"` + // Required. Requested features. + Features []*Feature `protobuf:"bytes,2,rep,name=features" json:"features,omitempty"` + // Additional context that may accompany the image(s) in the file. + ImageContext *ImageContext `protobuf:"bytes,3,opt,name=image_context,json=imageContext" json:"image_context,omitempty"` + // Required. The desired output location and metadata (e.g. format). + OutputConfig *OutputConfig `protobuf:"bytes,4,opt,name=output_config,json=outputConfig" json:"output_config,omitempty"` +} + +func (m *AsyncAnnotateFileRequest) Reset() { *m = AsyncAnnotateFileRequest{} } +func (m *AsyncAnnotateFileRequest) String() string { return proto.CompactTextString(m) } +func (*AsyncAnnotateFileRequest) ProtoMessage() {} +func (*AsyncAnnotateFileRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{22} } + +func (m *AsyncAnnotateFileRequest) GetInputConfig() *InputConfig { + if m != nil { + return m.InputConfig + } + return nil +} + +func (m *AsyncAnnotateFileRequest) GetFeatures() []*Feature { + if m != nil { + return m.Features + } + return nil +} + +func (m *AsyncAnnotateFileRequest) GetImageContext() *ImageContext { + if m != nil { + return m.ImageContext + } + return nil +} + +func (m *AsyncAnnotateFileRequest) GetOutputConfig() *OutputConfig { + if m != nil { + return m.OutputConfig + } + return nil +} + +// The response for a single offline file annotation request. +type AsyncAnnotateFileResponse struct { + // The output location and metadata from AsyncAnnotateFileRequest. + OutputConfig *OutputConfig `protobuf:"bytes,1,opt,name=output_config,json=outputConfig" json:"output_config,omitempty"` +} + +func (m *AsyncAnnotateFileResponse) Reset() { *m = AsyncAnnotateFileResponse{} } +func (m *AsyncAnnotateFileResponse) String() string { return proto.CompactTextString(m) } +func (*AsyncAnnotateFileResponse) ProtoMessage() {} +func (*AsyncAnnotateFileResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{23} } + +func (m *AsyncAnnotateFileResponse) GetOutputConfig() *OutputConfig { + if m != nil { + return m.OutputConfig + } + return nil +} + +// Multiple async file annotation requests are batched into a single service +// call. +type AsyncBatchAnnotateFilesRequest struct { + // Individual async file annotation requests for this batch. 
+ Requests []*AsyncAnnotateFileRequest `protobuf:"bytes,1,rep,name=requests" json:"requests,omitempty"` +} + +func (m *AsyncBatchAnnotateFilesRequest) Reset() { *m = AsyncBatchAnnotateFilesRequest{} } +func (m *AsyncBatchAnnotateFilesRequest) String() string { return proto.CompactTextString(m) } +func (*AsyncBatchAnnotateFilesRequest) ProtoMessage() {} +func (*AsyncBatchAnnotateFilesRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{24} } + +func (m *AsyncBatchAnnotateFilesRequest) GetRequests() []*AsyncAnnotateFileRequest { + if m != nil { + return m.Requests + } + return nil +} + +// Response to an async batch file annotation request. +type AsyncBatchAnnotateFilesResponse struct { + // The list of file annotation responses, one for each request in + // AsyncBatchAnnotateFilesRequest. + Responses []*AsyncAnnotateFileResponse `protobuf:"bytes,1,rep,name=responses" json:"responses,omitempty"` +} + +func (m *AsyncBatchAnnotateFilesResponse) Reset() { *m = AsyncBatchAnnotateFilesResponse{} } +func (m *AsyncBatchAnnotateFilesResponse) String() string { return proto.CompactTextString(m) } +func (*AsyncBatchAnnotateFilesResponse) ProtoMessage() {} +func (*AsyncBatchAnnotateFilesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor1, []int{25} +} + +func (m *AsyncBatchAnnotateFilesResponse) GetResponses() []*AsyncAnnotateFileResponse { + if m != nil { + return m.Responses + } + return nil +} + +// The desired input location and metadata. +type InputConfig struct { + // The Google Cloud Storage location to read the input from. + GcsSource *GcsSource `protobuf:"bytes,1,opt,name=gcs_source,json=gcsSource" json:"gcs_source,omitempty"` + // The type of the file. Currently only "application/pdf" and "image/tiff" + // are supported. Wildcards are not supported. + MimeType string `protobuf:"bytes,2,opt,name=mime_type,json=mimeType" json:"mime_type,omitempty"` +} + +func (m *InputConfig) Reset() { *m = InputConfig{} } +func (m *InputConfig) String() string { return proto.CompactTextString(m) } +func (*InputConfig) ProtoMessage() {} +func (*InputConfig) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{26} } + +func (m *InputConfig) GetGcsSource() *GcsSource { + if m != nil { + return m.GcsSource + } + return nil +} + +func (m *InputConfig) GetMimeType() string { + if m != nil { + return m.MimeType + } + return "" +} + +// The desired output location and metadata. +type OutputConfig struct { + // The Google Cloud Storage location to write the output(s) to. + GcsDestination *GcsDestination `protobuf:"bytes,1,opt,name=gcs_destination,json=gcsDestination" json:"gcs_destination,omitempty"` + // The max number of response protos to put into each output JSON file on GCS. + // The valid range is [1, 100]. If not specified, the default value is 20. + // + // For example, for one pdf file with 100 pages, 100 response protos will + // be generated. If `batch_size` = 20, then 5 json files each + // containing 20 response protos will be written under the prefix + // `gcs_destination`.`uri`. + // + // Currently, batch_size only applies to GcsDestination, with potential future + // support for other output configurations. 
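// A sketch (not from the generated file) tying the async-file types together;
// buckets are hypothetical and Feature_DOCUMENT_TEXT_DETECTION is assumed from
// the Feature_Type constants earlier in this file. Following the batch_size
// arithmetic above, a 100-page PDF with BatchSize 20 yields ceil(100/20) = 5
// JSON shards under the output prefix.
//
//	fileReq := &AsyncAnnotateFileRequest{
//		InputConfig: &InputConfig{
//			GcsSource: &GcsSource{Uri: "gs://my-bucket/docs/report.pdf"},
//			MimeType:  "application/pdf", // only "application/pdf" and "image/tiff" are accepted
//		},
//		Features: []*Feature{{Type: Feature_DOCUMENT_TEXT_DETECTION}},
//		OutputConfig: &OutputConfig{
//			GcsDestination: &GcsDestination{Uri: "gs://my-bucket/ocr/"}, // prefix form must end in "/"
//			BatchSize:      20,
//		},
//	}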
+ BatchSize int32 `protobuf:"varint,2,opt,name=batch_size,json=batchSize" json:"batch_size,omitempty"` +} + +func (m *OutputConfig) Reset() { *m = OutputConfig{} } +func (m *OutputConfig) String() string { return proto.CompactTextString(m) } +func (*OutputConfig) ProtoMessage() {} +func (*OutputConfig) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{27} } + +func (m *OutputConfig) GetGcsDestination() *GcsDestination { + if m != nil { + return m.GcsDestination + } + return nil +} + +func (m *OutputConfig) GetBatchSize() int32 { + if m != nil { + return m.BatchSize + } + return 0 +} + +// The Google Cloud Storage location where the input will be read from. +type GcsSource struct { + // Google Cloud Storage URI for the input file. This must only be a GCS + // object. Wildcards are not currently supported. + Uri string `protobuf:"bytes,1,opt,name=uri" json:"uri,omitempty"` +} + +func (m *GcsSource) Reset() { *m = GcsSource{} } +func (m *GcsSource) String() string { return proto.CompactTextString(m) } +func (*GcsSource) ProtoMessage() {} +func (*GcsSource) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{28} } + +func (m *GcsSource) GetUri() string { + if m != nil { + return m.Uri + } + return "" +} + +// The Google Cloud Storage location where the output will be written to. +type GcsDestination struct { + // Google Cloud Storage URI where the results will be stored. Results will + // be in JSON format and preceded by its corresponding input URI. This field + // can either represent a single file, or a prefix for multiple outputs. + // Prefixes must end in a `/`. + // + // Examples: + // + // * File: gs://bucket-name/filename.json + // * Prefix: gs://bucket-name/prefix/here/ + // * File: gs://bucket-name/prefix/here + // + // If multiple outputs, each response is still AnnotateFileResponse, each of + // which contains some subset of the full list of AnnotateImageResponse. + // Multiple outputs can happen if, for example, the output JSON is too large + // and overflows into multiple sharded files. + Uri string `protobuf:"bytes,1,opt,name=uri" json:"uri,omitempty"` +} + +func (m *GcsDestination) Reset() { *m = GcsDestination{} } +func (m *GcsDestination) String() string { return proto.CompactTextString(m) } +func (*GcsDestination) ProtoMessage() {} +func (*GcsDestination) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{29} } + +func (m *GcsDestination) GetUri() string { + if m != nil { + return m.Uri + } + return "" +} + +// Contains metadata for the BatchAnnotateImages operation. +type OperationMetadata struct { + // Current state of the batch operation. + State OperationMetadata_State `protobuf:"varint,1,opt,name=state,enum=google.cloud.vision.v1p2beta1.OperationMetadata_State" json:"state,omitempty"` + // The time when the batch request was received. + CreateTime *google_protobuf3.Timestamp `protobuf:"bytes,5,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // The time when the operation result was last updated. 
+ UpdateTime *google_protobuf3.Timestamp `protobuf:"bytes,6,opt,name=update_time,json=updateTime" json:"update_time,omitempty"` +} + +func (m *OperationMetadata) Reset() { *m = OperationMetadata{} } +func (m *OperationMetadata) String() string { return proto.CompactTextString(m) } +func (*OperationMetadata) ProtoMessage() {} +func (*OperationMetadata) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{30} } + +func (m *OperationMetadata) GetState() OperationMetadata_State { + if m != nil { + return m.State + } + return OperationMetadata_STATE_UNSPECIFIED +} + +func (m *OperationMetadata) GetCreateTime() *google_protobuf3.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *OperationMetadata) GetUpdateTime() *google_protobuf3.Timestamp { + if m != nil { + return m.UpdateTime + } + return nil +} + +func init() { + proto.RegisterType((*Feature)(nil), "google.cloud.vision.v1p2beta1.Feature") + proto.RegisterType((*ImageSource)(nil), "google.cloud.vision.v1p2beta1.ImageSource") + proto.RegisterType((*Image)(nil), "google.cloud.vision.v1p2beta1.Image") + proto.RegisterType((*FaceAnnotation)(nil), "google.cloud.vision.v1p2beta1.FaceAnnotation") + proto.RegisterType((*FaceAnnotation_Landmark)(nil), "google.cloud.vision.v1p2beta1.FaceAnnotation.Landmark") + proto.RegisterType((*LocationInfo)(nil), "google.cloud.vision.v1p2beta1.LocationInfo") + proto.RegisterType((*Property)(nil), "google.cloud.vision.v1p2beta1.Property") + proto.RegisterType((*EntityAnnotation)(nil), "google.cloud.vision.v1p2beta1.EntityAnnotation") + proto.RegisterType((*SafeSearchAnnotation)(nil), "google.cloud.vision.v1p2beta1.SafeSearchAnnotation") + proto.RegisterType((*LatLongRect)(nil), "google.cloud.vision.v1p2beta1.LatLongRect") + proto.RegisterType((*ColorInfo)(nil), "google.cloud.vision.v1p2beta1.ColorInfo") + proto.RegisterType((*DominantColorsAnnotation)(nil), "google.cloud.vision.v1p2beta1.DominantColorsAnnotation") + proto.RegisterType((*ImageProperties)(nil), "google.cloud.vision.v1p2beta1.ImageProperties") + proto.RegisterType((*CropHint)(nil), "google.cloud.vision.v1p2beta1.CropHint") + proto.RegisterType((*CropHintsAnnotation)(nil), "google.cloud.vision.v1p2beta1.CropHintsAnnotation") + proto.RegisterType((*CropHintsParams)(nil), "google.cloud.vision.v1p2beta1.CropHintsParams") + proto.RegisterType((*WebDetectionParams)(nil), "google.cloud.vision.v1p2beta1.WebDetectionParams") + proto.RegisterType((*ImageContext)(nil), "google.cloud.vision.v1p2beta1.ImageContext") + proto.RegisterType((*AnnotateImageRequest)(nil), "google.cloud.vision.v1p2beta1.AnnotateImageRequest") + proto.RegisterType((*ImageAnnotationContext)(nil), "google.cloud.vision.v1p2beta1.ImageAnnotationContext") + proto.RegisterType((*AnnotateImageResponse)(nil), "google.cloud.vision.v1p2beta1.AnnotateImageResponse") + proto.RegisterType((*BatchAnnotateImagesRequest)(nil), "google.cloud.vision.v1p2beta1.BatchAnnotateImagesRequest") + proto.RegisterType((*BatchAnnotateImagesResponse)(nil), "google.cloud.vision.v1p2beta1.BatchAnnotateImagesResponse") + proto.RegisterType((*AsyncAnnotateFileRequest)(nil), "google.cloud.vision.v1p2beta1.AsyncAnnotateFileRequest") + proto.RegisterType((*AsyncAnnotateFileResponse)(nil), "google.cloud.vision.v1p2beta1.AsyncAnnotateFileResponse") + proto.RegisterType((*AsyncBatchAnnotateFilesRequest)(nil), "google.cloud.vision.v1p2beta1.AsyncBatchAnnotateFilesRequest") + proto.RegisterType((*AsyncBatchAnnotateFilesResponse)(nil), 
"google.cloud.vision.v1p2beta1.AsyncBatchAnnotateFilesResponse") + proto.RegisterType((*InputConfig)(nil), "google.cloud.vision.v1p2beta1.InputConfig") + proto.RegisterType((*OutputConfig)(nil), "google.cloud.vision.v1p2beta1.OutputConfig") + proto.RegisterType((*GcsSource)(nil), "google.cloud.vision.v1p2beta1.GcsSource") + proto.RegisterType((*GcsDestination)(nil), "google.cloud.vision.v1p2beta1.GcsDestination") + proto.RegisterType((*OperationMetadata)(nil), "google.cloud.vision.v1p2beta1.OperationMetadata") + proto.RegisterEnum("google.cloud.vision.v1p2beta1.Likelihood", Likelihood_name, Likelihood_value) + proto.RegisterEnum("google.cloud.vision.v1p2beta1.Feature_Type", Feature_Type_name, Feature_Type_value) + proto.RegisterEnum("google.cloud.vision.v1p2beta1.FaceAnnotation_Landmark_Type", FaceAnnotation_Landmark_Type_name, FaceAnnotation_Landmark_Type_value) + proto.RegisterEnum("google.cloud.vision.v1p2beta1.OperationMetadata_State", OperationMetadata_State_name, OperationMetadata_State_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for ImageAnnotator service + +type ImageAnnotatorClient interface { + // Run image detection and annotation for a batch of images. + BatchAnnotateImages(ctx context.Context, in *BatchAnnotateImagesRequest, opts ...grpc.CallOption) (*BatchAnnotateImagesResponse, error) + // Run async image detection and annotation for a list of generic files (e.g. + // PDF) which may contain multiple pages and multiple images per page. + // Progress and results can be retrieved through the + // `google.longrunning.Operations` interface. + // `Operation.metadata` contains `OperationMetadata` (metadata). + // `Operation.response` contains `AsyncBatchAnnotateFilesResponse` (results). + AsyncBatchAnnotateFiles(ctx context.Context, in *AsyncBatchAnnotateFilesRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) +} + +type imageAnnotatorClient struct { + cc *grpc.ClientConn +} + +func NewImageAnnotatorClient(cc *grpc.ClientConn) ImageAnnotatorClient { + return &imageAnnotatorClient{cc} +} + +func (c *imageAnnotatorClient) BatchAnnotateImages(ctx context.Context, in *BatchAnnotateImagesRequest, opts ...grpc.CallOption) (*BatchAnnotateImagesResponse, error) { + out := new(BatchAnnotateImagesResponse) + err := grpc.Invoke(ctx, "/google.cloud.vision.v1p2beta1.ImageAnnotator/BatchAnnotateImages", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *imageAnnotatorClient) AsyncBatchAnnotateFiles(ctx context.Context, in *AsyncBatchAnnotateFilesRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.cloud.vision.v1p2beta1.ImageAnnotator/AsyncBatchAnnotateFiles", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for ImageAnnotator service + +type ImageAnnotatorServer interface { + // Run image detection and annotation for a batch of images. + BatchAnnotateImages(context.Context, *BatchAnnotateImagesRequest) (*BatchAnnotateImagesResponse, error) + // Run async image detection and annotation for a list of generic files (e.g. + // PDF) which may contain multiple pages and multiple images per page. 
+ // Progress and results can be retrieved through the + // `google.longrunning.Operations` interface. + // `Operation.metadata` contains `OperationMetadata` (metadata). + // `Operation.response` contains `AsyncBatchAnnotateFilesResponse` (results). + AsyncBatchAnnotateFiles(context.Context, *AsyncBatchAnnotateFilesRequest) (*google_longrunning.Operation, error) +} + +func RegisterImageAnnotatorServer(s *grpc.Server, srv ImageAnnotatorServer) { + s.RegisterService(&_ImageAnnotator_serviceDesc, srv) +} + +func _ImageAnnotator_BatchAnnotateImages_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BatchAnnotateImagesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ImageAnnotatorServer).BatchAnnotateImages(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.vision.v1p2beta1.ImageAnnotator/BatchAnnotateImages", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ImageAnnotatorServer).BatchAnnotateImages(ctx, req.(*BatchAnnotateImagesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ImageAnnotator_AsyncBatchAnnotateFiles_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AsyncBatchAnnotateFilesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ImageAnnotatorServer).AsyncBatchAnnotateFiles(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.vision.v1p2beta1.ImageAnnotator/AsyncBatchAnnotateFiles", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ImageAnnotatorServer).AsyncBatchAnnotateFiles(ctx, req.(*AsyncBatchAnnotateFilesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ImageAnnotator_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.vision.v1p2beta1.ImageAnnotator", + HandlerType: (*ImageAnnotatorServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "BatchAnnotateImages", + Handler: _ImageAnnotator_BatchAnnotateImages_Handler, + }, + { + MethodName: "AsyncBatchAnnotateFiles", + Handler: _ImageAnnotator_AsyncBatchAnnotateFiles_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/vision/v1p2beta1/image_annotator.proto", +} + +func init() { + proto.RegisterFile("google/cloud/vision/v1p2beta1/image_annotator.proto", fileDescriptor1) +} + +var fileDescriptor1 = []byte{ + // 2880 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcf, 0x73, 0xdb, 0xc6, + 0xf5, 0x0f, 0xa9, 0x5f, 0xe4, 0x23, 0x25, 0x41, 0xab, 0x1f, 0xa6, 0x65, 0x2b, 0x56, 0x90, 0x6f, + 0xbe, 0x5f, 0x7d, 0xdd, 0x94, 0x1a, 0xcb, 0x49, 0xda, 0x3a, 0xcd, 0xa4, 0x14, 0x09, 0x49, 0x1c, + 0x53, 0x24, 0xbb, 0x84, 0xec, 0xd8, 0x93, 0x0e, 0x0a, 0x81, 0x4b, 0x1a, 0x09, 0x08, 0x20, 0x00, + 0x68, 0x8b, 0x39, 0x66, 0xa6, 0x7f, 0x41, 0x6f, 0xbd, 0x77, 0x7a, 0x6a, 0x2f, 0xed, 0xdf, 0xd0, + 0x7b, 0xa7, 0x87, 0x5e, 0x7a, 0x6b, 0x0f, 0x3d, 0xf6, 0xd4, 0xe9, 0xf4, 0xd4, 0xd9, 0x1f, 0x00, + 0x17, 0x94, 0x64, 0x8a, 0xce, 0x74, 0xa6, 0x27, 0x62, 0xdf, 0xdb, 0xcf, 0xe7, 0x2d, 0xde, 0xbe, + 0x7d, 0xfb, 0x76, 0x41, 0x78, 0xd8, 0xf7, 0xbc, 0xbe, 0x43, 0xf6, 0x2d, 0xc7, 0x1b, 0x76, 0xf7, + 0x5f, 0xda, 0xa1, 0xed, 0xb9, 0xfb, 0x2f, 0x1f, 0xf8, 
0x07, 0xe7, 0x24, 0x32, 0x1f, 0xec, 0xdb, + 0x03, 0xb3, 0x4f, 0x0c, 0xd3, 0x75, 0xbd, 0xc8, 0x8c, 0xbc, 0xa0, 0xec, 0x07, 0x5e, 0xe4, 0xa1, + 0x1d, 0x0e, 0x2a, 0x33, 0x50, 0x99, 0x83, 0xca, 0x09, 0x68, 0xfb, 0xae, 0xe0, 0x34, 0x7d, 0x7b, + 0x5f, 0x40, 0x6d, 0xcf, 0x0d, 0x39, 0x78, 0xfb, 0xfd, 0xd7, 0x5b, 0xec, 0x13, 0x6f, 0x40, 0xa2, + 0x60, 0x24, 0x7a, 0x4f, 0x19, 0x5f, 0x44, 0x2e, 0x22, 0x63, 0x6c, 0x43, 0x80, 0x1e, 0xbc, 0x1e, + 0xf4, 0x8a, 0x9c, 0x1b, 0x5d, 0x12, 0x11, 0x4b, 0x82, 0xbc, 0x2b, 0x20, 0x8e, 0xe7, 0xf6, 0x83, + 0xa1, 0xeb, 0xda, 0x6e, 0x7f, 0xdf, 0xf3, 0x49, 0x90, 0x1a, 0xfa, 0x3d, 0xd1, 0x89, 0xb5, 0xce, + 0x87, 0xbd, 0xfd, 0xc8, 0x1e, 0x90, 0x30, 0x32, 0x07, 0xbe, 0xe8, 0x70, 0x4b, 0x74, 0x08, 0x7c, + 0x6b, 0x3f, 0x8c, 0xcc, 0x68, 0x18, 0x4e, 0x28, 0xa2, 0x91, 0x4f, 0xf6, 0x2d, 0xcf, 0x89, 0x5d, + 0xb9, 0x5d, 0x92, 0x15, 0x8e, 0x19, 0x39, 0x6e, 0x9f, 0x6b, 0xd4, 0x7f, 0x65, 0x61, 0xe9, 0x88, + 0x98, 0xd1, 0x30, 0x20, 0xe8, 0x53, 0x98, 0xa7, 0x1d, 0x4a, 0x99, 0xdd, 0xcc, 0xde, 0xca, 0xc1, + 0x77, 0xca, 0xaf, 0xf5, 0x7f, 0x59, 0xa0, 0xca, 0xfa, 0xc8, 0x27, 0x98, 0x01, 0xd1, 0x3d, 0x28, + 0x0c, 0xcc, 0x0b, 0x23, 0x20, 0xe1, 0xd0, 0x89, 0xc2, 0x52, 0x76, 0x37, 0xb3, 0xb7, 0x80, 0x61, + 0x60, 0x5e, 0x60, 0x2e, 0x41, 0x1b, 0xb0, 0x30, 0xf0, 0xba, 0xc4, 0x29, 0xcd, 0xed, 0x66, 0xf6, + 0xf2, 0x98, 0x37, 0xd4, 0x7f, 0x64, 0x60, 0x9e, 0xb2, 0xa0, 0x0d, 0x50, 0xf4, 0x67, 0x6d, 0xcd, + 0x38, 0x6b, 0x76, 0xda, 0x5a, 0xb5, 0x7e, 0x54, 0xd7, 0x6a, 0xca, 0x5b, 0x08, 0xc1, 0xca, 0x51, + 0xa5, 0xaa, 0x19, 0x35, 0x4d, 0xd7, 0xaa, 0x7a, 0xbd, 0xd5, 0x54, 0x32, 0x68, 0x0b, 0x50, 0xa3, + 0xd2, 0xac, 0x9d, 0x56, 0xf0, 0x63, 0x49, 0x9e, 0xa5, 0x7d, 0x1b, 0xad, 0xe3, 0x96, 0x24, 0x9b, + 0x43, 0xeb, 0xb0, 0xda, 0xa8, 0x1c, 0x6a, 0x0d, 0x49, 0x38, 0x4f, 0x3b, 0xea, 0xda, 0x67, 0xba, + 0x24, 0x5b, 0x40, 0x77, 0xe0, 0x56, 0xad, 0x55, 0x3d, 0x3b, 0xd5, 0x9a, 0xba, 0x31, 0xa1, 0x2c, + 0xa0, 0xdb, 0xb0, 0xd9, 0xa9, 0x1c, 0x69, 0x46, 0x47, 0xab, 0xe0, 0xea, 0x89, 0xa4, 0x5a, 0xa4, + 0xc3, 0xae, 0x9f, 0x56, 0x8e, 0x35, 0xa3, 0x8d, 0x5b, 0x6d, 0x0d, 0xeb, 0x75, 0xad, 0xa3, 0x2c, + 0xa1, 0x15, 0x80, 0x2a, 0x6e, 0xb5, 0x8d, 0x93, 0x7a, 0x53, 0xef, 0x28, 0x79, 0xb4, 0x06, 0xcb, + 0x4f, 0xb5, 0x43, 0x09, 0x08, 0x6a, 0x13, 0x0a, 0x75, 0x1a, 0xfa, 0x1d, 0x6f, 0x18, 0x58, 0x04, + 0xa9, 0xb0, 0xdc, 0xb7, 0x42, 0x83, 0xaf, 0x86, 0x61, 0x60, 0xb3, 0x89, 0xc8, 0xe3, 0x42, 0xdf, + 0x0a, 0x59, 0xb7, 0xb3, 0xc0, 0x46, 0x77, 0x20, 0x3f, 0xd6, 0x67, 0x99, 0x3e, 0x67, 0x0b, 0xa5, + 0x4a, 0x60, 0x81, 0x75, 0x44, 0x25, 0x58, 0xb2, 0x3c, 0x37, 0x22, 0x6e, 0xc4, 0x38, 0x8a, 0x38, + 0x6e, 0xa2, 0x43, 0x58, 0x0c, 0x99, 0x35, 0x06, 0x2e, 0x1c, 0xdc, 0x9f, 0x32, 0xcb, 0xd2, 0xf8, + 0xb0, 0x40, 0xaa, 0xbf, 0x54, 0x60, 0xe5, 0xc8, 0xb4, 0x48, 0x25, 0x59, 0x11, 0xa8, 0x0d, 0xcb, + 0xe7, 0xde, 0xd0, 0xed, 0xda, 0x6e, 0xdf, 0xf0, 0x3d, 0x67, 0xc4, 0xcc, 0x16, 0xa6, 0xc6, 0xd0, + 0xa1, 0xc0, 0xb4, 0x3d, 0x67, 0x84, 0x8b, 0xe7, 0x52, 0x0b, 0x9d, 0x81, 0xd2, 0xeb, 0x1a, 0x69, + 0xd2, 0xec, 0xec, 0xa4, 0x2b, 0xbd, 0xae, 0xdc, 0x46, 0x3a, 0xe4, 0x1d, 0xd3, 0xed, 0x0e, 0xcc, + 0xe0, 0xcb, 0xb0, 0x34, 0xb7, 0x3b, 0xb7, 0x57, 0x38, 0xf8, 0x68, 0x5a, 0xa0, 0xa7, 0x5e, 0xb5, + 0xdc, 0x10, 0x70, 0x3c, 0x26, 0x42, 0x3b, 0x00, 0x81, 0xe7, 0x38, 0x86, 0xe9, 0xf6, 0x1d, 0x52, + 0x9a, 0xdf, 0xcd, 0xec, 0x65, 0x71, 0x9e, 0x4a, 0x2a, 0x54, 0x40, 0x27, 0xcd, 0x37, 0x5d, 0xa1, + 0x5d, 0x60, 0xda, 0x9c, 0x6f, 0xba, 0x5c, 0xb9, 0x03, 0x10, 0xd9, 0x4e, 0x24, 0xb4, 0x8b, 0x1c, + 0x4b, 0x25, 0x5c, 0xfd, 0x00, 0x36, 0x92, 0x2c, 0x62, 0x58, 0x9e, 0xdb, 0xb3, 
0xbb, 0xc4, 0xb5, + 0x48, 0x69, 0x89, 0x75, 0x5c, 0x4f, 0x74, 0xd5, 0x44, 0x85, 0x3e, 0x84, 0xad, 0x78, 0x68, 0xd4, + 0x75, 0x12, 0x28, 0xc7, 0x40, 0x9b, 0x92, 0x56, 0x82, 0xb5, 0x61, 0xe5, 0x0b, 0x6f, 0x64, 0x38, + 0xf6, 0x97, 0xc4, 0xb1, 0x5f, 0x78, 0x5e, 0xb7, 0x94, 0x67, 0x89, 0xe0, 0xff, 0xa7, 0xf8, 0xa7, + 0x91, 0x00, 0xf0, 0xf2, 0x17, 0xde, 0x68, 0xdc, 0x44, 0x4f, 0x60, 0x2d, 0xf4, 0x82, 0xc0, 0x7b, + 0x25, 0x93, 0xc2, 0xac, 0xa4, 0x0a, 0xe7, 0x90, 0x78, 0x75, 0x50, 0x4c, 0xb7, 0x4f, 0x02, 0x99, + 0xb6, 0x30, 0x2b, 0xed, 0x2a, 0xa3, 0x90, 0x58, 0x9f, 0xc3, 0x7a, 0x38, 0x0c, 0xfc, 0xc0, 0x0e, + 0x89, 0x4c, 0x5c, 0x9c, 0x95, 0x18, 0xc5, 0x2c, 0x12, 0xb7, 0x05, 0xa5, 0xa1, 0xdb, 0x25, 0x81, + 0x41, 0x2e, 0x7c, 0x2f, 0x24, 0x5d, 0xd9, 0xc0, 0xf2, 0xac, 0x06, 0xb6, 0x18, 0x95, 0xc6, 0x99, + 0x24, 0x23, 0x9f, 0x01, 0x3a, 0x77, 0x86, 0x41, 0x90, 0xa6, 0x5f, 0x99, 0x95, 0x7e, 0x4d, 0x90, + 0xa4, 0x5d, 0xf3, 0x82, 0x98, 0xdd, 0x57, 0xc4, 0x4c, 0xf9, 0x7c, 0x75, 0x66, 0xd7, 0xc4, 0x2c, + 0x63, 0xd9, 0xf6, 0x9f, 0x96, 0x20, 0x17, 0xaf, 0x29, 0xd4, 0x12, 0x5b, 0xd0, 0x1c, 0x63, 0xfe, + 0xf8, 0xcd, 0x56, 0xa6, 0xbc, 0x25, 0x55, 0x21, 0xe7, 0x7b, 0xa1, 0x4d, 0xf5, 0x6c, 0x5d, 0x16, + 0x0e, 0xfe, 0x6f, 0x0a, 0x69, 0x5b, 0x74, 0xc7, 0x09, 0x50, 0xfd, 0xdd, 0xe2, 0x78, 0x83, 0x3a, + 0x6b, 0x3e, 0x6e, 0xb6, 0x9e, 0x36, 0x8d, 0x78, 0xfb, 0x51, 0xde, 0x42, 0x45, 0xc8, 0x35, 0xb4, + 0x23, 0xdd, 0xd0, 0x9e, 0x69, 0x4a, 0x06, 0x2d, 0x43, 0x1e, 0xd7, 0x8f, 0x4f, 0x78, 0x33, 0x8b, + 0x4a, 0xb0, 0xc1, 0x94, 0xad, 0x23, 0x23, 0xee, 0x74, 0x88, 0x5b, 0x4f, 0x95, 0x39, 0xba, 0xa3, + 0xf0, 0x8e, 0x93, 0xaa, 0x79, 0xaa, 0x8a, 0x41, 0x09, 0x17, 0x53, 0x2d, 0xa0, 0x6d, 0xd8, 0x4a, + 0x50, 0x69, 0xdd, 0x22, 0x85, 0x9d, 0xd6, 0x6b, 0xed, 0x56, 0xbd, 0xa9, 0x1b, 0x87, 0x9a, 0xfe, + 0x54, 0xd3, 0x9a, 0x54, 0x4b, 0x77, 0xa3, 0x22, 0xe4, 0x9a, 0xad, 0x8e, 0x66, 0xe8, 0xf5, 0xb6, + 0x92, 0xa3, 0x63, 0x3c, 0x6b, 0xb7, 0x35, 0x6c, 0x34, 0xea, 0x6d, 0x25, 0x4f, 0x9b, 0x8d, 0xd6, + 0x53, 0xd1, 0x04, 0xba, 0x73, 0x9d, 0xb6, 0xce, 0xf4, 0x13, 0x36, 0x2a, 0xa5, 0x80, 0x56, 0xa1, + 0xc0, 0xdb, 0xcc, 0x9e, 0x52, 0x44, 0x0a, 0x14, 0xb9, 0xa0, 0xaa, 0x35, 0x75, 0x0d, 0x2b, 0xcb, + 0x68, 0x13, 0xd6, 0x18, 0xfd, 0x61, 0x4b, 0xd7, 0x5b, 0xa7, 0xa2, 0xe3, 0x0a, 0xf5, 0x97, 0x2c, + 0x66, 0x7c, 0xab, 0x74, 0xf3, 0x96, 0xa5, 0x82, 0x44, 0x49, 0xde, 0x5a, 0x7b, 0xa6, 0x19, 0x7a, + 0xab, 0x6d, 0x1c, 0xb6, 0xce, 0x9a, 0xb5, 0x0a, 0x7e, 0xa6, 0xac, 0xa5, 0x54, 0xfc, 0xad, 0xab, + 0x2d, 0xdc, 0xd4, 0xb0, 0x82, 0xd0, 0x5d, 0x28, 0x25, 0x2a, 0xc1, 0x98, 0x00, 0xd7, 0x13, 0xf7, + 0x53, 0x2d, 0x7b, 0x10, 0xb8, 0x8d, 0xb1, 0x23, 0x2f, 0x99, 0xdb, 0x4c, 0xeb, 0x52, 0xf6, 0xb6, + 0xd0, 0x0e, 0xdc, 0x1e, 0xeb, 0x26, 0x0d, 0xde, 0x1a, 0xcf, 0xea, 0xa4, 0xc5, 0x12, 0xba, 0x07, + 0x77, 0xe4, 0x79, 0x36, 0xf8, 0x14, 0xc4, 0x33, 0xa6, 0xdc, 0x46, 0xbb, 0x70, 0x37, 0x35, 0xa5, + 0x93, 0x3d, 0xb6, 0xa9, 0x43, 0x39, 0x45, 0x05, 0x1b, 0x3a, 0xae, 0x1c, 0xd3, 0x3a, 0xe2, 0x0e, + 0xf5, 0xbe, 0xc0, 0x49, 0xe2, 0xbb, 0xac, 0x18, 0x8a, 0xdf, 0xbd, 0x7d, 0xd6, 0xae, 0x37, 0x94, + 0x1d, 0x5a, 0x0c, 0x8d, 0x87, 0xc7, 0x85, 0x6f, 0x53, 0xfc, 0x51, 0x0b, 0x6b, 0x27, 0x5a, 0xa5, + 0x66, 0x1c, 0xb3, 0x5a, 0xa9, 0x51, 0x51, 0xee, 0xd1, 0x8a, 0xa5, 0x7a, 0x52, 0x6f, 0x1a, 0xc7, + 0xcd, 0x8a, 0x7e, 0x42, 0x29, 0x77, 0xa9, 0x7d, 0x26, 0x62, 0xbc, 0xc7, 0xad, 0x26, 0x95, 0xbe, + 0x43, 0xf1, 0x4c, 0xca, 0x99, 0x85, 0x58, 0x55, 0x7f, 0x08, 0xc5, 0x86, 0x67, 0xb1, 0xb5, 0x59, + 0x77, 0x7b, 0x1e, 0x7a, 0x1f, 0x96, 0x1c, 0x33, 0x32, 0x1c, 0xb7, 0x2f, 0xca, 0x83, 0xf5, 0x78, + 0x29, 
0xd2, 0xa5, 0x5a, 0x6e, 0x98, 0x51, 0xc3, 0xed, 0xe3, 0x45, 0x87, 0xfd, 0xaa, 0x4f, 0x21, + 0xd7, 0x0e, 0x68, 0x71, 0x1c, 0x8d, 0x10, 0x82, 0x79, 0xd7, 0x1c, 0x10, 0x51, 0x10, 0xb1, 0x67, + 0x5a, 0x4b, 0xbe, 0x34, 0x9d, 0x21, 0x11, 0x55, 0x10, 0x6f, 0xa0, 0x77, 0xa0, 0x38, 0xb4, 0xdd, + 0xe8, 0xa3, 0x0f, 0x0c, 0xae, 0xa4, 0x89, 0x64, 0x1e, 0x17, 0xb8, 0xec, 0x09, 0x15, 0xa9, 0xbf, + 0x98, 0x03, 0x45, 0x73, 0x23, 0x3b, 0x1a, 0x49, 0x05, 0x8c, 0x02, 0x73, 0x03, 0xbb, 0x2b, 0x0c, + 0xd0, 0x47, 0xb4, 0x05, 0x8b, 0x8e, 0x67, 0x99, 0x4e, 0x6c, 0x40, 0xb4, 0xd0, 0x2e, 0x14, 0xba, + 0x24, 0xb4, 0x02, 0xdb, 0x67, 0x49, 0x85, 0x57, 0xb2, 0xb2, 0x88, 0x8e, 0x2c, 0xb4, 0xbc, 0x20, + 0x2e, 0x04, 0x78, 0x03, 0xbd, 0x0d, 0x20, 0xed, 0xc4, 0xbc, 0x0a, 0x90, 0x24, 0x54, 0x1f, 0x79, + 0xbe, 0x6d, 0x99, 0x8e, 0x1d, 0x8d, 0x44, 0x1d, 0x20, 0x49, 0x2e, 0x97, 0x58, 0x4b, 0xdf, 0xb6, + 0xc4, 0xaa, 0x43, 0xde, 0x11, 0xf3, 0x13, 0x96, 0x72, 0xac, 0x16, 0x9a, 0xc6, 0x26, 0xcf, 0x27, + 0x1e, 0xa3, 0xd1, 0x31, 0x80, 0xcf, 0x27, 0xcb, 0x26, 0x61, 0x29, 0xcf, 0xb8, 0xa6, 0x26, 0x5a, + 0x31, 0xbb, 0x58, 0x82, 0xaa, 0x7f, 0xc9, 0xc2, 0x46, 0xc7, 0xec, 0x91, 0x0e, 0x31, 0x03, 0xeb, + 0x85, 0x34, 0x41, 0x9f, 0xc2, 0x82, 0xd9, 0x1d, 0x3a, 0x91, 0x38, 0x9d, 0xcc, 0xb0, 0xe9, 0x70, + 0x1c, 0x25, 0x08, 0x7d, 0xcf, 0xeb, 0xb1, 0xe9, 0x9c, 0x8d, 0x80, 0xe1, 0x50, 0x15, 0x96, 0x06, + 0xa4, 0x4b, 0xa7, 0x43, 0x6c, 0x4f, 0x33, 0x50, 0xc4, 0x48, 0xa4, 0x41, 0xee, 0xa5, 0xed, 0x39, + 0x2c, 0x06, 0xe6, 0x67, 0x65, 0x49, 0xa0, 0xe8, 0x13, 0x98, 0x0f, 0x4c, 0x6b, 0x34, 0x7b, 0x85, + 0xc6, 0x60, 0xea, 0x2b, 0x28, 0xd0, 0xd5, 0xe6, 0xb9, 0x7d, 0x4c, 0xac, 0x08, 0x3d, 0x84, 0xc2, + 0xc0, 0x76, 0x8d, 0x1b, 0x2c, 0xce, 0xfc, 0xc0, 0x76, 0xf9, 0x23, 0x03, 0x99, 0x17, 0x09, 0x28, + 0xfb, 0x3a, 0x90, 0x79, 0xc1, 0x1f, 0xd5, 0x00, 0xf2, 0x55, 0x7a, 0x2e, 0x65, 0xf9, 0x60, 0x0f, + 0x16, 0xd8, 0x21, 0x55, 0x18, 0x44, 0x29, 0x2c, 0xeb, 0x86, 0x79, 0x87, 0xf1, 0x8a, 0xca, 0xca, + 0x2b, 0xea, 0x3d, 0x58, 0xf1, 0xed, 0x0b, 0xe2, 0x18, 0xbd, 0xc0, 0xb4, 0x92, 0xc5, 0x98, 0xc5, + 0xcb, 0x4c, 0x7a, 0x24, 0x84, 0xea, 0xe7, 0x50, 0xaa, 0x79, 0x03, 0xdb, 0x35, 0xdd, 0x88, 0x91, + 0x86, 0x52, 0x54, 0xfd, 0x08, 0x16, 0x99, 0x85, 0xb0, 0x94, 0x61, 0x31, 0xbb, 0x37, 0xc5, 0x93, + 0xc9, 0xe0, 0xb1, 0xc0, 0xa9, 0x21, 0xac, 0xb2, 0x33, 0x52, 0x3b, 0x89, 0x61, 0xf4, 0x53, 0x58, + 0xed, 0x0a, 0x83, 0x46, 0xc2, 0x4e, 0xdf, 0xf0, 0x7b, 0x53, 0xd8, 0xaf, 0x1b, 0x26, 0x5e, 0xe9, + 0xa6, 0x34, 0xea, 0xaf, 0x33, 0x90, 0xab, 0x06, 0x9e, 0x7f, 0x62, 0xbb, 0xd1, 0x7f, 0xe0, 0xec, + 0x95, 0x4e, 0x55, 0xd9, 0x4b, 0xa9, 0x6a, 0x1f, 0xd6, 0xed, 0x81, 0xef, 0x05, 0x91, 0xe9, 0x5a, + 0x64, 0xd2, 0xfb, 0x68, 0xac, 0x4a, 0xa6, 0xe0, 0x27, 0xb0, 0x1e, 0x0f, 0x57, 0xf6, 0xfe, 0x11, + 0x80, 0x15, 0x78, 0xbe, 0xf1, 0x82, 0xca, 0xc5, 0x0c, 0x4c, 0xcb, 0x1a, 0x31, 0x0f, 0xce, 0x5b, + 0x31, 0xa3, 0xfa, 0x11, 0xac, 0x26, 0xf4, 0x6d, 0x33, 0x30, 0x07, 0x21, 0x7a, 0x17, 0x96, 0xcd, + 0xd0, 0x27, 0x56, 0x64, 0xb0, 0xcb, 0x15, 0xce, 0x9e, 0xc5, 0x45, 0x2e, 0xc4, 0x4c, 0xa6, 0xd6, + 0x00, 0x3d, 0x25, 0xe7, 0xb5, 0xf8, 0x08, 0x25, 0xa0, 0x65, 0x58, 0xb7, 0x5d, 0xcb, 0x19, 0x76, + 0x89, 0xd1, 0x27, 0x5e, 0xea, 0x36, 0x23, 0x87, 0xd7, 0x84, 0xea, 0x98, 0x78, 0xe2, 0x52, 0x43, + 0xfd, 0x7d, 0x16, 0x8a, 0x2c, 0x04, 0xaa, 0xf4, 0x8c, 0x7d, 0x11, 0xa1, 0x26, 0x2c, 0xb3, 0x55, + 0xe1, 0xb9, 0x7d, 0x23, 0x20, 0x56, 0x24, 0x26, 0x64, 0xda, 0x51, 0x5b, 0x5a, 0x91, 0xb8, 0xe0, + 0x48, 0xcb, 0xf3, 0x3d, 0x58, 0x71, 0x4c, 0xb7, 0x3f, 0xa4, 0xc7, 0x7e, 0xee, 0xaa, 0xec, 0xee, + 0xdc, 0x5e, 0x1e, 0x2f, 0xc7, 
0x52, 0xf6, 0xe2, 0xe8, 0x39, 0xac, 0x8d, 0xbd, 0x69, 0xf8, 0xec, + 0x65, 0x44, 0xcd, 0x5b, 0xbe, 0xa1, 0x53, 0x85, 0xf7, 0xf0, 0xaa, 0x35, 0xe1, 0x4e, 0x0b, 0x36, + 0x52, 0xf7, 0x59, 0x31, 0xfd, 0x22, 0xa3, 0x7f, 0x30, 0x85, 0xfe, 0xb2, 0x93, 0x31, 0x7a, 0x75, + 0x49, 0xa6, 0xfe, 0x2d, 0x03, 0x1b, 0x22, 0x3a, 0x08, 0x73, 0x28, 0x26, 0x5f, 0x0d, 0x49, 0x18, + 0xa1, 0x47, 0xb0, 0xc0, 0xee, 0x38, 0x84, 0x23, 0xff, 0xe7, 0x26, 0x77, 0x16, 0x98, 0x43, 0xd0, + 0x21, 0xe4, 0x7a, 0xfc, 0xa6, 0x8a, 0xbb, 0xad, 0x70, 0xf0, 0xbf, 0x37, 0xbb, 0xd8, 0xc2, 0x09, + 0x8e, 0xae, 0x30, 0x7e, 0xe9, 0x62, 0xf1, 0x19, 0x66, 0x91, 0x3e, 0x7d, 0x85, 0xc9, 0x41, 0x81, + 0x8b, 0xb6, 0xd4, 0x52, 0x1f, 0xc3, 0x16, 0xd3, 0x8e, 0x17, 0x43, 0x1c, 0x3c, 0x0a, 0xcc, 0x8d, + 0xaf, 0x7e, 0xe8, 0x23, 0xba, 0x07, 0x05, 0x9f, 0x1a, 0x77, 0x87, 0x83, 0x73, 0x12, 0xc4, 0xb7, + 0x6a, 0x54, 0xd4, 0x64, 0x12, 0xf5, 0xcf, 0x39, 0xd8, 0x9c, 0xf0, 0x5b, 0xe8, 0x7b, 0x6e, 0x48, + 0xd0, 0x67, 0xa0, 0xf4, 0x4c, 0x8b, 0x48, 0x77, 0x97, 0xf1, 0x32, 0xfb, 0xee, 0x4c, 0x47, 0x2b, + 0xbc, 0xda, 0x4b, 0xb5, 0x43, 0x74, 0x0e, 0x1b, 0xf1, 0x2d, 0x42, 0x8a, 0x9d, 0xbb, 0x78, 0x7f, + 0x0a, 0xfb, 0x64, 0xf9, 0x85, 0xd7, 0x63, 0x32, 0xd9, 0xc6, 0x73, 0x50, 0x1c, 0xaf, 0xef, 0xa5, + 0xf8, 0xe7, 0xde, 0x8c, 0x7f, 0x95, 0x12, 0xc9, 0xdc, 0x9f, 0xc3, 0x9a, 0x63, 0x9e, 0x13, 0x27, + 0x45, 0x3e, 0xff, 0x66, 0xe4, 0x0a, 0x63, 0x9a, 0x18, 0xf9, 0xc4, 0x9d, 0x71, 0x58, 0x5a, 0x78, + 0xc3, 0x91, 0x53, 0x22, 0x99, 0xdb, 0x80, 0x8d, 0xde, 0xd0, 0x71, 0x8c, 0x09, 0x03, 0xec, 0x9e, + 0x62, 0xfa, 0xbc, 0xea, 0x29, 0x36, 0x8c, 0x28, 0x55, 0x5a, 0x86, 0x6c, 0xd8, 0x0a, 0xcd, 0x1e, + 0x31, 0x42, 0x56, 0x82, 0xc9, 0x26, 0xf8, 0x6a, 0x7f, 0x38, 0xc5, 0xc4, 0x55, 0xe5, 0x1b, 0xde, + 0x08, 0xaf, 0x2a, 0xea, 0x5c, 0xb8, 0xc3, 0x17, 0xd6, 0xb8, 0x02, 0x94, 0xed, 0xe5, 0x6e, 0x94, + 0xbc, 0x26, 0xb6, 0x5f, 0x7c, 0xdb, 0x4e, 0x0b, 0x24, 0x7b, 0x3d, 0xd8, 0x94, 0x52, 0xa4, 0x64, + 0xa9, 0xc0, 0x2c, 0x1d, 0xdc, 0x34, 0x4d, 0xca, 0x91, 0x6b, 0x5d, 0xb1, 0xb1, 0xb5, 0x61, 0x39, + 0x95, 0x2e, 0xd9, 0x1d, 0xcf, 0xf4, 0x84, 0x21, 0xe7, 0x49, 0x5c, 0x94, 0x33, 0x24, 0xad, 0x95, + 0x48, 0x10, 0x78, 0x01, 0xab, 0xf8, 0xa4, 0x5a, 0x29, 0xf0, 0xad, 0x72, 0x87, 0x7d, 0x03, 0xc0, + 0xbc, 0x03, 0x6a, 0x89, 0xbb, 0xdf, 0x8b, 0xa8, 0xb4, 0xc9, 0xfa, 0x7e, 0x78, 0x13, 0xff, 0x5d, + 0x4a, 0x44, 0x38, 0x66, 0x51, 0x07, 0xb0, 0x7d, 0x68, 0x46, 0xc9, 0xbc, 0xf1, 0x14, 0x13, 0xc6, + 0xb9, 0xb9, 0x05, 0xb9, 0x80, 0x3f, 0xc6, 0xa9, 0x65, 0x5a, 0x7c, 0x5c, 0x95, 0xe2, 0x71, 0x42, + 0xa2, 0x7e, 0x05, 0x77, 0xae, 0x34, 0x27, 0x52, 0x1a, 0x86, 0x7c, 0x20, 0x9e, 0x63, 0x83, 0x1f, + 0xcc, 0x66, 0x90, 0x83, 0xf1, 0x98, 0x46, 0xfd, 0x43, 0x16, 0x4a, 0x95, 0x70, 0xe4, 0x5a, 0x71, + 0xcf, 0x23, 0xdb, 0x49, 0x36, 0x9f, 0x53, 0x28, 0xda, 0xae, 0x3f, 0x8c, 0xf8, 0x3d, 0x6a, 0xff, + 0x86, 0x9b, 0x79, 0x9d, 0x42, 0xd8, 0xe5, 0x6a, 0x1f, 0x17, 0xec, 0x71, 0xe3, 0xbf, 0x73, 0x3f, + 0xa2, 0x8c, 0xde, 0x30, 0x92, 0xde, 0x72, 0xfe, 0x46, 0x8c, 0x2d, 0x86, 0x11, 0xaf, 0x59, 0xf4, + 0xa4, 0x96, 0x3a, 0x80, 0xdb, 0x57, 0xb8, 0x54, 0x4c, 0xe2, 0x25, 0x73, 0x99, 0x6f, 0x6b, 0x6e, + 0x08, 0x6f, 0x33, 0x73, 0xa9, 0xd0, 0xa1, 0x36, 0x93, 0x40, 0xed, 0x5c, 0x0a, 0xd4, 0x69, 0xe5, + 0xf8, 0x75, 0x21, 0x21, 0x05, 0xeb, 0x08, 0xee, 0x5d, 0x6b, 0x56, 0xbc, 0xeb, 0x93, 0xcb, 0x01, + 0xfb, 0xfd, 0xd9, 0x0d, 0x5f, 0x0e, 0xda, 0x10, 0x0a, 0x52, 0x90, 0xd1, 0x13, 0x78, 0xdf, 0x0a, + 0x0d, 0xf1, 0x71, 0x87, 0xfb, 0x73, 0xda, 0x69, 0xe6, 0xd8, 0x0a, 0xc5, 0xa7, 0x9d, 0x7c, 0x3f, + 0x7e, 0x44, 0x77, 0x20, 0x3f, 0xb0, 0x07, 0xc4, 0x60, 
0xf7, 0xb0, 0xe2, 0x0b, 0x13, 0x15, 0xe8, + 0x23, 0x9f, 0xa8, 0x3f, 0xcb, 0x40, 0x51, 0x9e, 0x05, 0xf4, 0x04, 0x56, 0xa9, 0xd9, 0x2e, 0x09, + 0x23, 0xdb, 0xe5, 0xb9, 0x34, 0x73, 0xa3, 0x8d, 0xe8, 0xd8, 0x0a, 0x6b, 0x63, 0x10, 0x5e, 0xe9, + 0xa7, 0xda, 0x68, 0x07, 0xe0, 0x9c, 0xfa, 0xd4, 0x08, 0xed, 0xaf, 0x89, 0xa8, 0x79, 0xf2, 0x4c, + 0xd2, 0xb1, 0xbf, 0x26, 0xea, 0x0e, 0xe4, 0x93, 0xc1, 0x5f, 0x2e, 0x99, 0x54, 0x15, 0x56, 0xd2, + 0xfc, 0x57, 0xf4, 0xf9, 0x4d, 0x16, 0xd6, 0x5a, 0xf1, 0xb7, 0xd7, 0x53, 0x12, 0x99, 0x5d, 0x33, + 0x32, 0x51, 0x03, 0x16, 0x42, 0xea, 0x75, 0x71, 0xcd, 0x30, 0xed, 0xdb, 0xd0, 0x25, 0x02, 0x96, + 0x8c, 0x09, 0xe6, 0x24, 0xe8, 0x63, 0x28, 0x58, 0x01, 0x31, 0x23, 0x62, 0x44, 0xf6, 0x80, 0x5f, + 0xfa, 0x14, 0x0e, 0xb6, 0x63, 0xce, 0xf8, 0x03, 0x6f, 0x59, 0x8f, 0x3f, 0xf0, 0x62, 0xe0, 0xdd, + 0xa9, 0x80, 0x82, 0x87, 0x7e, 0x37, 0x01, 0x2f, 0x4e, 0x07, 0xf3, 0xee, 0x54, 0xa0, 0xfe, 0x18, + 0x16, 0xd8, 0x48, 0xd0, 0x26, 0xac, 0x75, 0xf4, 0x8a, 0x3e, 0xf9, 0x51, 0xb5, 0x00, 0x4b, 0x55, + 0xac, 0x55, 0x74, 0xad, 0xa6, 0x64, 0x68, 0x03, 0x9f, 0x35, 0x9b, 0xf5, 0xe6, 0xb1, 0x92, 0x45, + 0x39, 0x98, 0xaf, 0xb5, 0x9a, 0x9a, 0x32, 0x87, 0x96, 0x21, 0x5f, 0xad, 0x34, 0xab, 0x5a, 0xa3, + 0xa1, 0xd5, 0x94, 0xf9, 0xfb, 0x04, 0x40, 0xfa, 0x24, 0x50, 0x80, 0x25, 0x71, 0x15, 0xae, 0xbc, + 0x85, 0xd6, 0x60, 0xf9, 0x89, 0x86, 0x9f, 0x19, 0x67, 0xcd, 0x46, 0xfd, 0xb1, 0xd6, 0x78, 0xa6, + 0x64, 0x50, 0x11, 0x72, 0x49, 0x2b, 0x4b, 0x5b, 0xed, 0x56, 0xa7, 0x53, 0x3f, 0x6c, 0x50, 0x62, + 0x80, 0x45, 0xa1, 0x99, 0x47, 0xab, 0x50, 0x60, 0x50, 0x21, 0x58, 0x38, 0xf8, 0x7b, 0x16, 0x56, + 0xe4, 0x2d, 0xc9, 0x0b, 0xd0, 0x6f, 0x33, 0xb0, 0x7e, 0xc5, 0x9e, 0x80, 0x7e, 0x30, 0xed, 0x88, + 0x7b, 0xed, 0xb6, 0xb5, 0xfd, 0xe8, 0x4d, 0xa0, 0x7c, 0xe9, 0xa9, 0xef, 0x7d, 0xf3, 0xc7, 0xbf, + 0xfe, 0x3c, 0x7b, 0x4f, 0xdd, 0x9e, 0xfc, 0x0b, 0x43, 0xf8, 0x48, 0x94, 0x14, 0xe4, 0x51, 0xe6, + 0x3e, 0xfa, 0x55, 0x06, 0x6e, 0x5d, 0x93, 0x1c, 0xd0, 0x27, 0x37, 0xc9, 0x00, 0xd7, 0xe6, 0xb2, + 0xed, 0x9d, 0x18, 0x2e, 0xfd, 0x91, 0x60, 0x1c, 0x8b, 0x6a, 0x99, 0x0d, 0x70, 0x4f, 0x7d, 0x57, + 0x1a, 0x60, 0x8f, 0xe2, 0x1f, 0x99, 0x97, 0x78, 0x1f, 0x65, 0xee, 0x1f, 0x7e, 0x93, 0x81, 0x77, + 0x2c, 0x6f, 0xf0, 0xfa, 0x31, 0x1d, 0xae, 0xa7, 0x67, 0xa5, 0x4d, 0x03, 0xb0, 0x9d, 0x79, 0x5e, + 0x15, 0xa8, 0xbe, 0x47, 0x8f, 0xa3, 0x65, 0x2f, 0xe8, 0xef, 0xf7, 0x89, 0xcb, 0xc2, 0x73, 0x9f, + 0xab, 0x4c, 0xdf, 0x0e, 0xaf, 0xf9, 0x97, 0xc4, 0xc7, 0x5c, 0xf0, 0xcf, 0x4c, 0xe6, 0x7c, 0x91, + 0x41, 0x1e, 0xfe, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x49, 0x4b, 0x80, 0x49, 0x2c, 0x22, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1p2beta1/text_annotation.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1p2beta1/text_annotation.pb.go new file mode 100644 index 000000000..bea7c74ef --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1p2beta1/text_annotation.pb.go @@ -0,0 +1,593 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/vision/v1p2beta1/text_annotation.proto + +package vision + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Enum to denote the type of break found. New line, space etc. +type TextAnnotation_DetectedBreak_BreakType int32 + +const ( + // Unknown break label type. 
+ TextAnnotation_DetectedBreak_UNKNOWN TextAnnotation_DetectedBreak_BreakType = 0 + // Regular space. + TextAnnotation_DetectedBreak_SPACE TextAnnotation_DetectedBreak_BreakType = 1 + // Sure space (very wide). + TextAnnotation_DetectedBreak_SURE_SPACE TextAnnotation_DetectedBreak_BreakType = 2 + // Line-wrapping break. + TextAnnotation_DetectedBreak_EOL_SURE_SPACE TextAnnotation_DetectedBreak_BreakType = 3 + // End-line hyphen that is not present in text; does not co-occur with + // `SPACE`, `LEADER_SPACE`, or `LINE_BREAK`. + TextAnnotation_DetectedBreak_HYPHEN TextAnnotation_DetectedBreak_BreakType = 4 + // Line break that ends a paragraph. + TextAnnotation_DetectedBreak_LINE_BREAK TextAnnotation_DetectedBreak_BreakType = 5 +) + +var TextAnnotation_DetectedBreak_BreakType_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SPACE", + 2: "SURE_SPACE", + 3: "EOL_SURE_SPACE", + 4: "HYPHEN", + 5: "LINE_BREAK", +} +var TextAnnotation_DetectedBreak_BreakType_value = map[string]int32{ + "UNKNOWN": 0, + "SPACE": 1, + "SURE_SPACE": 2, + "EOL_SURE_SPACE": 3, + "HYPHEN": 4, + "LINE_BREAK": 5, +} + +func (x TextAnnotation_DetectedBreak_BreakType) String() string { + return proto.EnumName(TextAnnotation_DetectedBreak_BreakType_name, int32(x)) +} +func (TextAnnotation_DetectedBreak_BreakType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor2, []int{0, 1, 0} +} + +// Type of a block (text, image etc) as identified by OCR. +type Block_BlockType int32 + +const ( + // Unknown block type. + Block_UNKNOWN Block_BlockType = 0 + // Regular text block. + Block_TEXT Block_BlockType = 1 + // Table block. + Block_TABLE Block_BlockType = 2 + // Image block. + Block_PICTURE Block_BlockType = 3 + // Horizontal/vertical line box. + Block_RULER Block_BlockType = 4 + // Barcode block. + Block_BARCODE Block_BlockType = 5 +) + +var Block_BlockType_name = map[int32]string{ + 0: "UNKNOWN", + 1: "TEXT", + 2: "TABLE", + 3: "PICTURE", + 4: "RULER", + 5: "BARCODE", +} +var Block_BlockType_value = map[string]int32{ + "UNKNOWN": 0, + "TEXT": 1, + "TABLE": 2, + "PICTURE": 3, + "RULER": 4, + "BARCODE": 5, +} + +func (x Block_BlockType) String() string { + return proto.EnumName(Block_BlockType_name, int32(x)) +} +func (Block_BlockType) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{2, 0} } + +// TextAnnotation contains a structured representation of OCR extracted text. +// The hierarchy of an OCR extracted text structure is like this: +// TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol +// Each structural component, starting from Page, may further have their own +// properties. Properties describe detected languages, breaks etc.. Please refer +// to the [TextAnnotation.TextProperty][google.cloud.vision.v1p2beta1.TextAnnotation.TextProperty] message definition below for more +// detail. +type TextAnnotation struct { + // List of pages detected by OCR. + Pages []*Page `protobuf:"bytes,1,rep,name=pages" json:"pages,omitempty"` + // UTF-8 text detected on the pages. 
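+ //
+ // The same text can also be assembled by walking the hierarchy described
+ // above, symbol by symbol (an illustrative sketch, not part of the
+ // generated API; `annotation` is a hypothetical *TextAnnotation value,
+ // and detected breaks, defined below, supply the separators):
+ //
+ //   for _, page := range annotation.GetPages() {
+ //       for _, block := range page.GetBlocks() {
+ //           for _, para := range block.GetParagraphs() {
+ //               for _, word := range para.GetWords() {
+ //                   for _, sym := range word.GetSymbols() {
+ //                       fmt.Print(sym.GetText())
+ //                   }
+ //               }
+ //           }
+ //       }
+ //   }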
+ Text string `protobuf:"bytes,2,opt,name=text" json:"text,omitempty"` +} + +func (m *TextAnnotation) Reset() { *m = TextAnnotation{} } +func (m *TextAnnotation) String() string { return proto.CompactTextString(m) } +func (*TextAnnotation) ProtoMessage() {} +func (*TextAnnotation) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func (m *TextAnnotation) GetPages() []*Page { + if m != nil { + return m.Pages + } + return nil +} + +func (m *TextAnnotation) GetText() string { + if m != nil { + return m.Text + } + return "" +} + +// Detected language for a structural component. +type TextAnnotation_DetectedLanguage struct { + // The BCP-47 language code, such as "en-US" or "sr-Latn". For more + // information, see + // http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. + LanguageCode string `protobuf:"bytes,1,opt,name=language_code,json=languageCode" json:"language_code,omitempty"` + // Confidence of detected language. Range [0, 1]. + Confidence float32 `protobuf:"fixed32,2,opt,name=confidence" json:"confidence,omitempty"` +} + +func (m *TextAnnotation_DetectedLanguage) Reset() { *m = TextAnnotation_DetectedLanguage{} } +func (m *TextAnnotation_DetectedLanguage) String() string { return proto.CompactTextString(m) } +func (*TextAnnotation_DetectedLanguage) ProtoMessage() {} +func (*TextAnnotation_DetectedLanguage) Descriptor() ([]byte, []int) { + return fileDescriptor2, []int{0, 0} +} + +func (m *TextAnnotation_DetectedLanguage) GetLanguageCode() string { + if m != nil { + return m.LanguageCode + } + return "" +} + +func (m *TextAnnotation_DetectedLanguage) GetConfidence() float32 { + if m != nil { + return m.Confidence + } + return 0 +} + +// Detected start or end of a structural component. +type TextAnnotation_DetectedBreak struct { + // Detected break type. + Type TextAnnotation_DetectedBreak_BreakType `protobuf:"varint,1,opt,name=type,enum=google.cloud.vision.v1p2beta1.TextAnnotation_DetectedBreak_BreakType" json:"type,omitempty"` + // True if break prepends the element. + IsPrefix bool `protobuf:"varint,2,opt,name=is_prefix,json=isPrefix" json:"is_prefix,omitempty"` +} + +func (m *TextAnnotation_DetectedBreak) Reset() { *m = TextAnnotation_DetectedBreak{} } +func (m *TextAnnotation_DetectedBreak) String() string { return proto.CompactTextString(m) } +func (*TextAnnotation_DetectedBreak) ProtoMessage() {} +func (*TextAnnotation_DetectedBreak) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0, 1} } + +func (m *TextAnnotation_DetectedBreak) GetType() TextAnnotation_DetectedBreak_BreakType { + if m != nil { + return m.Type + } + return TextAnnotation_DetectedBreak_UNKNOWN +} + +func (m *TextAnnotation_DetectedBreak) GetIsPrefix() bool { + if m != nil { + return m.IsPrefix + } + return false +} + +// Additional information detected on the structural component. +type TextAnnotation_TextProperty struct { + // A list of detected languages together with confidence. + DetectedLanguages []*TextAnnotation_DetectedLanguage `protobuf:"bytes,1,rep,name=detected_languages,json=detectedLanguages" json:"detected_languages,omitempty"` + // Detected start or end of a text segment. 
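+ //
+ // For example, a consumer stitching symbols back into text might map the
+ // break types to separators (an illustrative sketch; `brk` is a
+ // hypothetical *TextAnnotation_DetectedBreak and `sep` a string):
+ //
+ //   switch brk.GetType() {
+ //   case TextAnnotation_DetectedBreak_SPACE,
+ //       TextAnnotation_DetectedBreak_SURE_SPACE:
+ //       sep = " "
+ //   case TextAnnotation_DetectedBreak_EOL_SURE_SPACE,
+ //       TextAnnotation_DetectedBreak_LINE_BREAK:
+ //       sep = "\n"
+ //   case TextAnnotation_DetectedBreak_HYPHEN:
+ //       sep = "" // the hyphen is not present in the text itself
+ //   }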
+ DetectedBreak *TextAnnotation_DetectedBreak `protobuf:"bytes,2,opt,name=detected_break,json=detectedBreak" json:"detected_break,omitempty"` +} + +func (m *TextAnnotation_TextProperty) Reset() { *m = TextAnnotation_TextProperty{} } +func (m *TextAnnotation_TextProperty) String() string { return proto.CompactTextString(m) } +func (*TextAnnotation_TextProperty) ProtoMessage() {} +func (*TextAnnotation_TextProperty) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0, 2} } + +func (m *TextAnnotation_TextProperty) GetDetectedLanguages() []*TextAnnotation_DetectedLanguage { + if m != nil { + return m.DetectedLanguages + } + return nil +} + +func (m *TextAnnotation_TextProperty) GetDetectedBreak() *TextAnnotation_DetectedBreak { + if m != nil { + return m.DetectedBreak + } + return nil +} + +// Detected page from OCR. +type Page struct { + // Additional information detected on the page. + Property *TextAnnotation_TextProperty `protobuf:"bytes,1,opt,name=property" json:"property,omitempty"` + // Page width. For PDFs the unit is points. For images (including + // TIFFs) the unit is pixels. + Width int32 `protobuf:"varint,2,opt,name=width" json:"width,omitempty"` + // Page height. For PDFs the unit is points. For images (including + // TIFFs) the unit is pixels. + Height int32 `protobuf:"varint,3,opt,name=height" json:"height,omitempty"` + // List of blocks of text, images etc on this page. + Blocks []*Block `protobuf:"bytes,4,rep,name=blocks" json:"blocks,omitempty"` + // Confidence of the OCR results on the page. Range [0, 1]. + Confidence float32 `protobuf:"fixed32,5,opt,name=confidence" json:"confidence,omitempty"` +} + +func (m *Page) Reset() { *m = Page{} } +func (m *Page) String() string { return proto.CompactTextString(m) } +func (*Page) ProtoMessage() {} +func (*Page) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +func (m *Page) GetProperty() *TextAnnotation_TextProperty { + if m != nil { + return m.Property + } + return nil +} + +func (m *Page) GetWidth() int32 { + if m != nil { + return m.Width + } + return 0 +} + +func (m *Page) GetHeight() int32 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *Page) GetBlocks() []*Block { + if m != nil { + return m.Blocks + } + return nil +} + +func (m *Page) GetConfidence() float32 { + if m != nil { + return m.Confidence + } + return 0 +} + +// Logical element on the page. +type Block struct { + // Additional information detected for the block. + Property *TextAnnotation_TextProperty `protobuf:"bytes,1,opt,name=property" json:"property,omitempty"` + // The bounding box for the block. + // The vertices are in the order of top-left, top-right, bottom-right, + // bottom-left. When a rotation of the bounding box is detected the rotation + // is represented as around the top-left corner as defined when the text is + // read in the 'natural' orientation. + // For example: + // + // * when the text is horizontal it might look like: + // + // 0----1 + // | | + // 3----2 + // + // * when it's rotated 180 degrees around the top-left corner it becomes: + // + // 2----3 + // | | + // 1----0 + // + // and the vertice order will still be (0, 1, 2, 3). + BoundingBox *BoundingPoly `protobuf:"bytes,2,opt,name=bounding_box,json=boundingBox" json:"bounding_box,omitempty"` + // List of paragraphs in this block (if this blocks is of type text). + Paragraphs []*Paragraph `protobuf:"bytes,3,rep,name=paragraphs" json:"paragraphs,omitempty"` + // Detected block type (text, image etc) for this block. 
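+ //
+ // For example, to keep only the textual blocks of a page (an illustrative
+ // sketch; `page` is a hypothetical *Page value):
+ //
+ //   var textBlocks []*Block
+ //   for _, b := range page.GetBlocks() {
+ //       if b.GetBlockType() == Block_TEXT {
+ //           textBlocks = append(textBlocks, b)
+ //       }
+ //   }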
+ BlockType Block_BlockType `protobuf:"varint,4,opt,name=block_type,json=blockType,enum=google.cloud.vision.v1p2beta1.Block_BlockType" json:"block_type,omitempty"` + // Confidence of the OCR results on the block. Range [0, 1]. + Confidence float32 `protobuf:"fixed32,5,opt,name=confidence" json:"confidence,omitempty"` +} + +func (m *Block) Reset() { *m = Block{} } +func (m *Block) String() string { return proto.CompactTextString(m) } +func (*Block) ProtoMessage() {} +func (*Block) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } + +func (m *Block) GetProperty() *TextAnnotation_TextProperty { + if m != nil { + return m.Property + } + return nil +} + +func (m *Block) GetBoundingBox() *BoundingPoly { + if m != nil { + return m.BoundingBox + } + return nil +} + +func (m *Block) GetParagraphs() []*Paragraph { + if m != nil { + return m.Paragraphs + } + return nil +} + +func (m *Block) GetBlockType() Block_BlockType { + if m != nil { + return m.BlockType + } + return Block_UNKNOWN +} + +func (m *Block) GetConfidence() float32 { + if m != nil { + return m.Confidence + } + return 0 +} + +// Structural unit of text representing a number of words in certain order. +type Paragraph struct { + // Additional information detected for the paragraph. + Property *TextAnnotation_TextProperty `protobuf:"bytes,1,opt,name=property" json:"property,omitempty"` + // The bounding box for the paragraph. + // The vertices are in the order of top-left, top-right, bottom-right, + // bottom-left. When a rotation of the bounding box is detected the rotation + // is represented as around the top-left corner as defined when the text is + // read in the 'natural' orientation. + // For example: + // * when the text is horizontal it might look like: + // 0----1 + // | | + // 3----2 + // * when it's rotated 180 degrees around the top-left corner it becomes: + // 2----3 + // | | + // 1----0 + // and the vertice order will still be (0, 1, 2, 3). + BoundingBox *BoundingPoly `protobuf:"bytes,2,opt,name=bounding_box,json=boundingBox" json:"bounding_box,omitempty"` + // List of words in this paragraph. + Words []*Word `protobuf:"bytes,3,rep,name=words" json:"words,omitempty"` + // Confidence of the OCR results for the paragraph. Range [0, 1]. + Confidence float32 `protobuf:"fixed32,4,opt,name=confidence" json:"confidence,omitempty"` +} + +func (m *Paragraph) Reset() { *m = Paragraph{} } +func (m *Paragraph) String() string { return proto.CompactTextString(m) } +func (*Paragraph) ProtoMessage() {} +func (*Paragraph) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} } + +func (m *Paragraph) GetProperty() *TextAnnotation_TextProperty { + if m != nil { + return m.Property + } + return nil +} + +func (m *Paragraph) GetBoundingBox() *BoundingPoly { + if m != nil { + return m.BoundingBox + } + return nil +} + +func (m *Paragraph) GetWords() []*Word { + if m != nil { + return m.Words + } + return nil +} + +func (m *Paragraph) GetConfidence() float32 { + if m != nil { + return m.Confidence + } + return 0 +} + +// A word representation. +type Word struct { + // Additional information detected for the word. + Property *TextAnnotation_TextProperty `protobuf:"bytes,1,opt,name=property" json:"property,omitempty"` + // The bounding box for the word. + // The vertices are in the order of top-left, top-right, bottom-right, + // bottom-left. When a rotation of the bounding box is detected the rotation + // is represented as around the top-left corner as defined when the text is + // read in the 'natural' orientation. 
+ // For example: + // * when the text is horizontal it might look like: + // 0----1 + // | | + // 3----2 + // * when it's rotated 180 degrees around the top-left corner it becomes: + // 2----3 + // | | + // 1----0 + // and the vertice order will still be (0, 1, 2, 3). + BoundingBox *BoundingPoly `protobuf:"bytes,2,opt,name=bounding_box,json=boundingBox" json:"bounding_box,omitempty"` + // List of symbols in the word. + // The order of the symbols follows the natural reading order. + Symbols []*Symbol `protobuf:"bytes,3,rep,name=symbols" json:"symbols,omitempty"` + // Confidence of the OCR results for the word. Range [0, 1]. + Confidence float32 `protobuf:"fixed32,4,opt,name=confidence" json:"confidence,omitempty"` +} + +func (m *Word) Reset() { *m = Word{} } +func (m *Word) String() string { return proto.CompactTextString(m) } +func (*Word) ProtoMessage() {} +func (*Word) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{4} } + +func (m *Word) GetProperty() *TextAnnotation_TextProperty { + if m != nil { + return m.Property + } + return nil +} + +func (m *Word) GetBoundingBox() *BoundingPoly { + if m != nil { + return m.BoundingBox + } + return nil +} + +func (m *Word) GetSymbols() []*Symbol { + if m != nil { + return m.Symbols + } + return nil +} + +func (m *Word) GetConfidence() float32 { + if m != nil { + return m.Confidence + } + return 0 +} + +// A single symbol representation. +type Symbol struct { + // Additional information detected for the symbol. + Property *TextAnnotation_TextProperty `protobuf:"bytes,1,opt,name=property" json:"property,omitempty"` + // The bounding box for the symbol. + // The vertices are in the order of top-left, top-right, bottom-right, + // bottom-left. When a rotation of the bounding box is detected the rotation + // is represented as around the top-left corner as defined when the text is + // read in the 'natural' orientation. + // For example: + // * when the text is horizontal it might look like: + // 0----1 + // | | + // 3----2 + // * when it's rotated 180 degrees around the top-left corner it becomes: + // 2----3 + // | | + // 1----0 + // and the vertice order will still be (0, 1, 2, 3). + BoundingBox *BoundingPoly `protobuf:"bytes,2,opt,name=bounding_box,json=boundingBox" json:"bounding_box,omitempty"` + // The actual UTF-8 representation of the symbol. + Text string `protobuf:"bytes,3,opt,name=text" json:"text,omitempty"` + // Confidence of the OCR results for the symbol. Range [0, 1]. 
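+ //
+ // For example, a consumer might skip low-confidence symbols when
+ // assembling text (an illustrative sketch; `word` is a hypothetical
+ // *Word and the 0.8 threshold is arbitrary):
+ //
+ //   var out string
+ //   for _, sym := range word.GetSymbols() {
+ //       if sym.GetConfidence() >= 0.8 {
+ //           out += sym.GetText()
+ //       }
+ //   }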
+ Confidence float32 `protobuf:"fixed32,4,opt,name=confidence" json:"confidence,omitempty"` +} + +func (m *Symbol) Reset() { *m = Symbol{} } +func (m *Symbol) String() string { return proto.CompactTextString(m) } +func (*Symbol) ProtoMessage() {} +func (*Symbol) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{5} } + +func (m *Symbol) GetProperty() *TextAnnotation_TextProperty { + if m != nil { + return m.Property + } + return nil +} + +func (m *Symbol) GetBoundingBox() *BoundingPoly { + if m != nil { + return m.BoundingBox + } + return nil +} + +func (m *Symbol) GetText() string { + if m != nil { + return m.Text + } + return "" +} + +func (m *Symbol) GetConfidence() float32 { + if m != nil { + return m.Confidence + } + return 0 +} + +func init() { + proto.RegisterType((*TextAnnotation)(nil), "google.cloud.vision.v1p2beta1.TextAnnotation") + proto.RegisterType((*TextAnnotation_DetectedLanguage)(nil), "google.cloud.vision.v1p2beta1.TextAnnotation.DetectedLanguage") + proto.RegisterType((*TextAnnotation_DetectedBreak)(nil), "google.cloud.vision.v1p2beta1.TextAnnotation.DetectedBreak") + proto.RegisterType((*TextAnnotation_TextProperty)(nil), "google.cloud.vision.v1p2beta1.TextAnnotation.TextProperty") + proto.RegisterType((*Page)(nil), "google.cloud.vision.v1p2beta1.Page") + proto.RegisterType((*Block)(nil), "google.cloud.vision.v1p2beta1.Block") + proto.RegisterType((*Paragraph)(nil), "google.cloud.vision.v1p2beta1.Paragraph") + proto.RegisterType((*Word)(nil), "google.cloud.vision.v1p2beta1.Word") + proto.RegisterType((*Symbol)(nil), "google.cloud.vision.v1p2beta1.Symbol") + proto.RegisterEnum("google.cloud.vision.v1p2beta1.TextAnnotation_DetectedBreak_BreakType", TextAnnotation_DetectedBreak_BreakType_name, TextAnnotation_DetectedBreak_BreakType_value) + proto.RegisterEnum("google.cloud.vision.v1p2beta1.Block_BlockType", Block_BlockType_name, Block_BlockType_value) +} + +func init() { + proto.RegisterFile("google/cloud/vision/v1p2beta1/text_annotation.proto", fileDescriptor2) +} + +var fileDescriptor2 = []byte{ + // 774 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x56, 0x4f, 0x6f, 0xd3, 0x48, + 0x14, 0x5f, 0x27, 0x76, 0x1a, 0xbf, 0xb4, 0x91, 0x77, 0x76, 0xb5, 0x8a, 0xb2, 0xbb, 0xa8, 0xa4, + 0x20, 0x55, 0x02, 0x39, 0x6a, 0x7a, 0x2a, 0x45, 0xa0, 0x38, 0xb5, 0xd4, 0xaa, 0x21, 0xb5, 0xa6, + 0x09, 0xa5, 0x5c, 0x2c, 0xff, 0x99, 0x3a, 0x56, 0x13, 0x8f, 0x65, 0xbb, 0x6d, 0x72, 0xe5, 0x8a, + 0x04, 0x5f, 0x88, 0x2f, 0x83, 0xc4, 0x09, 0xf1, 0x01, 0x38, 0x22, 0x8f, 0xed, 0x34, 0x09, 0xa2, + 0xe6, 0x8f, 0x38, 0xf4, 0x12, 0xcd, 0x7b, 0x79, 0xbf, 0x37, 0xef, 0xf7, 0x7b, 0xf3, 0x3c, 0x03, + 0xdb, 0x0e, 0xa5, 0xce, 0x88, 0x34, 0xad, 0x11, 0xbd, 0xb0, 0x9b, 0x97, 0x6e, 0xe8, 0x52, 0xaf, + 0x79, 0xb9, 0xe5, 0xb7, 0x4c, 0x12, 0x19, 0x5b, 0xcd, 0x88, 0x4c, 0x22, 0xdd, 0xf0, 0x3c, 0x1a, + 0x19, 0x91, 0x4b, 0x3d, 0xd9, 0x0f, 0x68, 0x44, 0xd1, 0xff, 0x09, 0x48, 0x66, 0x20, 0x39, 0x01, + 0xc9, 0x33, 0x50, 0xfd, 0xbf, 0x34, 0xa7, 0xe1, 0xbb, 0xcd, 0x6b, 0x6c, 0x98, 0x80, 0xeb, 0x0f, + 0x6f, 0xde, 0xd1, 0x21, 0x74, 0x4c, 0xa2, 0x60, 0x9a, 0x44, 0x37, 0x5e, 0x0b, 0x50, 0xed, 0x93, + 0x49, 0xd4, 0x9e, 0xe5, 0x41, 0x3b, 0x20, 0xf8, 0x86, 0x43, 0xc2, 0x1a, 0xb7, 0x5e, 0xdc, 0xac, + 0xb4, 0x36, 0xe4, 0x1b, 0xab, 0x91, 0x35, 0xc3, 0x21, 0x38, 0x41, 0x20, 0x04, 0x7c, 0xcc, 0xa8, + 0x56, 0x58, 0xe7, 0x36, 0x45, 0xcc, 0xd6, 0xf5, 0x13, 0x90, 0xf6, 0x48, 0x44, 0xac, 0x88, 0xd8, + 0x5d, 0xc3, 0x73, 0x2e, 0x0c, 0x87, 0xa0, 0x0d, 0x58, 0x1b, 0xa5, 0x6b, 
0xdd, 0xa2, 0x36, 0xa9, + 0x71, 0x0c, 0xb0, 0x9a, 0x39, 0x3b, 0xd4, 0x26, 0xe8, 0x0e, 0x80, 0x45, 0xbd, 0x33, 0xd7, 0x26, + 0x9e, 0x45, 0x58, 0xca, 0x02, 0x9e, 0xf3, 0xd4, 0x3f, 0x71, 0xb0, 0x96, 0x65, 0x56, 0x02, 0x62, + 0x9c, 0xa3, 0x53, 0xe0, 0xa3, 0xa9, 0x9f, 0x64, 0xab, 0xb6, 0xd4, 0x9c, 0xc2, 0x17, 0x69, 0xcb, + 0x0b, 0xa9, 0x64, 0xf6, 0xdb, 0x9f, 0xfa, 0x04, 0xb3, 0x94, 0xe8, 0x5f, 0x10, 0xdd, 0x50, 0xf7, + 0x03, 0x72, 0xe6, 0x4e, 0x58, 0x2d, 0x65, 0x5c, 0x76, 0x43, 0x8d, 0xd9, 0x0d, 0x0b, 0xc4, 0x59, + 0x3c, 0xaa, 0xc0, 0xca, 0xa0, 0x77, 0xd8, 0x3b, 0x3a, 0xe9, 0x49, 0x7f, 0x20, 0x11, 0x84, 0x63, + 0xad, 0xdd, 0x51, 0x25, 0x0e, 0x55, 0x01, 0x8e, 0x07, 0x58, 0xd5, 0x13, 0xbb, 0x80, 0x10, 0x54, + 0xd5, 0xa3, 0xae, 0x3e, 0xe7, 0x2b, 0x22, 0x80, 0xd2, 0xfe, 0xa9, 0xb6, 0xaf, 0xf6, 0x24, 0x3e, + 0x8e, 0xef, 0x1e, 0xf4, 0x54, 0x5d, 0xc1, 0x6a, 0xfb, 0x50, 0x12, 0xea, 0xef, 0x39, 0x58, 0x8d, + 0x4b, 0xd6, 0x02, 0xea, 0x93, 0x20, 0x9a, 0xa2, 0x31, 0x20, 0x3b, 0xad, 0x59, 0xcf, 0x84, 0xcb, + 0x9a, 0xf6, 0xe4, 0xe7, 0xb8, 0x67, 0x0d, 0xc2, 0x7f, 0xda, 0x4b, 0x9e, 0x10, 0x99, 0x50, 0x9d, + 0x6d, 0x67, 0xc6, 0x6c, 0x99, 0x0c, 0x95, 0xd6, 0xee, 0x2f, 0xc8, 0x8c, 0xd7, 0xec, 0x79, 0xb3, + 0xf1, 0x91, 0x03, 0x3e, 0x3e, 0x4f, 0xe8, 0x39, 0x94, 0xfd, 0x94, 0x27, 0xeb, 0x66, 0xa5, 0xf5, + 0xe8, 0xc7, 0xb6, 0x99, 0x57, 0x0a, 0xcf, 0x72, 0xa1, 0xbf, 0x41, 0xb8, 0x72, 0xed, 0x68, 0xc8, + 0x6a, 0x17, 0x70, 0x62, 0xa0, 0x7f, 0xa0, 0x34, 0x24, 0xae, 0x33, 0x8c, 0x6a, 0x45, 0xe6, 0x4e, + 0x2d, 0xf4, 0x18, 0x4a, 0xe6, 0x88, 0x5a, 0xe7, 0x61, 0x8d, 0x67, 0xaa, 0xde, 0xcb, 0xa9, 0x41, + 0x89, 0x83, 0x71, 0x8a, 0x59, 0x3a, 0xbf, 0xc2, 0xf2, 0xf9, 0x6d, 0xbc, 0x2b, 0x82, 0xc0, 0x10, + 0xbf, 0x8d, 0x6d, 0x0f, 0x56, 0x4d, 0x7a, 0xe1, 0xd9, 0xae, 0xe7, 0xe8, 0x26, 0x9d, 0xa4, 0x0d, + 0x7b, 0x90, 0xc7, 0x22, 0x85, 0x68, 0x74, 0x34, 0xc5, 0x95, 0x2c, 0x81, 0x42, 0x27, 0x68, 0x1f, + 0xc0, 0x37, 0x02, 0xc3, 0x09, 0x0c, 0x7f, 0x18, 0xd6, 0x8a, 0x4c, 0x93, 0xcd, 0xdc, 0xcf, 0x43, + 0x0a, 0xc0, 0x73, 0x58, 0xf4, 0x0c, 0x80, 0xa9, 0xa4, 0xb3, 0x79, 0xe5, 0xd9, 0xbc, 0xca, 0xdf, + 0xa3, 0x6e, 0xf2, 0xcb, 0x06, 0x53, 0x34, 0xb3, 0x65, 0xae, 0xd4, 0x18, 0xc4, 0x19, 0x6e, 0x71, + 0x40, 0xcb, 0xc0, 0xf7, 0xd5, 0x17, 0x7d, 0x89, 0x8b, 0x47, 0xb5, 0xdf, 0x56, 0xba, 0xf1, 0x68, + 0x56, 0x60, 0x45, 0x3b, 0xe8, 0xf4, 0x07, 0x38, 0x9e, 0x49, 0x11, 0x04, 0x3c, 0xe8, 0xaa, 0x58, + 0xe2, 0x63, 0xbf, 0xd2, 0xc6, 0x9d, 0xa3, 0x3d, 0x55, 0x12, 0x1a, 0x6f, 0x0a, 0x20, 0xce, 0xc8, + 0xdd, 0x9a, 0x16, 0xee, 0x80, 0x70, 0x45, 0x03, 0x3b, 0xeb, 0x5e, 0xde, 0xc7, 0xfd, 0x84, 0x06, + 0x36, 0x4e, 0x10, 0x4b, 0x22, 0xf3, 0x5f, 0x89, 0xfc, 0xb6, 0x00, 0x7c, 0x1c, 0x7f, 0x6b, 0xb4, + 0x78, 0x0a, 0x2b, 0xe1, 0x74, 0x6c, 0xd2, 0x51, 0xa6, 0xc6, 0xfd, 0x9c, 0x54, 0xc7, 0x2c, 0x1a, + 0x67, 0xa8, 0x5c, 0x45, 0x3e, 0x70, 0x50, 0x4a, 0x30, 0xb7, 0x46, 0x93, 0xec, 0x06, 0x2f, 0x5e, + 0xdf, 0xe0, 0x79, 0x34, 0x95, 0x57, 0x1c, 0xdc, 0xb5, 0xe8, 0xf8, 0xe6, 0x3d, 0x95, 0xbf, 0x16, + 0x09, 0x69, 0xf1, 0xf3, 0x43, 0xe3, 0x5e, 0x76, 0x52, 0x94, 0x43, 0xe3, 0x3b, 0x4c, 0xa6, 0x81, + 0xd3, 0x74, 0x88, 0xc7, 0x1e, 0x27, 0xcd, 0xe4, 0x2f, 0xc3, 0x77, 0xc3, 0x6f, 0xbc, 0x66, 0x76, + 0x13, 0xc7, 0x67, 0x8e, 0x33, 0x4b, 0x0c, 0xb2, 0xfd, 0x25, 0x00, 0x00, 0xff, 0xff, 0xce, 0x91, + 0x71, 0x97, 0x71, 0x09, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1p2beta1/web_detection.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1p2beta1/web_detection.pb.go new file mode 100644 index 000000000..9185111e6 --- /dev/null 
+++ b/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1p2beta1/web_detection.pb.go @@ -0,0 +1,278 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/vision/v1p2beta1/web_detection.proto + +package vision + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Relevant information for the image from the Internet. +type WebDetection struct { + // Deduced entities from similar images on the Internet. + WebEntities []*WebDetection_WebEntity `protobuf:"bytes,1,rep,name=web_entities,json=webEntities" json:"web_entities,omitempty"` + // Fully matching images from the Internet. + // Can include resized copies of the query image. + FullMatchingImages []*WebDetection_WebImage `protobuf:"bytes,2,rep,name=full_matching_images,json=fullMatchingImages" json:"full_matching_images,omitempty"` + // Partial matching images from the Internet. + // Those images are similar enough to share some key-point features. For + // example an original image will likely have partial matching for its crops. + PartialMatchingImages []*WebDetection_WebImage `protobuf:"bytes,3,rep,name=partial_matching_images,json=partialMatchingImages" json:"partial_matching_images,omitempty"` + // Web pages containing the matching images from the Internet. + PagesWithMatchingImages []*WebDetection_WebPage `protobuf:"bytes,4,rep,name=pages_with_matching_images,json=pagesWithMatchingImages" json:"pages_with_matching_images,omitempty"` + // The visually similar image results. + VisuallySimilarImages []*WebDetection_WebImage `protobuf:"bytes,6,rep,name=visually_similar_images,json=visuallySimilarImages" json:"visually_similar_images,omitempty"` + // The service's best guess as to the topic of the request image. + // Inferred from similar images on the open web. + BestGuessLabels []*WebDetection_WebLabel `protobuf:"bytes,8,rep,name=best_guess_labels,json=bestGuessLabels" json:"best_guess_labels,omitempty"` +} + +func (m *WebDetection) Reset() { *m = WebDetection{} } +func (m *WebDetection) String() string { return proto.CompactTextString(m) } +func (*WebDetection) ProtoMessage() {} +func (*WebDetection) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } + +func (m *WebDetection) GetWebEntities() []*WebDetection_WebEntity { + if m != nil { + return m.WebEntities + } + return nil +} + +func (m *WebDetection) GetFullMatchingImages() []*WebDetection_WebImage { + if m != nil { + return m.FullMatchingImages + } + return nil +} + +func (m *WebDetection) GetPartialMatchingImages() []*WebDetection_WebImage { + if m != nil { + return m.PartialMatchingImages + } + return nil +} + +func (m *WebDetection) GetPagesWithMatchingImages() []*WebDetection_WebPage { + if m != nil { + return m.PagesWithMatchingImages + } + return nil +} + +func (m *WebDetection) GetVisuallySimilarImages() []*WebDetection_WebImage { + if m != nil { + return m.VisuallySimilarImages + } + return nil +} + +func (m *WebDetection) GetBestGuessLabels() []*WebDetection_WebLabel { + if m != nil { + return m.BestGuessLabels + } + return nil +} + +// Entity deduced from similar images on the Internet. +type WebDetection_WebEntity struct { + // Opaque entity ID. + EntityId string `protobuf:"bytes,1,opt,name=entity_id,json=entityId" json:"entity_id,omitempty"` + // Overall relevancy score for the entity. 
+ // Not normalized and not comparable across different image queries. + Score float32 `protobuf:"fixed32,2,opt,name=score" json:"score,omitempty"` + // Canonical description of the entity, in English. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` +} + +func (m *WebDetection_WebEntity) Reset() { *m = WebDetection_WebEntity{} } +func (m *WebDetection_WebEntity) String() string { return proto.CompactTextString(m) } +func (*WebDetection_WebEntity) ProtoMessage() {} +func (*WebDetection_WebEntity) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0, 0} } + +func (m *WebDetection_WebEntity) GetEntityId() string { + if m != nil { + return m.EntityId + } + return "" +} + +func (m *WebDetection_WebEntity) GetScore() float32 { + if m != nil { + return m.Score + } + return 0 +} + +func (m *WebDetection_WebEntity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +// Metadata for online images. +type WebDetection_WebImage struct { + // The result image URL. + Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` + // (Deprecated) Overall relevancy score for the image. + Score float32 `protobuf:"fixed32,2,opt,name=score" json:"score,omitempty"` +} + +func (m *WebDetection_WebImage) Reset() { *m = WebDetection_WebImage{} } +func (m *WebDetection_WebImage) String() string { return proto.CompactTextString(m) } +func (*WebDetection_WebImage) ProtoMessage() {} +func (*WebDetection_WebImage) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0, 1} } + +func (m *WebDetection_WebImage) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *WebDetection_WebImage) GetScore() float32 { + if m != nil { + return m.Score + } + return 0 +} + +// Metadata for web pages. +type WebDetection_WebPage struct { + // The result web page URL. + Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` + // (Deprecated) Overall relevancy score for the web page. + Score float32 `protobuf:"fixed32,2,opt,name=score" json:"score,omitempty"` + // Title for the web page, may contain HTML markups. + PageTitle string `protobuf:"bytes,3,opt,name=page_title,json=pageTitle" json:"page_title,omitempty"` + // Fully matching images on the page. + // Can include resized copies of the query image. + FullMatchingImages []*WebDetection_WebImage `protobuf:"bytes,4,rep,name=full_matching_images,json=fullMatchingImages" json:"full_matching_images,omitempty"` + // Partial matching images on the page. + // Those images are similar enough to share some key-point features. For + // example an original image will likely have partial matching for its + // crops. 
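+ //
+ // For example, to count result pages that contain a full (rather than
+ // only partial) match of the query image (an illustrative sketch; `wd`
+ // is a hypothetical *WebDetection value):
+ //
+ //   full := 0
+ //   for _, p := range wd.GetPagesWithMatchingImages() {
+ //       if len(p.GetFullMatchingImages()) > 0 {
+ //           full++
+ //       }
+ //   }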
+ PartialMatchingImages []*WebDetection_WebImage `protobuf:"bytes,5,rep,name=partial_matching_images,json=partialMatchingImages" json:"partial_matching_images,omitempty"` +} + +func (m *WebDetection_WebPage) Reset() { *m = WebDetection_WebPage{} } +func (m *WebDetection_WebPage) String() string { return proto.CompactTextString(m) } +func (*WebDetection_WebPage) ProtoMessage() {} +func (*WebDetection_WebPage) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0, 2} } + +func (m *WebDetection_WebPage) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *WebDetection_WebPage) GetScore() float32 { + if m != nil { + return m.Score + } + return 0 +} + +func (m *WebDetection_WebPage) GetPageTitle() string { + if m != nil { + return m.PageTitle + } + return "" +} + +func (m *WebDetection_WebPage) GetFullMatchingImages() []*WebDetection_WebImage { + if m != nil { + return m.FullMatchingImages + } + return nil +} + +func (m *WebDetection_WebPage) GetPartialMatchingImages() []*WebDetection_WebImage { + if m != nil { + return m.PartialMatchingImages + } + return nil +} + +// Label to provide extra metadata for the web detection. +type WebDetection_WebLabel struct { + // Label for extra metadata. + Label string `protobuf:"bytes,1,opt,name=label" json:"label,omitempty"` + // The BCP-47 language code for `label`, such as "en-US" or "sr-Latn". + // For more information, see + // http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. + LanguageCode string `protobuf:"bytes,2,opt,name=language_code,json=languageCode" json:"language_code,omitempty"` +} + +func (m *WebDetection_WebLabel) Reset() { *m = WebDetection_WebLabel{} } +func (m *WebDetection_WebLabel) String() string { return proto.CompactTextString(m) } +func (*WebDetection_WebLabel) ProtoMessage() {} +func (*WebDetection_WebLabel) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0, 3} } + +func (m *WebDetection_WebLabel) GetLabel() string { + if m != nil { + return m.Label + } + return "" +} + +func (m *WebDetection_WebLabel) GetLanguageCode() string { + if m != nil { + return m.LanguageCode + } + return "" +} + +func init() { + proto.RegisterType((*WebDetection)(nil), "google.cloud.vision.v1p2beta1.WebDetection") + proto.RegisterType((*WebDetection_WebEntity)(nil), "google.cloud.vision.v1p2beta1.WebDetection.WebEntity") + proto.RegisterType((*WebDetection_WebImage)(nil), "google.cloud.vision.v1p2beta1.WebDetection.WebImage") + proto.RegisterType((*WebDetection_WebPage)(nil), "google.cloud.vision.v1p2beta1.WebDetection.WebPage") + proto.RegisterType((*WebDetection_WebLabel)(nil), "google.cloud.vision.v1p2beta1.WebDetection.WebLabel") +} + +func init() { proto.RegisterFile("google/cloud/vision/v1p2beta1/web_detection.proto", fileDescriptor3) } + +var fileDescriptor3 = []byte{ + // 511 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0x4f, 0x6f, 0xd3, 0x30, + 0x18, 0xc6, 0x95, 0x76, 0x1b, 0x8d, 0x5b, 0x04, 0xb3, 0x86, 0x16, 0x05, 0x26, 0x15, 0xb8, 0xf4, + 0x94, 0xa8, 0x1d, 0x9c, 0xb8, 0x6d, 0x4c, 0x68, 0x12, 0x48, 0x55, 0x40, 0x1a, 0xe2, 0x92, 0x39, + 0x89, 0x97, 0xbe, 0x92, 0x1b, 0x47, 0xb1, 0xd3, 0xaa, 0x37, 0x4e, 0x7c, 0x14, 0x3e, 0x23, 0x47, + 0xf4, 0xda, 0xee, 0x54, 0x51, 0x36, 0x31, 0x86, 0xb8, 0xf9, 0x7d, 0xac, 0xe7, 0xf9, 0xd9, 0xaf, + 0xff, 0x90, 0x71, 0x29, 0x65, 0x29, 0x78, 0x9c, 0x0b, 0xd9, 0x16, 0xf1, 0x02, 0x14, 0xc8, 0x2a, + 0x5e, 0x8c, 0xeb, 0x49, 0xc6, 0x35, 0x1b, 0xc7, 0x4b, 0x9e, 0xa5, 0x05, 0xd7, 0x3c, 0xd7, 
0x20, + 0xab, 0xa8, 0x6e, 0xa4, 0x96, 0xf4, 0xc8, 0x5a, 0x22, 0x63, 0x89, 0xac, 0x25, 0xba, 0xb6, 0x84, + 0xcf, 0x5c, 0x22, 0xab, 0x21, 0x66, 0x55, 0x25, 0x35, 0x43, 0xaf, 0xb2, 0xe6, 0x17, 0xdf, 0x7c, + 0x32, 0xb8, 0xe0, 0xd9, 0xdb, 0x75, 0x26, 0xfd, 0x4c, 0x06, 0x08, 0xe1, 0x95, 0x06, 0x0d, 0x5c, + 0x05, 0xde, 0xb0, 0x3b, 0xea, 0x4f, 0x5e, 0x47, 0xb7, 0x42, 0xa2, 0xcd, 0x08, 0x2c, 0xce, 0xd0, + 0xbe, 0x4a, 0xfa, 0x4b, 0x37, 0x04, 0xae, 0xe8, 0x15, 0x39, 0xb8, 0x6a, 0x85, 0x48, 0xe7, 0x4c, + 0xe7, 0x33, 0xa8, 0xca, 0x14, 0xe6, 0xac, 0xe4, 0x2a, 0xe8, 0x18, 0xc2, 0xab, 0x3b, 0x12, 0xce, + 0xd1, 0x9c, 0x50, 0x4c, 0xfc, 0xe0, 0x02, 0x8d, 0xa4, 0xa8, 0x20, 0x87, 0x35, 0x6b, 0x34, 0xb0, + 0x6d, 0x54, 0xf7, 0x1e, 0xa8, 0x27, 0x2e, 0xf4, 0x17, 0x5a, 0x4d, 0xc2, 0x1a, 0x07, 0xe9, 0x12, + 0xf4, 0x6c, 0x0b, 0xb8, 0x63, 0x80, 0xc7, 0x77, 0x04, 0x4e, 0x91, 0x77, 0x68, 0x62, 0x2f, 0x40, + 0xcf, 0xb6, 0xf7, 0xb7, 0x00, 0xd5, 0x32, 0x21, 0x56, 0xa9, 0x82, 0x39, 0x08, 0xd6, 0xac, 0x71, + 0x7b, 0xf7, 0xd9, 0xdf, 0x3a, 0xf4, 0xa3, 0xcd, 0x74, 0xb4, 0x4b, 0xb2, 0x9f, 0x71, 0xa5, 0xd3, + 0xb2, 0xe5, 0x4a, 0xa5, 0x82, 0x65, 0x5c, 0xa8, 0xa0, 0xf7, 0x57, 0x9c, 0xf7, 0x68, 0x4e, 0x1e, + 0x61, 0xdc, 0x3b, 0x4c, 0x33, 0xb5, 0x0a, 0x2f, 0x89, 0x7f, 0x7d, 0x63, 0xe8, 0x53, 0xe2, 0x9b, + 0xab, 0xb7, 0x4a, 0xa1, 0x08, 0xbc, 0xa1, 0x37, 0xf2, 0x93, 0x9e, 0x15, 0xce, 0x0b, 0x7a, 0x40, + 0x76, 0x55, 0x2e, 0x1b, 0x1e, 0x74, 0x86, 0xde, 0xa8, 0x93, 0xd8, 0x82, 0x0e, 0x49, 0xbf, 0xe0, + 0x2a, 0x6f, 0xa0, 0x46, 0x50, 0xd0, 0x35, 0xa6, 0x4d, 0x29, 0x9c, 0x90, 0xde, 0x7a, 0x9b, 0xf4, + 0x31, 0xe9, 0xb6, 0x8d, 0x70, 0xd1, 0x38, 0xfc, 0x7d, 0x6a, 0xf8, 0xbd, 0x43, 0x1e, 0xb8, 0xa3, + 0xf8, 0x53, 0x0f, 0x3d, 0x22, 0x04, 0x0f, 0x2d, 0xd5, 0xa0, 0x05, 0x77, 0x0b, 0xf1, 0x51, 0xf9, + 0x84, 0xc2, 0x8d, 0x0f, 0x60, 0xe7, 0xff, 0x3d, 0x80, 0xdd, 0x7f, 0xfe, 0x00, 0xc2, 0x33, 0xd3, + 0x5c, 0x73, 0x96, 0xd8, 0x16, 0x73, 0x43, 0x5c, 0xab, 0x6c, 0x41, 0x5f, 0x92, 0x87, 0x82, 0x55, + 0x65, 0x8b, 0xad, 0xc9, 0x65, 0x61, 0x9b, 0xe6, 0x27, 0x83, 0xb5, 0x78, 0x2a, 0x0b, 0x7e, 0xf2, + 0xd5, 0x23, 0xcf, 0x73, 0x39, 0xbf, 0x7d, 0x65, 0x27, 0xfb, 0x9b, 0x4b, 0x9b, 0xe2, 0x0f, 0x36, + 0xf5, 0xbe, 0x9c, 0x3a, 0x4f, 0x29, 0x31, 0x31, 0x92, 0x4d, 0x19, 0x97, 0xbc, 0x32, 0xff, 0x5b, + 0x6c, 0xa7, 0x58, 0x0d, 0xea, 0x86, 0x2f, 0xf5, 0x8d, 0x15, 0x7e, 0x78, 0x5e, 0xb6, 0x67, 0x2c, + 0xc7, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x66, 0x62, 0xaa, 0xcd, 0x84, 0x05, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/devtools/cloudbuild/v1/cloudbuild.pb.go b/vendor/google.golang.org/genproto/googleapis/devtools/cloudbuild/v1/cloudbuild.pb.go index 0b62fd0d8..5f0a7ad72 100644 --- a/vendor/google.golang.org/genproto/googleapis/devtools/cloudbuild/v1/cloudbuild.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/devtools/cloudbuild/v1/cloudbuild.pb.go @@ -66,25 +66,25 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package -// Possible status of a build. +// Possible status of a build or build step. type Build_Status int32 const ( // Status of the build is unknown. Build_STATUS_UNKNOWN Build_Status = 0 - // Build is queued; work has not yet begun. + // Build or step is queued; work has not yet begun. Build_QUEUED Build_Status = 1 - // Build is being executed. + // Build or step is being executed. Build_WORKING Build_Status = 2 - // Build finished successfully. + // Build or step finished successfully. 
Build_SUCCESS Build_Status = 3 - // Build failed to complete successfully. + // Build or step failed to complete successfully. Build_FAILURE Build_Status = 4 - // Build failed due to an internal cause. + // Build or step failed due to an internal cause. Build_INTERNAL_ERROR Build_Status = 5 - // Build took longer than was allowed. + // Build or step took longer than was allowed. Build_TIMEOUT Build_Status = 6 - // Build was canceled by a user. + // Build or step was canceled by a user. Build_CANCELLED Build_Status = 7 ) @@ -374,6 +374,9 @@ type RepoSource struct { // *RepoSource_CommitSha Revision isRepoSource_Revision `protobuf_oneof:"revision"` // Directory, relative to the source root, in which to run the build. + // + // This must be a relative path. If a step's dir is specified and is an + // absolute path, this value is ignored for that step's execution. Dir string `protobuf:"bytes,7,opt,name=dir" json:"dir,omitempty"` } @@ -663,6 +666,7 @@ type BuiltImage struct { // Docker Registry 2.0 digest. Digest string `protobuf:"bytes,3,opt,name=digest" json:"digest,omitempty"` // Stores timing information for pushing the specified image. + // @OutputOnly PushTiming *TimeSpan `protobuf:"bytes,4,opt,name=push_timing,json=pushTiming" json:"push_timing,omitempty"` } @@ -723,8 +727,16 @@ type BuildStep struct { // an entrypoint, the first element in args will be used as the entrypoint, // and the remainder will be used as arguments. Args []string `protobuf:"bytes,3,rep,name=args" json:"args,omitempty"` - // Working directory (relative to project source root) to use when running - // this operation's container. + // Working directory to use when running this step's container. + // + // If this value is a relative path, it is relative to the build's working + // directory. If this value is absolute, it may be outside the build's working + // directory, in which case the contents of the path may not be persisted + // across build step executions, unless a volume for that path is specified. + // + // If the build specifies a RepoSource with dir and a step with a dir which + // specifies an absolute path, the RepoSource dir is ignored for the step's + // execution. Dir string `protobuf:"bytes,4,opt,name=dir" json:"dir,omitempty"` // Optional unique identifier for this build step, used in wait_for to // reference this build step as a dependency. @@ -751,7 +763,17 @@ type BuildStep struct { // of a mis-configured build request. Volumes []*Volume `protobuf:"bytes,9,rep,name=volumes" json:"volumes,omitempty"` // Stores timing information for executing this build step. + // @OutputOnly Timing *TimeSpan `protobuf:"bytes,10,opt,name=timing" json:"timing,omitempty"` + // Time limit for executing this build step. If not defined, the step has no + // time limit and will be allowed to continue to run until either it completes + // or the build itself times out. + Timeout *google_protobuf4.Duration `protobuf:"bytes,11,opt,name=timeout" json:"timeout,omitempty"` + // Status of the build step. At this time, build step status is only updated + // on build completion; step status is not updated in real-time as the build + // progresses. 
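+ //
+ // For example, after a build completes, a client might report which
+ // steps failed (an illustrative sketch; `build` is a hypothetical *Build
+ // whose Steps field is assumed to hold its []*BuildStep):
+ //
+ //   for i, s := range build.Steps {
+ //       if s.GetStatus() == Build_FAILURE {
+ //           log.Printf("step %d failed", i)
+ //       }
+ //   }
+ //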
+ // @OutputOnly + Status Build_Status `protobuf:"varint,12,opt,name=status,enum=google.devtools.cloudbuild.v1.Build_Status" json:"status,omitempty"` } func (m *BuildStep) Reset() { *m = BuildStep{} } @@ -829,6 +851,20 @@ func (m *BuildStep) GetTiming() *TimeSpan { return nil } +func (m *BuildStep) GetTimeout() *google_protobuf4.Duration { + if m != nil { + return m.Timeout + } + return nil +} + +func (m *BuildStep) GetStatus() Build_Status { + if m != nil { + return m.Status + } + return Build_STATUS_UNKNOWN +} + // Volume describes a Docker container volume which is mounted into build steps // in order to persist files across build step execution. type Volume struct { @@ -969,7 +1005,7 @@ type Build struct { BuildTriggerId string `protobuf:"bytes,22,opt,name=build_trigger_id,json=buildTriggerId" json:"build_trigger_id,omitempty"` // Special options for this build. Options *BuildOptions `protobuf:"bytes,23,opt,name=options" json:"options,omitempty"` - // URL to logs for this build in Google Cloud Logging. + // URL to logs for this build in Google Cloud Console. // @OutputOnly LogUrl string `protobuf:"bytes,25,opt,name=log_url,json=logUrl" json:"log_url,omitempty"` // Substitutions data for Build resource. @@ -978,13 +1014,15 @@ type Build struct { Tags []string `protobuf:"bytes,31,rep,name=tags" json:"tags,omitempty"` // Secrets to decrypt using Cloud KMS. Secrets []*Secret `protobuf:"bytes,32,rep,name=secrets" json:"secrets,omitempty"` - // Stores timing information for phases of the build. - // Valid keys are: - // - BUILD: time to execute all build steps - // - PUSH: time to push all specified images. - // - FETCHSOURCE: time to fetch source. + // Stores timing information for phases of the build. Valid keys are: + // + // * BUILD: time to execute all build steps + // * PUSH: time to push all specified images. + // * FETCHSOURCE: time to fetch source. + // // If the build does not specify source, or does not specify images, // these keys will not be included. + // @OutputOnly Timing map[string]*TimeSpan `protobuf:"bytes,33,rep,name=timing" json:"timing,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` } @@ -1807,7 +1845,7 @@ type BuildOptions struct { SourceProvenanceHash []Hash_HashType `protobuf:"varint,1,rep,packed,name=source_provenance_hash,json=sourceProvenanceHash,enum=google.devtools.cloudbuild.v1.Hash_HashType" json:"source_provenance_hash,omitempty"` // Requested verifiability options. RequestedVerifyOption BuildOptions_VerifyOption `protobuf:"varint,2,opt,name=requested_verify_option,json=requestedVerifyOption,enum=google.devtools.cloudbuild.v1.BuildOptions_VerifyOption" json:"requested_verify_option,omitempty"` - // GCE VM size to run the build on. + // Compute Engine machine type on which to run the build. MachineType BuildOptions_MachineType `protobuf:"varint,3,opt,name=machine_type,json=machineType,enum=google.devtools.cloudbuild.v1.BuildOptions_MachineType" json:"machine_type,omitempty"` // Requested disk size for the VM that runs the build. 
Note that this is *NOT* // "disk free"; some of the space will be used by the operating system and @@ -2427,166 +2465,167 @@ var _CloudBuild_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("google/devtools/cloudbuild/v1/cloudbuild.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 2567 bytes of a gzipped FileDescriptorProto + // 2585 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x5a, 0x5b, 0x73, 0xdb, 0xc6, - 0x15, 0x16, 0x48, 0x8a, 0x97, 0x43, 0x5d, 0xe0, 0x8d, 0xa3, 0x40, 0x74, 0x1c, 0x2b, 0xc8, 0x4d, - 0x71, 0x12, 0x32, 0x92, 0xeb, 0xd8, 0x51, 0x2e, 0xb6, 0x44, 0x51, 0x97, 0x89, 0x4c, 0xb9, 0x20, - 0xe9, 0x4c, 0xd3, 0x76, 0x50, 0x90, 0x58, 0x43, 0xa8, 0x40, 0x00, 0x05, 0x96, 0x6c, 0x94, 0xd4, - 0xd3, 0x36, 0x33, 0xed, 0x6b, 0xdb, 0xe9, 0xf4, 0xa1, 0xd3, 0x87, 0x5e, 0x9e, 0x3b, 0x9d, 0x4e, - 0xfb, 0xd0, 0x99, 0xce, 0xe4, 0xa9, 0x0f, 0xfd, 0x01, 0xfd, 0x0b, 0xfd, 0x21, 0x9d, 0xbd, 0x80, - 0x04, 0x49, 0xbb, 0x20, 0xe2, 0xf6, 0x45, 0xe6, 0x9e, 0xdd, 0xef, 0xec, 0xd9, 0x73, 0xdb, 0x6f, - 0x49, 0x43, 0xd5, 0xf2, 0x3c, 0xcb, 0xc1, 0x35, 0x13, 0x0f, 0x89, 0xe7, 0x39, 0x61, 0xad, 0xe7, - 0x78, 0x03, 0xb3, 0x3b, 0xb0, 0x1d, 0xb3, 0x36, 0xdc, 0x8a, 0x8d, 0xaa, 0x7e, 0xe0, 0x11, 0x0f, - 0x5d, 0xe5, 0xeb, 0xab, 0xd1, 0xfa, 0x6a, 0x6c, 0xc5, 0x70, 0xab, 0xf2, 0xbc, 0x50, 0x67, 0xf8, - 0x76, 0xcd, 0x70, 0x5d, 0x8f, 0x18, 0xc4, 0xf6, 0xdc, 0x90, 0x83, 0x2b, 0xaa, 0x98, 0x65, 0x98, - 0x9a, 0x31, 0x30, 0x6d, 0xc2, 0xff, 0xea, 0x8e, 0x67, 0x89, 0x35, 0x2f, 0x89, 0x35, 0x8e, 0xe7, - 0x5a, 0xc1, 0xc0, 0x75, 0x6d, 0xd7, 0xaa, 0x79, 0x3e, 0x0e, 0x26, 0x14, 0xbd, 0x20, 0x16, 0xb1, - 0x51, 0x77, 0xf0, 0xb0, 0x66, 0x0e, 0xf8, 0x02, 0x31, 0x7f, 0x65, 0x7a, 0x1e, 0xf7, 0x7d, 0x72, - 0x21, 0x26, 0xaf, 0x4d, 0x4f, 0x12, 0xbb, 0x8f, 0x43, 0x62, 0xf4, 0x7d, 0xbe, 0x40, 0xdd, 0x83, - 0x4b, 0x1a, 0x26, 0xc1, 0xc5, 0x1e, 0x3d, 0x95, 0x86, 0xbf, 0x37, 0xc0, 0x21, 0x41, 0x57, 0x01, - 0xfc, 0xc0, 0xfb, 0x2e, 0xee, 0x11, 0xdd, 0x36, 0x15, 0x69, 0x43, 0xda, 0x2c, 0x69, 0x25, 0x21, - 0x39, 0x36, 0xd1, 0x0a, 0x64, 0x6c, 0x53, 0xc9, 0x30, 0x71, 0xc6, 0x36, 0xd5, 0x5f, 0x4b, 0xb0, - 0xa6, 0x0d, 0x5c, 0xa6, 0xa2, 0x1d, 0xd8, 0x96, 0x85, 0x83, 0x39, 0x35, 0x5d, 0x05, 0x20, 0x1c, - 0xa0, 0x8f, 0x34, 0x96, 0x84, 0xe4, 0xd8, 0x44, 0xbb, 0x90, 0x0f, 0xbd, 0x41, 0xd0, 0xc3, 0x4a, - 0x76, 0x43, 0xda, 0x2c, 0x6f, 0xbf, 0x5e, 0xfd, 0xaf, 0x11, 0xa9, 0x6a, 0xd8, 0xf7, 0x5a, 0x0c, - 0xa0, 0x09, 0xa0, 0xaa, 0xc3, 0x72, 0x8b, 0x78, 0x81, 0x61, 0x61, 0x3e, 0x81, 0xd6, 0x20, 0xdf, - 0x1d, 0xf4, 0xce, 0x31, 0x11, 0xd6, 0x88, 0x11, 0x95, 0x7b, 0x5d, 0x6a, 0x96, 0x30, 0x43, 0x8c, - 0xd0, 0x0b, 0x00, 0x16, 0x76, 0x45, 0x4c, 0x98, 0x1d, 0x59, 0x2d, 0x26, 0x51, 0xff, 0x29, 0x01, - 0x8c, 0xf7, 0x4d, 0x3a, 0xf0, 0x15, 0x28, 0x05, 0xd8, 0xf7, 0x74, 0xd7, 0xe8, 0x63, 0xb1, 0x51, - 0x91, 0x0a, 0x9a, 0x46, 0x1f, 0xa3, 0x17, 0xa1, 0xdc, 0x0d, 0x0c, 0xb7, 0x77, 0xc6, 0xa7, 0xe9, - 0x5e, 0xa5, 0xa3, 0x05, 0x0d, 0xb8, 0x90, 0x2d, 0xb9, 0x02, 0x45, 0x62, 0x58, 0x7c, 0x3e, 0x27, - 0xe6, 0x0b, 0xc4, 0xb0, 0xd8, 0xe4, 0x35, 0x80, 0x9e, 0xd7, 0xef, 0xdb, 0x44, 0x0f, 0xcf, 0x0c, - 0x65, 0x51, 0x4c, 0x97, 0xb8, 0xac, 0x75, 0x66, 0x20, 0x19, 0xb2, 0xa6, 0x1d, 0x28, 0x05, 0xb6, - 0x2f, 0xfd, 0xb8, 0x07, 0x50, 0x0c, 0xf0, 0xd0, 0x0e, 0xe9, 0x49, 0xfe, 0x26, 0x41, 0x5e, 0x9c, - 0xa2, 0x03, 0x2b, 0x21, 0xf7, 0x9a, 0x2e, 0x02, 0x90, 0x61, 0x01, 0x78, 0x33, 0x21, 0x00, 0x13, - 0xae, 0x3e, 0x5a, 0xd0, 0x96, 0xc3, 0x09, 0xdf, 0x9f, 0x40, 0x99, 0x9d, 0xfe, 0x2b, 0x06, 0x95, - 0xfa, 0x22, 
0x18, 0x8d, 0xf6, 0x8a, 0x51, 0x76, 0xa8, 0x5f, 0x48, 0x00, 0x34, 0xfb, 0xc8, 0x71, - 0xdf, 0xb0, 0x30, 0x42, 0x90, 0x63, 0x0e, 0xe2, 0xde, 0x67, 0x9f, 0x69, 0x78, 0x4d, 0xdb, 0xc2, - 0x21, 0xe1, 0x6e, 0xd5, 0xc4, 0x08, 0x1d, 0x41, 0xd9, 0x1f, 0x84, 0x67, 0x3a, 0xb1, 0xfb, 0xb6, - 0x6b, 0x31, 0x9f, 0x96, 0xb7, 0x5f, 0x4b, 0x30, 0xa9, 0x6d, 0xf7, 0x71, 0xcb, 0x37, 0x5c, 0x0d, - 0x28, 0xb6, 0xcd, 0xa0, 0xea, 0x3f, 0x32, 0x50, 0x62, 0x25, 0xd0, 0x22, 0xd8, 0x7f, 0xac, 0x0d, - 0x32, 0x64, 0xb1, 0x3b, 0x54, 0x32, 0x1b, 0x59, 0xea, 0x7e, 0xec, 0x0e, 0xe9, 0x2a, 0x23, 0xb0, - 0x42, 0x25, 0xcb, 0x44, 0xec, 0x73, 0x14, 0xa4, 0xdc, 0x28, 0x48, 0xa2, 0xde, 0x16, 0xa3, 0x7a, - 0x43, 0xeb, 0x50, 0xfc, 0xbe, 0x61, 0x13, 0xfd, 0xa1, 0x17, 0x28, 0x79, 0x86, 0x2c, 0xd0, 0xf1, - 0x81, 0x17, 0xd0, 0x6c, 0xc5, 0x2e, 0x09, 0x2e, 0x7c, 0xcf, 0x76, 0x89, 0x08, 0x74, 0x4c, 0x42, - 0xd3, 0x33, 0xc4, 0xbd, 0x00, 0x13, 0x9d, 0x5a, 0x52, 0x64, 0xe0, 0x12, 0x97, 0x34, 0xdc, 0x21, - 0xba, 0x03, 0x85, 0xa1, 0xe7, 0x0c, 0xfa, 0x38, 0x54, 0x4a, 0x1b, 0xd9, 0xcd, 0xf2, 0xf6, 0x2b, - 0x09, 0x9e, 0x78, 0xc0, 0x56, 0x6b, 0x11, 0x0a, 0xdd, 0x81, 0xbc, 0xf0, 0x24, 0xa4, 0xf3, 0xa4, - 0x80, 0xa9, 0x6f, 0x43, 0x9e, 0xeb, 0x7c, 0xac, 0x07, 0x11, 0xe4, 0x7c, 0x83, 0x9c, 0x89, 0xca, - 0x61, 0x9f, 0xd5, 0x4f, 0xa1, 0xa0, 0xe1, 0x70, 0xe0, 0x90, 0x90, 0xf6, 0x0b, 0x9b, 0x66, 0x40, - 0xc8, 0x7c, 0x9c, 0x9c, 0x5a, 0xe3, 0x9c, 0xd1, 0x04, 0x10, 0x5d, 0x87, 0x4b, 0x6c, 0x5a, 0x0f, - 0x09, 0xf6, 0x75, 0xa1, 0x8d, 0x87, 0x67, 0xb5, 0x1b, 0x45, 0x97, 0x41, 0x42, 0xf5, 0x17, 0x65, - 0x58, 0x64, 0x11, 0x17, 0x11, 0x92, 0x46, 0x11, 0x9a, 0xec, 0x02, 0xf2, 0x74, 0x17, 0xa8, 0x43, - 0x3e, 0x24, 0x06, 0x19, 0x84, 0xec, 0x20, 0x2b, 0xdb, 0x6f, 0xcc, 0x61, 0xa7, 0x59, 0x6d, 0x31, - 0x88, 0x26, 0xa0, 0xe8, 0x25, 0x58, 0xe6, 0x9f, 0x74, 0x13, 0x13, 0xc3, 0x76, 0x14, 0x85, 0x6d, - 0xb3, 0xc4, 0x85, 0xfb, 0x4c, 0x86, 0x3e, 0x98, 0xea, 0xa0, 0x49, 0xf1, 0x9c, 0xec, 0x9e, 0xe8, - 0x43, 0x58, 0xa4, 0x7e, 0x08, 0x95, 0x32, 0xf3, 0xe7, 0xe6, 0x3c, 0x76, 0x52, 0x07, 0x69, 0x1c, - 0x86, 0xee, 0x42, 0x21, 0xe0, 0xb1, 0x11, 0xf9, 0xf0, 0x6a, 0x62, 0xb1, 0xb3, 0xd5, 0x5a, 0x04, - 0x43, 0xef, 0x41, 0xb9, 0x17, 0x60, 0x83, 0x60, 0x5a, 0xa1, 0x58, 0xc9, 0x33, 0x2d, 0x95, 0x48, - 0x4b, 0x74, 0xad, 0xb1, 0x3c, 0x62, 0xd7, 0x9a, 0x06, 0x7c, 0x39, 0x15, 0xa0, 0x77, 0x01, 0x42, - 0x62, 0x04, 0x84, 0x63, 0x0b, 0x89, 0xd8, 0x12, 0x5b, 0xcd, 0xa0, 0xef, 0x41, 0xf9, 0xa1, 0xed, - 0xda, 0xbc, 0x33, 0x60, 0xa5, 0x98, 0xbc, 0x2f, 0x5f, 0xce, 0xc0, 0x37, 0xa0, 0x40, 0x51, 0xde, - 0x80, 0x28, 0x4b, 0x0c, 0xb8, 0x3e, 0x03, 0xdc, 0x17, 0x97, 0xb8, 0x16, 0xad, 0xa4, 0x1d, 0x4a, - 0xa4, 0xdb, 0x32, 0x4b, 0xb7, 0x28, 0x23, 0xaf, 0x41, 0xd9, 0xf1, 0xac, 0x50, 0x17, 0xb7, 0xd6, - 0x33, 0xbc, 0xa6, 0xa9, 0x68, 0x8f, 0xdf, 0x5c, 0xdf, 0x82, 0x4b, 0x3c, 0x5c, 0xba, 0x1f, 0x78, - 0x43, 0xec, 0x1a, 0x6e, 0x0f, 0x2b, 0xcf, 0xb2, 0x7d, 0x6b, 0x73, 0x85, 0xfb, 0xfe, 0x08, 0xa6, - 0xc9, 0xe1, 0x94, 0x04, 0x6d, 0x82, 0xcc, 0x0b, 0x22, 0x76, 0x51, 0xaf, 0x31, 0x1b, 0x56, 0xba, - 0xb1, 0x0b, 0xff, 0xd8, 0x44, 0x0d, 0x28, 0x78, 0x3e, 0x63, 0x2e, 0xca, 0x73, 0x6c, 0xf7, 0xb9, - 0xd2, 0xfa, 0x94, 0x43, 0xb4, 0x08, 0x8b, 0x9e, 0x83, 0x82, 0xe3, 0x59, 0xfa, 0x20, 0x70, 0x94, - 0x75, 0xde, 0xaa, 0x1d, 0xcf, 0xea, 0x04, 0x0e, 0xfa, 0x36, 0x2c, 0x87, 0x83, 0x6e, 0x48, 0x6c, - 0x32, 0xe0, 0xbb, 0x5c, 0x65, 0x49, 0x79, 0x6b, 0xbe, 0xe2, 0x89, 0x23, 0x1b, 0xb4, 0x21, 0x6a, - 0x93, 0xda, 0x68, 0x6f, 0x21, 0x86, 0x15, 0x2a, 0xd7, 0x78, 0x2f, 0xa6, 0x9f, 0x69, 0x3f, 0xe4, - 0xcd, 0x31, 0x54, 0x36, 0xe6, 0xea, 
0x87, 0x2d, 0xb6, 0x5a, 0x8b, 0x50, 0xe8, 0x68, 0xd4, 0x0f, - 0x5f, 0x64, 0xf8, 0xb7, 0xe7, 0x32, 0x96, 0xdf, 0x28, 0xdc, 0x4a, 0x81, 0xaf, 0xdc, 0x05, 0x34, - 0x7b, 0x06, 0x7a, 0x59, 0x9c, 0xe3, 0x0b, 0xd1, 0x79, 0xe8, 0x47, 0x74, 0x19, 0x16, 0x87, 0x86, - 0x33, 0x88, 0xd8, 0x05, 0x1f, 0xec, 0x64, 0x6e, 0x4b, 0x95, 0x2e, 0x94, 0x63, 0x8a, 0x1f, 0x03, - 0xfd, 0x20, 0x0e, 0x4d, 0xd1, 0xbb, 0xc7, 0x7b, 0xa8, 0x3f, 0x84, 0x3c, 0x6f, 0x53, 0x08, 0xc1, - 0x4a, 0xab, 0xbd, 0xdb, 0xee, 0xb4, 0xf4, 0x4e, 0xf3, 0xa3, 0xe6, 0xe9, 0xc7, 0x4d, 0x79, 0x01, - 0x01, 0xe4, 0xbf, 0xde, 0x69, 0x74, 0x1a, 0xfb, 0xb2, 0x84, 0xca, 0x50, 0xf8, 0xf8, 0x54, 0xfb, - 0xe8, 0xb8, 0x79, 0x28, 0x67, 0xe8, 0xa0, 0xd5, 0xa9, 0xd7, 0x1b, 0xad, 0x96, 0x9c, 0xa5, 0x83, - 0x83, 0xdd, 0xe3, 0x93, 0x8e, 0xd6, 0x90, 0x73, 0x54, 0xcd, 0x71, 0xb3, 0xdd, 0xd0, 0x9a, 0xbb, - 0x27, 0x7a, 0x43, 0xd3, 0x4e, 0x35, 0x79, 0x91, 0x2e, 0x68, 0x1f, 0xdf, 0x6b, 0x9c, 0x76, 0xda, - 0x72, 0x1e, 0x2d, 0x43, 0xa9, 0xbe, 0xdb, 0xac, 0x37, 0x4e, 0x4e, 0x1a, 0xfb, 0x72, 0x41, 0xfd, - 0x01, 0x14, 0x23, 0xbb, 0xa6, 0xca, 0x5f, 0x4a, 0x53, 0xfe, 0x37, 0xa1, 0x88, 0x5d, 0x93, 0x03, - 0x33, 0x89, 0xc0, 0x02, 0x76, 0x4d, 0x3a, 0x52, 0xdb, 0xb0, 0x26, 0x92, 0x5a, 0xd0, 0xc3, 0x7b, - 0x98, 0x18, 0xa6, 0x41, 0x0c, 0xb4, 0x03, 0x8b, 0xcc, 0x71, 0xc2, 0x8c, 0x97, 0xe7, 0xc9, 0x03, - 0x8d, 0x43, 0xd4, 0xdf, 0x67, 0x41, 0x9e, 0xae, 0x54, 0x64, 0xc2, 0x73, 0x01, 0x0e, 0x3d, 0x67, - 0x88, 0xe9, 0x5d, 0x35, 0xc1, 0xd5, 0xb2, 0xe9, 0xb9, 0x9a, 0xf6, 0x6c, 0xa4, 0x6c, 0x92, 0x2d, - 0x7f, 0x13, 0x2e, 0x8f, 0x76, 0x89, 0x53, 0xb7, 0x7c, 0x5a, 0x3e, 0x8e, 0x22, 0x35, 0x31, 0xae, - 0xfc, 0x1d, 0xda, 0x63, 0x1d, 0xac, 0x9f, 0x19, 0xe1, 0x19, 0x0e, 0x95, 0x1c, 0xab, 0x90, 0x3b, - 0x29, 0x5b, 0x56, 0xf5, 0xc0, 0x76, 0xf0, 0x11, 0xd3, 0xc0, 0x0b, 0x06, 0x1e, 0x8e, 0x04, 0x95, - 0x33, 0x58, 0x9d, 0x9a, 0x7e, 0x4c, 0xda, 0xdf, 0x99, 0x4c, 0xfb, 0xa4, 0x43, 0x8d, 0x15, 0xc6, - 0x13, 0xbf, 0x09, 0x30, 0x9e, 0x40, 0x77, 0xa1, 0x34, 0x3a, 0x99, 0x22, 0xb1, 0x73, 0xbd, 0x94, - 0xa0, 0x96, 0x22, 0xb5, 0x62, 0x64, 0xbb, 0xfa, 0x23, 0x09, 0x72, 0xf4, 0x03, 0xba, 0x0b, 0x39, - 0x72, 0xe1, 0xf3, 0xf4, 0x5d, 0x49, 0x0c, 0x2a, 0x85, 0xb0, 0x3f, 0xed, 0x0b, 0x1f, 0x6b, 0x0c, - 0x39, 0xd9, 0x11, 0x96, 0x84, 0xd1, 0xea, 0x06, 0x14, 0xa3, 0x75, 0xa8, 0x08, 0xb9, 0xe6, 0x69, - 0xb3, 0xc1, 0x2b, 0xb4, 0x75, 0xb4, 0xbb, 0x7d, 0xf3, 0x1d, 0x59, 0x52, 0xbf, 0xa4, 0xef, 0x01, - 0xd6, 0xc7, 0xd0, 0x06, 0x2c, 0x9d, 0xf7, 0x43, 0xfd, 0x1c, 0x5f, 0xe8, 0x31, 0x4e, 0x06, 0xe7, - 0xfd, 0xf0, 0x23, 0x7c, 0xc1, 0xde, 0x1e, 0xad, 0x09, 0x62, 0x99, 0x65, 0x47, 0xfe, 0xda, 0x5c, - 0xcd, 0x52, 0xfc, 0xd3, 0x70, 0x87, 0x3c, 0x7e, 0x63, 0x3a, 0x5a, 0x79, 0x1f, 0x56, 0x26, 0x27, - 0x93, 0xfa, 0xdd, 0x52, 0x3c, 0x24, 0x1e, 0xa0, 0x3a, 0xe3, 0x02, 0x69, 0xde, 0xb6, 0xa3, 0x3a, - 0xcd, 0xa4, 0xaf, 0xd3, 0xbb, 0xb0, 0x7a, 0x88, 0xc9, 0xd3, 0xbc, 0xa4, 0x7f, 0x2a, 0xc1, 0xa5, - 0x13, 0x3b, 0xe4, 0x3a, 0xc2, 0x39, 0x95, 0x5c, 0x81, 0x92, 0xcf, 0xaa, 0xdf, 0xfe, 0x8c, 0x7b, - 0x61, 0x51, 0x2b, 0x52, 0x41, 0xcb, 0xfe, 0x8c, 0xbf, 0x47, 0xe9, 0x24, 0xf1, 0xce, 0xb1, 0x2b, - 0xde, 0x3e, 0x6c, 0x79, 0x9b, 0x0a, 0x28, 0xe9, 0x78, 0x68, 0x3b, 0x04, 0x07, 0x8c, 0xe1, 0x94, - 0x34, 0x31, 0x52, 0x3f, 0x03, 0x14, 0xb7, 0x23, 0xf4, 0x3d, 0x37, 0xc4, 0xe8, 0x7d, 0xfa, 0x76, - 0xa6, 0x12, 0x91, 0xd3, 0xf3, 0x79, 0x47, 0x60, 0xd0, 0xab, 0xb0, 0xea, 0xe2, 0x4f, 0x89, 0x1e, - 0xb3, 0x87, 0x9f, 0x7c, 0x99, 0x8a, 0xef, 0x47, 0x36, 0xa9, 0x75, 0x40, 0x75, 0x5a, 0xd9, 0xce, - 0xd3, 0x78, 0xf2, 0x27, 0x39, 0x58, 0x8a, 0x7f, 0x21, 0x31, 
0x43, 0xd1, 0x37, 0xa0, 0x6c, 0xe2, - 0xb0, 0x17, 0xd8, 0x8c, 0x76, 0x30, 0x7a, 0x5a, 0xd2, 0xe2, 0x22, 0xd4, 0x06, 0x39, 0xe2, 0x3c, - 0x04, 0xf7, 0x7d, 0xc7, 0x20, 0x11, 0x87, 0x4c, 0xd1, 0xf7, 0x56, 0x85, 0x8a, 0xb6, 0xd0, 0x80, - 0xde, 0x8f, 0x12, 0x2c, 0x37, 0x7f, 0x82, 0x1d, 0x2d, 0x88, 0x14, 0x43, 0xcf, 0x03, 0x6b, 0x11, - 0xac, 0x08, 0x8b, 0xe2, 0x81, 0x3f, 0x92, 0x4c, 0x93, 0xe5, 0xc5, 0x54, 0x64, 0xb9, 0x02, 0x45, - 0xd3, 0x0e, 0x8d, 0xae, 0x83, 0x4d, 0xa5, 0xb4, 0x21, 0x6d, 0x16, 0xb5, 0xd1, 0x18, 0x99, 0xd3, - 0xd4, 0x8b, 0xbf, 0x07, 0x3e, 0x9c, 0xc7, 0x78, 0x11, 0x80, 0x64, 0x06, 0xf6, 0xf4, 0x14, 0x67, - 0x4f, 0x86, 0x15, 0x41, 0x56, 0x85, 0xbb, 0xd5, 0x1f, 0x4b, 0xb0, 0x1e, 0xeb, 0x02, 0xe9, 0xbe, - 0x9e, 0x6a, 0x40, 0x41, 0x84, 0x4f, 0xb4, 0x83, 0x37, 0x52, 0x1c, 0x58, 0x8b, 0xb0, 0xea, 0x03, - 0x58, 0x8b, 0xfa, 0xc2, 0xff, 0xf2, 0xeb, 0x31, 0xf5, 0x5d, 0x50, 0x46, 0x45, 0x2a, 0x14, 0xcf, - 0xd9, 0x33, 0x54, 0x13, 0xd6, 0x1f, 0x03, 0x15, 0x65, 0x7e, 0x08, 0x45, 0xb1, 0x49, 0x54, 0xe8, - 0xa9, 0xce, 0x3d, 0x02, 0xab, 0xdf, 0x80, 0xf5, 0x7d, 0xec, 0xe0, 0xaf, 0xe4, 0xfb, 0x84, 0xb3, - 0xff, 0x4e, 0x82, 0xf5, 0x8e, 0x6f, 0x1a, 0xff, 0x07, 0xdd, 0xf1, 0xb0, 0x67, 0x9f, 0x22, 0xec, - 0x7f, 0xcf, 0x8b, 0x16, 0x24, 0x9e, 0x38, 0xa8, 0x0b, 0x6b, 0x33, 0x0f, 0xb5, 0x31, 0x45, 0x48, - 0x7b, 0xb9, 0x5f, 0x9e, 0x7e, 0xaa, 0x31, 0xba, 0xe0, 0x53, 0x5a, 0xc8, 0x9c, 0x80, 0x4d, 0x7d, - 0x88, 0x03, 0xfb, 0xe1, 0x85, 0xce, 0x5f, 0x56, 0xe2, 0xbb, 0x86, 0xdb, 0x29, 0x1e, 0x65, 0xd5, - 0x07, 0x4c, 0x01, 0x1f, 0x51, 0x8a, 0x28, 0x14, 0xc7, 0xc5, 0xe8, 0x13, 0x58, 0xea, 0x1b, 0xbd, - 0x33, 0xdb, 0xc5, 0x3a, 0x23, 0x2a, 0x59, 0xb6, 0xcd, 0xad, 0x34, 0xdb, 0xdc, 0xe3, 0x78, 0x76, - 0xac, 0x72, 0x7f, 0x3c, 0xa0, 0xbc, 0xc3, 0xb4, 0xc3, 0x73, 0x76, 0xb5, 0xe9, 0x56, 0x97, 0xd1, - 0xce, 0xac, 0x06, 0x54, 0x46, 0x6f, 0xb7, 0xc3, 0x2e, 0xf2, 0xe0, 0x99, 0x78, 0x13, 0x89, 0xce, - 0x9a, 0x63, 0x46, 0x7c, 0x98, 0xc6, 0x88, 0x78, 0xeb, 0x11, 0x27, 0x46, 0xe1, 0x8c, 0x0c, 0xf9, - 0x70, 0x99, 0x3e, 0x4f, 0x43, 0x12, 0x60, 0x83, 0x3e, 0xa6, 0xa2, 0x1d, 0x17, 0xd3, 0xef, 0x78, - 0xe2, 0x59, 0xad, 0x48, 0x4d, 0xb4, 0xa3, 0x33, 0x23, 0x53, 0xab, 0xb0, 0x34, 0xe1, 0x70, 0x19, - 0x96, 0x9a, 0xa7, 0x6d, 0xfd, 0x41, 0x43, 0x3b, 0x3e, 0x38, 0x6e, 0xec, 0xcb, 0x0b, 0x68, 0x09, - 0x8a, 0xa3, 0x91, 0xa4, 0xd6, 0xa1, 0x1c, 0x73, 0x28, 0x5a, 0x85, 0x72, 0xa7, 0xd9, 0xba, 0xdf, - 0xa8, 0x47, 0xab, 0x29, 0x7e, 0x4b, 0x3f, 0x3a, 0x3e, 0x3c, 0xaa, 0xdf, 0xef, 0xe8, 0xb7, 0x65, - 0x09, 0x5d, 0x82, 0xe5, 0x98, 0xe4, 0xc6, 0xb6, 0x9c, 0x51, 0x6f, 0x4e, 0xf6, 0x62, 0xb1, 0xf5, - 0x0a, 0xc0, 0xbd, 0x4e, 0xab, 0xad, 0xdf, 0xdb, 0x6d, 0xd7, 0x8f, 0xe4, 0x05, 0xaa, 0x7b, 0xf7, - 0xe4, 0xe4, 0xf4, 0x63, 0xfd, 0xe4, 0xf4, 0xb4, 0xd5, 0x90, 0x25, 0xf5, 0x10, 0xd0, 0xec, 0xa9, - 0xf8, 0x5b, 0x50, 0x6b, 0xec, 0xde, 0xd3, 0xf7, 0x1b, 0x07, 0xbb, 0x9d, 0x93, 0xb6, 0xbc, 0x40, - 0xdf, 0x6d, 0x42, 0x76, 0xda, 0x94, 0x25, 0xaa, 0x39, 0x1a, 0x1e, 0x1c, 0xc8, 0x99, 0xed, 0xbf, - 0xae, 0x00, 0xd4, 0xa9, 0xeb, 0xf8, 0x17, 0x6c, 0x3f, 0x97, 0xa0, 0x1c, 0x6b, 0xe3, 0x68, 0x2b, - 0xc1, 0xcf, 0xb3, 0xc4, 0xaf, 0x72, 0x35, 0x82, 0xc4, 0x7e, 0x6d, 0xa9, 0x8e, 0x1e, 0x6a, 0x6a, - 0xed, 0x8b, 0x7f, 0xfd, 0xfb, 0x97, 0x99, 0xd7, 0xd5, 0x8d, 0xda, 0x70, 0xab, 0x26, 0x5a, 0x45, - 0x58, 0xfb, 0x7c, 0xdc, 0x46, 0x1e, 0xd5, 0x38, 0x8f, 0xd9, 0x11, 0x57, 0xf1, 0xcf, 0x24, 0x28, - 0x46, 0x6d, 0x1d, 0x55, 0x13, 0xec, 0x99, 0xe2, 0x85, 0x95, 0xb9, 0xae, 0x7d, 0xf5, 0x2d, 0x66, - 0xd3, 0x6b, 0xe8, 0x95, 0x24, 0x9b, 0x6a, 0x9f, 0xdb, 0xe6, 0x23, 0xf4, 0x1b, 0x09, 
0x60, 0xcc, - 0xda, 0x50, 0xd2, 0x77, 0x0d, 0x33, 0x44, 0xb3, 0xb2, 0x95, 0x02, 0xc1, 0xef, 0x0a, 0x75, 0x93, - 0x99, 0xa8, 0xa2, 0x44, 0xb7, 0xa1, 0xdf, 0xd2, 0x10, 0x8e, 0x79, 0x5d, 0x72, 0x08, 0x67, 0x38, - 0xe0, 0x9c, 0x5e, 0xbb, 0xc5, 0x4c, 0xda, 0x52, 0xdf, 0x9c, 0xcb, 0x6b, 0x3b, 0x3d, 0xb6, 0xcf, - 0x8e, 0x74, 0x1d, 0xfd, 0x8a, 0xfd, 0x94, 0x13, 0xfd, 0x18, 0x96, 0xe8, 0xbf, 0x99, 0xdf, 0xcd, - 0x92, 0x52, 0xec, 0x1d, 0x66, 0xd8, 0xdb, 0xea, 0x1b, 0xf3, 0x19, 0x16, 0x50, 0xfd, 0xd4, 0xae, - 0x3f, 0x4b, 0x13, 0x2f, 0x99, 0x88, 0xd1, 0xde, 0x9e, 0xbf, 0x06, 0x26, 0xaf, 0xc7, 0x4a, 0x9a, - 0xfb, 0x4c, 0xbd, 0xc1, 0xac, 0x7e, 0x4b, 0x55, 0x9f, 0x6c, 0x75, 0x74, 0xe1, 0xef, 0x44, 0x77, - 0x1f, 0xfa, 0x93, 0x34, 0x7e, 0x0b, 0x45, 0xf6, 0xde, 0x9c, 0xb3, 0x46, 0x9e, 0xc6, 0x58, 0x11, - 0x7b, 0x54, 0x4b, 0x36, 0xb6, 0xf6, 0xf9, 0x98, 0x03, 0x3c, 0x42, 0x7f, 0x89, 0xbf, 0xbc, 0x22, - 0x46, 0x84, 0x6e, 0xcd, 0x5b, 0x10, 0x53, 0xf4, 0xab, 0x72, 0x3b, 0x3d, 0x50, 0x14, 0xd4, 0x75, - 0x76, 0x82, 0x97, 0xd1, 0x1c, 0xee, 0xa6, 0x25, 0x85, 0x66, 0x09, 0x56, 0x62, 0x62, 0x3c, 0x91, - 0x93, 0x55, 0xd6, 0x66, 0xde, 0x0a, 0x8d, 0xbe, 0x4f, 0x2e, 0x22, 0xb7, 0x5e, 0x4f, 0xed, 0xd6, - 0x2f, 0x25, 0x40, 0xb3, 0x34, 0x2d, 0xd1, 0xc2, 0x27, 0x32, 0xbb, 0x74, 0xd9, 0x70, 0x97, 0x99, - 0xbd, 0xb3, 0x9d, 0xd6, 0xec, 0x71, 0x1e, 0xff, 0x51, 0x82, 0xd5, 0xa9, 0x9f, 0xb6, 0x13, 0xf3, - 0xf8, 0xf1, 0x3f, 0x85, 0x27, 0x35, 0x87, 0x3a, 0xb3, 0xf5, 0x03, 0xf5, 0x46, 0x5a, 0x5b, 0x83, - 0x81, 0xbb, 0x23, 0x7e, 0xaf, 0xd9, 0x3b, 0x07, 0xa5, 0xe7, 0xf5, 0xa3, 0x8d, 0x26, 0xcc, 0xba, - 0x2f, 0x7d, 0x72, 0x28, 0xe4, 0x96, 0xe7, 0x18, 0xae, 0x55, 0xf5, 0x02, 0xab, 0x66, 0x61, 0x97, - 0x85, 0xba, 0xc6, 0xa7, 0x0c, 0xdf, 0x0e, 0x9f, 0xf0, 0xdf, 0x23, 0xde, 0x1b, 0x8f, 0xfe, 0x90, - 0xc9, 0x1e, 0xd6, 0xf7, 0xba, 0x79, 0x86, 0xbc, 0xf1, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x17, - 0x08, 0x55, 0x51, 0x57, 0x21, 0x00, 0x00, + 0xf5, 0x17, 0x48, 0x8a, 0x97, 0x43, 0x5d, 0xe0, 0x8d, 0xa3, 0x40, 0x74, 0x1c, 0x2b, 0xc8, 0x4d, + 0x71, 0x12, 0x32, 0x92, 0xff, 0x8e, 0x1d, 0xe5, 0x62, 0x4b, 0x14, 0x75, 0x99, 0xc8, 0x94, 0xff, + 0x20, 0xe9, 0x4c, 0xd3, 0x76, 0x50, 0x90, 0x58, 0x43, 0xa8, 0x40, 0x00, 0x05, 0x96, 0x6c, 0x94, + 0x34, 0xd3, 0x36, 0x33, 0xed, 0x6b, 0xdb, 0xe9, 0xf4, 0xa1, 0xd3, 0x87, 0x5e, 0x9e, 0x3b, 0x9d, + 0x4e, 0xfb, 0xd0, 0x99, 0xce, 0xe4, 0xb9, 0x1f, 0xa0, 0x5f, 0xa1, 0x1f, 0xa4, 0xb3, 0x17, 0x90, + 0x20, 0x29, 0x17, 0x44, 0xdc, 0xbe, 0x50, 0xd8, 0xb3, 0x7b, 0xce, 0x9e, 0x3d, 0xb7, 0xfd, 0x1d, + 0x40, 0x50, 0xb5, 0x3c, 0xcf, 0x72, 0x70, 0xcd, 0xc4, 0x43, 0xe2, 0x79, 0x4e, 0x58, 0xeb, 0x39, + 0xde, 0xc0, 0xec, 0x0e, 0x6c, 0xc7, 0xac, 0x0d, 0xb7, 0x62, 0xa3, 0xaa, 0x1f, 0x78, 0xc4, 0x43, + 0xd7, 0xf9, 0xfa, 0x6a, 0xb4, 0xbe, 0x1a, 0x5b, 0x31, 0xdc, 0xaa, 0x3c, 0x2f, 0xc4, 0x19, 0xbe, + 0x5d, 0x33, 0x5c, 0xd7, 0x23, 0x06, 0xb1, 0x3d, 0x37, 0xe4, 0xcc, 0x15, 0x55, 0xcc, 0x32, 0x9e, + 0x9a, 0x31, 0x30, 0x6d, 0xc2, 0x7f, 0x75, 0xc7, 0xb3, 0xc4, 0x9a, 0x97, 0xc4, 0x1a, 0xc7, 0x73, + 0xad, 0x60, 0xe0, 0xba, 0xb6, 0x6b, 0xd5, 0x3c, 0x1f, 0x07, 0x13, 0x82, 0x5e, 0x10, 0x8b, 0xd8, + 0xa8, 0x3b, 0x78, 0x5c, 0x33, 0x07, 0x7c, 0x81, 0x98, 0xbf, 0x36, 0x3d, 0x8f, 0xfb, 0x3e, 0xb9, + 0x10, 0x93, 0x37, 0xa6, 0x27, 0x89, 0xdd, 0xc7, 0x21, 0x31, 0xfa, 0x3e, 0x5f, 0xa0, 0xee, 0xc1, + 0x15, 0x0d, 0x93, 0xe0, 0x62, 0x8f, 0x9e, 0x4a, 0xc3, 0xdf, 0x1b, 0xe0, 0x90, 0xa0, 0xeb, 0x00, + 0x7e, 0xe0, 0x7d, 0x17, 0xf7, 0x88, 0x6e, 0x9b, 0x8a, 0xb4, 0x21, 0x6d, 0x96, 0xb4, 0x92, 0xa0, + 0x1c, 0x9b, 0x68, 0x05, 0x32, 0xb6, 0xa9, 0x64, 0x18, 0x39, 0x63, 
0x9b, 0xea, 0xaf, 0x25, 0x58, + 0xd3, 0x06, 0x2e, 0x13, 0xd1, 0x0e, 0x6c, 0xcb, 0xc2, 0xc1, 0x9c, 0x92, 0xae, 0x03, 0x10, 0xce, + 0xa0, 0x8f, 0x24, 0x96, 0x04, 0xe5, 0xd8, 0x44, 0xbb, 0x90, 0x0f, 0xbd, 0x41, 0xd0, 0xc3, 0x4a, + 0x76, 0x43, 0xda, 0x2c, 0x6f, 0xbf, 0x5e, 0xfd, 0x8f, 0x1e, 0xa9, 0x6a, 0xd8, 0xf7, 0x5a, 0x8c, + 0x41, 0x13, 0x8c, 0xaa, 0x0e, 0xcb, 0x2d, 0xe2, 0x05, 0x86, 0x85, 0xf9, 0x04, 0x5a, 0x83, 0x7c, + 0x77, 0xd0, 0x3b, 0xc7, 0x44, 0x68, 0x23, 0x46, 0x94, 0xee, 0x75, 0xa9, 0x5a, 0x42, 0x0d, 0x31, + 0x42, 0x2f, 0x00, 0x58, 0xd8, 0x15, 0x3e, 0x61, 0x7a, 0x64, 0xb5, 0x18, 0x45, 0xfd, 0x87, 0x04, + 0x30, 0xde, 0x37, 0xe9, 0xc0, 0xd7, 0xa0, 0x14, 0x60, 0xdf, 0xd3, 0x5d, 0xa3, 0x8f, 0xc5, 0x46, + 0x45, 0x4a, 0x68, 0x1a, 0x7d, 0x8c, 0x5e, 0x84, 0x72, 0x37, 0x30, 0xdc, 0xde, 0x19, 0x9f, 0xa6, + 0x7b, 0x95, 0x8e, 0x16, 0x34, 0xe0, 0x44, 0xb6, 0xe4, 0x1a, 0x14, 0x89, 0x61, 0xf1, 0xf9, 0x9c, + 0x98, 0x2f, 0x10, 0xc3, 0x62, 0x93, 0x37, 0x00, 0x7a, 0x5e, 0xbf, 0x6f, 0x13, 0x3d, 0x3c, 0x33, + 0x94, 0x45, 0x31, 0x5d, 0xe2, 0xb4, 0xd6, 0x99, 0x81, 0x64, 0xc8, 0x9a, 0x76, 0xa0, 0x14, 0xd8, + 0xbe, 0xf4, 0x71, 0x0f, 0xa0, 0x18, 0xe0, 0xa1, 0x1d, 0xd2, 0x93, 0xfc, 0x4d, 0x82, 0xbc, 0x38, + 0x45, 0x07, 0x56, 0x42, 0x6e, 0x35, 0x5d, 0x38, 0x20, 0xc3, 0x1c, 0xf0, 0x66, 0x82, 0x03, 0x26, + 0x4c, 0x7d, 0xb4, 0xa0, 0x2d, 0x87, 0x13, 0xb6, 0x3f, 0x81, 0x32, 0x3b, 0xfd, 0xd7, 0x74, 0x2a, + 0xb5, 0x45, 0x30, 0x1a, 0xed, 0x15, 0xa3, 0xe8, 0x50, 0xbf, 0x94, 0x00, 0x68, 0xf4, 0x91, 0xe3, + 0xbe, 0x61, 0x61, 0x84, 0x20, 0xc7, 0x0c, 0xc4, 0xad, 0xcf, 0x9e, 0xa9, 0x7b, 0x4d, 0xdb, 0xc2, + 0x21, 0xe1, 0x66, 0xd5, 0xc4, 0x08, 0x1d, 0x41, 0xd9, 0x1f, 0x84, 0x67, 0x3a, 0xb1, 0xfb, 0xb6, + 0x6b, 0x31, 0x9b, 0x96, 0xb7, 0x5f, 0x4b, 0x50, 0xa9, 0x6d, 0xf7, 0x71, 0xcb, 0x37, 0x5c, 0x0d, + 0x28, 0x6f, 0x9b, 0xb1, 0xaa, 0x5f, 0x65, 0xa1, 0xc4, 0x52, 0xa0, 0x45, 0xb0, 0x7f, 0xa9, 0x0e, + 0x32, 0x64, 0xb1, 0x3b, 0x54, 0x32, 0x1b, 0x59, 0x6a, 0x7e, 0xec, 0x0e, 0xe9, 0x2a, 0x23, 0xb0, + 0x42, 0x25, 0xcb, 0x48, 0xec, 0x39, 0x72, 0x52, 0x6e, 0xe4, 0x24, 0x91, 0x6f, 0x8b, 0x51, 0xbe, + 0xa1, 0x75, 0x28, 0x7e, 0xdf, 0xb0, 0x89, 0xfe, 0xd8, 0x0b, 0x94, 0x3c, 0xe3, 0x2c, 0xd0, 0xf1, + 0x81, 0x17, 0xd0, 0x68, 0xc5, 0x2e, 0x09, 0x2e, 0x7c, 0xcf, 0x76, 0x89, 0x70, 0x74, 0x8c, 0x42, + 0xc3, 0x33, 0xc4, 0xbd, 0x00, 0x13, 0x9d, 0x6a, 0x52, 0x64, 0xcc, 0x25, 0x4e, 0x69, 0xb8, 0x43, + 0x74, 0x0f, 0x0a, 0x43, 0xcf, 0x19, 0xf4, 0x71, 0xa8, 0x94, 0x36, 0xb2, 0x9b, 0xe5, 0xed, 0x57, + 0x12, 0x2c, 0xf1, 0x88, 0xad, 0xd6, 0x22, 0x2e, 0x74, 0x0f, 0xf2, 0xc2, 0x92, 0x90, 0xce, 0x92, + 0x82, 0x0d, 0xdd, 0x82, 0x02, 0x2d, 0x51, 0xde, 0x80, 0x28, 0x65, 0x26, 0x61, 0x3d, 0x92, 0x10, + 0x95, 0xb0, 0xea, 0xbe, 0xa8, 0x7f, 0x5a, 0xb4, 0x12, 0xd5, 0x21, 0x1f, 0x12, 0x83, 0x0c, 0x42, + 0x65, 0x69, 0x43, 0xda, 0x5c, 0xd9, 0x7e, 0x23, 0x61, 0x57, 0xe6, 0xa6, 0x6a, 0x8b, 0xb1, 0x68, + 0x82, 0x55, 0x7d, 0x1b, 0xf2, 0xfc, 0x34, 0x97, 0xfa, 0x0e, 0x41, 0xce, 0x37, 0xc8, 0x99, 0xc8, + 0x59, 0xf6, 0xac, 0x7e, 0x0a, 0x05, 0x0d, 0x87, 0x03, 0x87, 0x84, 0xb4, 0x52, 0xd9, 0x34, 0xf6, + 0x42, 0xe6, 0xdd, 0xe4, 0xa0, 0x1e, 0x47, 0xab, 0x26, 0x18, 0xd1, 0x4d, 0xb8, 0xc2, 0xa6, 0xf5, + 0x90, 0x60, 0x5f, 0x17, 0xd2, 0x78, 0x60, 0xac, 0x76, 0xa3, 0xb8, 0x62, 0x2c, 0xa1, 0xfa, 0x8b, + 0x32, 0x2c, 0xb2, 0x43, 0x88, 0xd8, 0x90, 0x46, 0xb1, 0x31, 0x59, 0x7f, 0xe4, 0xe9, 0xfa, 0x33, + 0xb6, 0x54, 0xe6, 0x6b, 0x5b, 0x0a, 0xbd, 0x04, 0xcb, 0xfc, 0x49, 0x37, 0x31, 0x31, 0x6c, 0x47, + 0x51, 0xd8, 0x36, 0x4b, 0x9c, 0xb8, 0xcf, 0x68, 0xe8, 0x83, 0xa9, 0xda, 0x9d, 0x14, 0x49, 
0x93, + 0x75, 0x1b, 0x7d, 0x08, 0x8b, 0xd4, 0x0e, 0xa1, 0x52, 0x66, 0xf6, 0xdc, 0x9c, 0x47, 0x4f, 0x6a, + 0x20, 0x8d, 0xb3, 0xa1, 0xfb, 0x50, 0x08, 0xb8, 0x6f, 0x44, 0x24, 0xbe, 0x9a, 0x58, 0x66, 0xd8, + 0x6a, 0x2d, 0x62, 0x43, 0xef, 0x41, 0xb9, 0x17, 0x60, 0x83, 0x60, 0x5a, 0x1b, 0xb0, 0x92, 0x67, + 0x52, 0x2a, 0x33, 0xd1, 0xd8, 0x8e, 0x2e, 0x54, 0x0d, 0xf8, 0x72, 0x4a, 0x40, 0xef, 0x02, 0x84, + 0xc4, 0x08, 0x08, 0xe7, 0x2d, 0x24, 0xf2, 0x96, 0xd8, 0x6a, 0xc6, 0xfa, 0x1e, 0x94, 0x1f, 0xdb, + 0xae, 0xcd, 0x6b, 0x12, 0x56, 0x8a, 0xc9, 0xfb, 0xf2, 0xe5, 0x8c, 0x39, 0x96, 0x3e, 0x4b, 0x73, + 0xa7, 0xcf, 0xda, 0x28, 0x78, 0x97, 0x59, 0xb8, 0x45, 0x11, 0x79, 0x03, 0xca, 0x8e, 0x67, 0x85, + 0xba, 0xb8, 0x2f, 0x9f, 0xe1, 0xd5, 0x84, 0x92, 0xf6, 0xf8, 0x9d, 0xf9, 0x2d, 0xb8, 0xc2, 0xdd, + 0xa5, 0xfb, 0x81, 0x37, 0xc4, 0xae, 0xe1, 0xf6, 0xb0, 0xf2, 0x2c, 0xdb, 0xb7, 0x36, 0x97, 0xbb, + 0x1f, 0x8e, 0xd8, 0x34, 0x39, 0x9c, 0xa2, 0xa0, 0x4d, 0x90, 0x79, 0x42, 0xc4, 0x20, 0xc2, 0x1a, + 0xd3, 0x61, 0xa5, 0x1b, 0x83, 0x1a, 0xc7, 0x26, 0x6a, 0x40, 0xc1, 0xf3, 0x19, 0x66, 0x52, 0x9e, + 0x63, 0xbb, 0xcf, 0x15, 0xd6, 0xa7, 0x9c, 0x45, 0x8b, 0x78, 0xd1, 0x73, 0x50, 0x70, 0x3c, 0x4b, + 0x1f, 0x04, 0x8e, 0xb2, 0xce, 0x2f, 0x09, 0xc7, 0xb3, 0x3a, 0x81, 0x83, 0xbe, 0x0d, 0xcb, 0xe1, + 0xa0, 0x1b, 0x12, 0x9b, 0x0c, 0xf8, 0x2e, 0xd7, 0x59, 0x50, 0xde, 0x99, 0x2f, 0x79, 0xe2, 0x9c, + 0x0d, 0x5a, 0x8a, 0xb5, 0x49, 0x69, 0xb4, 0xb6, 0x10, 0xc3, 0x0a, 0x95, 0x1b, 0xfc, 0x16, 0xa0, + 0xcf, 0xb4, 0x12, 0xf3, 0xb2, 0x1c, 0x2a, 0x1b, 0x73, 0x55, 0xe2, 0x16, 0x5b, 0xad, 0x45, 0x5c, + 0xe8, 0x68, 0x54, 0x89, 0x5f, 0x64, 0xfc, 0x6f, 0xcf, 0xa5, 0x2c, 0xbf, 0xcb, 0xb8, 0x96, 0x82, + 0xbf, 0x72, 0x1f, 0xd0, 0xec, 0x19, 0xe8, 0x35, 0x75, 0x8e, 0x2f, 0x44, 0xe5, 0xa1, 0x8f, 0xe8, + 0x2a, 0x2c, 0x0e, 0x0d, 0x67, 0x10, 0xe1, 0x1a, 0x3e, 0xd8, 0xc9, 0xdc, 0x95, 0x2a, 0x5d, 0x28, + 0xc7, 0x04, 0x5f, 0xc2, 0xfa, 0x41, 0x9c, 0x35, 0xc5, 0xad, 0x31, 0xde, 0x43, 0xfd, 0x21, 0xe4, + 0x79, 0x99, 0x42, 0x08, 0x56, 0x5a, 0xed, 0xdd, 0x76, 0xa7, 0xa5, 0x77, 0x9a, 0x1f, 0x35, 0x4f, + 0x3f, 0x6e, 0xca, 0x0b, 0x08, 0x20, 0xff, 0xff, 0x9d, 0x46, 0xa7, 0xb1, 0x2f, 0x4b, 0xa8, 0x0c, + 0x85, 0x8f, 0x4f, 0xb5, 0x8f, 0x8e, 0x9b, 0x87, 0x72, 0x86, 0x0e, 0x5a, 0x9d, 0x7a, 0xbd, 0xd1, + 0x6a, 0xc9, 0x59, 0x3a, 0x38, 0xd8, 0x3d, 0x3e, 0xe9, 0x68, 0x0d, 0x39, 0x47, 0xc5, 0x1c, 0x37, + 0xdb, 0x0d, 0xad, 0xb9, 0x7b, 0xa2, 0x37, 0x34, 0xed, 0x54, 0x93, 0x17, 0xe9, 0x82, 0xf6, 0xf1, + 0x83, 0xc6, 0x69, 0xa7, 0x2d, 0xe7, 0xd1, 0x32, 0x94, 0xea, 0xbb, 0xcd, 0x7a, 0xe3, 0xe4, 0xa4, + 0xb1, 0x2f, 0x17, 0xd4, 0x1f, 0x40, 0x31, 0xd2, 0x6b, 0x2a, 0xfd, 0xa5, 0x34, 0xe9, 0x7f, 0x1b, + 0x8a, 0xd8, 0x35, 0x39, 0x63, 0x26, 0x91, 0xb1, 0x80, 0x5d, 0x93, 0x8e, 0xd4, 0x36, 0xac, 0x89, + 0xa0, 0x16, 0xc0, 0xf4, 0x01, 0x26, 0x86, 0x69, 0x10, 0x03, 0xed, 0xc0, 0x22, 0x33, 0x9c, 0x50, + 0xe3, 0xe5, 0x79, 0xe2, 0x40, 0xe3, 0x2c, 0xea, 0xef, 0xb3, 0x20, 0x4f, 0x67, 0x2a, 0x32, 0xe1, + 0xb9, 0x00, 0x87, 0x9e, 0x33, 0xc4, 0xf4, 0xae, 0x9a, 0x40, 0x89, 0xd9, 0xf4, 0x28, 0x51, 0x7b, + 0x36, 0x12, 0x36, 0x89, 0xd3, 0xbf, 0x09, 0x57, 0x47, 0xbb, 0xc4, 0x41, 0x63, 0x3e, 0x6d, 0x27, + 0x80, 0x22, 0x31, 0x31, 0x94, 0xfe, 0x1d, 0x5a, 0x63, 0x1d, 0xac, 0x9f, 0x19, 0xe1, 0x19, 0x0e, + 0x95, 0x1c, 0xcb, 0x90, 0x7b, 0x29, 0x4b, 0x56, 0xf5, 0xc0, 0x76, 0xf0, 0x11, 0x93, 0xc0, 0x13, + 0x06, 0x1e, 0x8f, 0x08, 0x95, 0x33, 0x58, 0x9d, 0x9a, 0xbe, 0x24, 0xec, 0xef, 0x4d, 0x86, 0x7d, + 0xd2, 0xa1, 0xc6, 0x02, 0xe3, 0x81, 0xdf, 0x04, 0x18, 0x4f, 0xa0, 0xfb, 0x50, 0x1a, 0x9d, 0x4c, + 0x91, 0xd8, 0xb9, 
0x5e, 0x4a, 0x10, 0x4b, 0x39, 0xb5, 0x62, 0xa4, 0xbb, 0xfa, 0x23, 0x09, 0x72, + 0xf4, 0x01, 0xdd, 0x87, 0x1c, 0xb9, 0xf0, 0x79, 0xf8, 0xae, 0x24, 0x3a, 0x95, 0xb2, 0xb0, 0x9f, + 0xf6, 0x85, 0x8f, 0x35, 0xc6, 0x39, 0x59, 0x11, 0x96, 0x84, 0xd2, 0xea, 0x06, 0x14, 0xa3, 0x75, + 0xa8, 0x08, 0xb9, 0xe6, 0x69, 0xb3, 0xc1, 0x33, 0xb4, 0x75, 0xb4, 0xbb, 0x7d, 0xfb, 0x1d, 0x59, + 0x52, 0xbf, 0xa2, 0x9d, 0x08, 0xab, 0x63, 0x68, 0x03, 0x96, 0xce, 0xfb, 0xa1, 0x7e, 0x8e, 0x2f, + 0xf4, 0x18, 0x26, 0x83, 0xf3, 0x7e, 0xf8, 0x11, 0xbe, 0x60, 0x5d, 0x4f, 0x6b, 0x02, 0xd2, 0x66, + 0xd9, 0x91, 0xff, 0x6f, 0xae, 0x62, 0x29, 0xfe, 0x34, 0xdc, 0x21, 0xf7, 0xdf, 0x18, 0x08, 0x57, + 0xde, 0x87, 0x95, 0xc9, 0xc9, 0xa4, 0x7a, 0xb7, 0x14, 0x77, 0x89, 0x07, 0xa8, 0xce, 0xb0, 0x40, + 0x9a, 0xae, 0x7a, 0x94, 0xa7, 0x99, 0xf4, 0x79, 0x7a, 0x1f, 0x56, 0x0f, 0x31, 0x79, 0x9a, 0x1e, + 0xfe, 0xa7, 0x12, 0x5c, 0x39, 0xb1, 0x43, 0x2e, 0x23, 0x9c, 0x53, 0xc8, 0x35, 0x28, 0xf9, 0x2c, + 0xfb, 0xed, 0xcf, 0xb8, 0x15, 0x16, 0xb5, 0x22, 0x25, 0xb4, 0xec, 0xcf, 0x78, 0x27, 0x4c, 0x27, + 0x89, 0x77, 0x8e, 0x5d, 0xd1, 0x75, 0xb1, 0xe5, 0x6d, 0x4a, 0xa0, 0xa0, 0xe3, 0xb1, 0xed, 0x10, + 0x1c, 0x30, 0x84, 0x53, 0xd2, 0xc4, 0x48, 0xfd, 0x0c, 0x50, 0x5c, 0x8f, 0xd0, 0xf7, 0xdc, 0x10, + 0xa3, 0xf7, 0x69, 0xd7, 0x4e, 0x29, 0x22, 0xa6, 0xe7, 0xb3, 0x8e, 0xe0, 0x41, 0xaf, 0xc2, 0xaa, + 0x8b, 0x3f, 0x25, 0x7a, 0x4c, 0x1f, 0x7e, 0xf2, 0x65, 0x4a, 0x7e, 0x18, 0xe9, 0xa4, 0xd6, 0x01, + 0xd5, 0x69, 0x66, 0x3b, 0x4f, 0x63, 0xc9, 0x9f, 0xe4, 0x60, 0x29, 0xfe, 0x2a, 0x64, 0x06, 0xa2, + 0x6f, 0x40, 0xd9, 0xc4, 0x61, 0x2f, 0xb0, 0x19, 0xec, 0x60, 0xf0, 0xb4, 0xa4, 0xc5, 0x49, 0xa8, + 0x0d, 0x72, 0x84, 0x79, 0x08, 0xee, 0xfb, 0x8e, 0x41, 0x22, 0x0c, 0x99, 0xa2, 0xee, 0xad, 0x0a, + 0x11, 0x6d, 0x21, 0x01, 0xbd, 0x1f, 0x05, 0x58, 0x6e, 0xfe, 0x00, 0x3b, 0x5a, 0x10, 0x21, 0x86, + 0x9e, 0x07, 0x56, 0x22, 0x58, 0x12, 0x16, 0xc5, 0xab, 0x85, 0x11, 0x65, 0x1a, 0x2c, 0x2f, 0xa6, + 0x02, 0xcb, 0x15, 0x28, 0x9a, 0x76, 0x68, 0x74, 0x1d, 0x6c, 0x2a, 0xa5, 0x0d, 0x69, 0xb3, 0xa8, + 0x8d, 0xc6, 0xc8, 0x9c, 0x86, 0x5e, 0xbc, 0x1f, 0xf8, 0x70, 0x1e, 0xe5, 0x85, 0x03, 0x92, 0x11, + 0xd8, 0xd3, 0x43, 0x9c, 0x3d, 0x19, 0x56, 0x04, 0x58, 0x15, 0xe6, 0x56, 0x7f, 0x2c, 0xc1, 0x7a, + 0xac, 0x0a, 0xa4, 0x7b, 0x31, 0xd6, 0x80, 0x82, 0x70, 0x9f, 0x28, 0x07, 0x6f, 0xa4, 0x38, 0xb0, + 0x16, 0xf1, 0xaa, 0x8f, 0x60, 0x2d, 0xaa, 0x0b, 0xff, 0xcd, 0x17, 0x73, 0xea, 0xbb, 0xa0, 0x8c, + 0x92, 0x54, 0x08, 0x9e, 0xb3, 0x66, 0xa8, 0x26, 0xac, 0x5f, 0xc2, 0x2a, 0xd2, 0xfc, 0x10, 0x8a, + 0x62, 0x93, 0x28, 0xd1, 0x53, 0x9d, 0x7b, 0xc4, 0xac, 0x7e, 0x03, 0xd6, 0xf7, 0xb1, 0x83, 0xbf, + 0x96, 0xed, 0x13, 0xce, 0xfe, 0x3b, 0x09, 0xd6, 0x3b, 0xbe, 0x69, 0xfc, 0x0f, 0x64, 0xc7, 0xdd, + 0x9e, 0x7d, 0x0a, 0xb7, 0xff, 0x3d, 0x2f, 0x4a, 0x90, 0x68, 0x71, 0x50, 0x17, 0xd6, 0x66, 0x1a, + 0xb5, 0x31, 0x44, 0x48, 0x7b, 0xb9, 0x5f, 0x9d, 0x6e, 0xd5, 0x18, 0x5c, 0xf0, 0x29, 0x2c, 0x64, + 0x46, 0xc0, 0xa6, 0x3e, 0xc4, 0x81, 0xfd, 0xf8, 0x42, 0xe7, 0x9d, 0x95, 0x78, 0xd7, 0x70, 0x37, + 0x45, 0x53, 0x56, 0x7d, 0xc4, 0x04, 0xf0, 0x11, 0x85, 0x88, 0x42, 0x70, 0x9c, 0x8c, 0x3e, 0x81, + 0xa5, 0xbe, 0xd1, 0x3b, 0xb3, 0x5d, 0xac, 0x33, 0xa0, 0x92, 0x65, 0xdb, 0xdc, 0x49, 0xb3, 0xcd, + 0x03, 0xce, 0xcf, 0x8e, 0x55, 0xee, 0x8f, 0x07, 0x14, 0x77, 0x98, 0x76, 0x78, 0xce, 0xae, 0x36, + 0xdd, 0xea, 0x32, 0xd8, 0x99, 0xd5, 0x80, 0xd2, 0xe8, 0xed, 0x76, 0xd8, 0x45, 0x1e, 0x3c, 0x13, + 0x2f, 0x22, 0xd1, 0x59, 0x73, 0x4c, 0x89, 0x0f, 0xd3, 0x28, 0x11, 0x2f, 0x3d, 0xe2, 0xc4, 0x28, + 0x9c, 0xa1, 0x21, 0x1f, 0xae, 0xd2, 0xf6, 
0x34, 0x24, 0x01, 0x36, 0x68, 0x33, 0x15, 0xed, 0xb8, + 0x98, 0x7e, 0xc7, 0x13, 0xcf, 0x6a, 0x45, 0x62, 0xa2, 0x1d, 0x9d, 0x19, 0x9a, 0x5a, 0x85, 0xa5, + 0x09, 0x83, 0xcb, 0xb0, 0xd4, 0x3c, 0x6d, 0xeb, 0x8f, 0x1a, 0xda, 0xf1, 0xc1, 0x71, 0x63, 0x5f, + 0x5e, 0x40, 0x4b, 0x50, 0x1c, 0x8d, 0x24, 0xb5, 0x0e, 0xe5, 0x98, 0x41, 0xd1, 0x2a, 0x94, 0x3b, + 0xcd, 0xd6, 0xc3, 0x46, 0x3d, 0x5a, 0x4d, 0xf9, 0xb7, 0xf4, 0xa3, 0xe3, 0xc3, 0xa3, 0xfa, 0xc3, + 0x8e, 0x7e, 0x57, 0x96, 0xd0, 0x15, 0x58, 0x8e, 0x51, 0x6e, 0x6d, 0xcb, 0x19, 0xf5, 0xf6, 0x64, + 0x2d, 0x16, 0x5b, 0xaf, 0x00, 0x3c, 0xe8, 0xb4, 0xda, 0xfa, 0x83, 0xdd, 0x76, 0xfd, 0x48, 0x5e, + 0xa0, 0xb2, 0x77, 0x4f, 0x4e, 0x4e, 0x3f, 0xd6, 0x4f, 0x4e, 0x4f, 0x5b, 0x0d, 0x59, 0x52, 0x0f, + 0x01, 0xcd, 0x9e, 0x8a, 0xf7, 0x82, 0x5a, 0x63, 0xf7, 0x81, 0xbe, 0xdf, 0x38, 0xd8, 0xed, 0x9c, + 0xb4, 0xe5, 0x05, 0xda, 0xb7, 0x09, 0xda, 0x69, 0x53, 0x96, 0xa8, 0xe4, 0x68, 0x78, 0x70, 0x20, + 0x67, 0xb6, 0xff, 0xba, 0x02, 0x50, 0xa7, 0xa6, 0xe3, 0x2f, 0xd8, 0x7e, 0x2e, 0x41, 0x39, 0x56, + 0xc6, 0xd1, 0x56, 0x82, 0x9d, 0x67, 0x81, 0x5f, 0xe5, 0x7a, 0xc4, 0x12, 0xfb, 0xce, 0x53, 0x1d, + 0x35, 0x6a, 0x6a, 0xed, 0xcb, 0x7f, 0xfe, 0xeb, 0x97, 0x99, 0xd7, 0xd5, 0x8d, 0xda, 0x70, 0xab, + 0x26, 0x4a, 0x45, 0x58, 0xfb, 0x7c, 0x5c, 0x46, 0xbe, 0xa8, 0x71, 0x1c, 0xb3, 0x23, 0xae, 0xe2, + 0x9f, 0x49, 0x50, 0x8c, 0xca, 0x3a, 0xaa, 0x26, 0xe8, 0x33, 0x85, 0x0b, 0x2b, 0x73, 0x5d, 0xfb, + 0xea, 0x5b, 0x4c, 0xa7, 0xd7, 0xd0, 0x2b, 0x49, 0x3a, 0xd5, 0x3e, 0xb7, 0xcd, 0x2f, 0xd0, 0x6f, + 0x24, 0x80, 0x31, 0x6a, 0x43, 0x49, 0xef, 0x1a, 0x66, 0x80, 0x66, 0x65, 0x2b, 0x05, 0x07, 0xbf, + 0x2b, 0xd4, 0x4d, 0xa6, 0xa2, 0x8a, 0x12, 0xcd, 0x86, 0x7e, 0x4b, 0x5d, 0x38, 0xc6, 0x75, 0xc9, + 0x2e, 0x9c, 0xc1, 0x80, 0x73, 0x5a, 0xed, 0x0e, 0x53, 0x69, 0x4b, 0x7d, 0x73, 0x2e, 0xab, 0xed, + 0xf4, 0xd8, 0x3e, 0x3b, 0xd2, 0x4d, 0xf4, 0x2b, 0xf6, 0x11, 0x29, 0xfa, 0x0c, 0x97, 0x68, 0xbf, + 0x99, 0x2f, 0x76, 0x49, 0x21, 0xf6, 0x0e, 0x53, 0xec, 0x6d, 0xf5, 0x8d, 0xf9, 0x14, 0x0b, 0xa8, + 0x7c, 0xaa, 0xd7, 0x9f, 0xa5, 0x89, 0x4e, 0x26, 0x42, 0xb4, 0x77, 0xe7, 0xcf, 0x81, 0xc9, 0xeb, + 0xb1, 0x92, 0xe6, 0x3e, 0x53, 0x6f, 0x31, 0xad, 0xdf, 0x52, 0xd5, 0x27, 0x6b, 0x1d, 0x5d, 0xf8, + 0x3b, 0xd1, 0xdd, 0x87, 0xfe, 0x24, 0x8d, 0x7b, 0xa1, 0x48, 0xdf, 0xdb, 0x73, 0xe6, 0xc8, 0xd3, + 0x28, 0x2b, 0x7c, 0x8f, 0x6a, 0xc9, 0xca, 0xd6, 0x3e, 0x1f, 0x63, 0x80, 0x2f, 0xd0, 0x5f, 0xe2, + 0x9d, 0x57, 0x84, 0x88, 0xd0, 0x9d, 0x79, 0x13, 0x62, 0x0a, 0x7e, 0x55, 0xee, 0xa6, 0x67, 0x14, + 0x09, 0x75, 0x93, 0x9d, 0xe0, 0x65, 0x34, 0x87, 0xb9, 0x69, 0x4a, 0xa1, 0x59, 0x80, 0x95, 0x18, + 0x18, 0x4f, 0xc4, 0x64, 0x95, 0xb5, 0x99, 0x5e, 0xa1, 0xd1, 0xf7, 0xc9, 0x45, 0x64, 0xd6, 0x9b, + 0xa9, 0xcd, 0xfa, 0x95, 0x04, 0x68, 0x16, 0xa6, 0x25, 0x6a, 0xf8, 0x44, 0x64, 0x97, 0x2e, 0x1a, + 0xee, 0x33, 0xb5, 0x77, 0xb6, 0xd3, 0xaa, 0x3d, 0x8e, 0xe3, 0x3f, 0x4a, 0xb0, 0x3a, 0xf5, 0x51, + 0x3d, 0x31, 0x8e, 0x2f, 0xff, 0x08, 0x9f, 0x54, 0x1c, 0xea, 0x4c, 0xd7, 0x0f, 0xd4, 0x5b, 0x69, + 0x75, 0x0d, 0x06, 0xee, 0x8e, 0xf8, 0x5e, 0xb3, 0x77, 0x0e, 0x4a, 0xcf, 0xeb, 0x47, 0x1b, 0x4d, + 0xa8, 0xf5, 0x50, 0xfa, 0xe4, 0x50, 0xd0, 0x2d, 0xcf, 0x31, 0x5c, 0xab, 0xea, 0x05, 0x56, 0xcd, + 0xc2, 0x2e, 0x73, 0x75, 0x8d, 0x4f, 0x19, 0xbe, 0x1d, 0x3e, 0xe1, 0x1f, 0x33, 0xde, 0x1b, 0x8f, + 0xfe, 0x90, 0xc9, 0x1e, 0xd6, 0xf7, 0xba, 0x79, 0xc6, 0x79, 0xeb, 0xdf, 0x01, 0x00, 0x00, 0xff, + 0xff, 0x8d, 0x04, 0xdb, 0x30, 0xd1, 0x21, 0x00, 0x00, } diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go 
b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go new file mode 100644 index 000000000..61570e2a1 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go @@ -0,0 +1,904 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/alert.proto + +/* +Package monitoring is a generated protocol buffer package. + +It is generated from these files: + google/monitoring/v3/alert.proto + google/monitoring/v3/alert_service.proto + google/monitoring/v3/common.proto + google/monitoring/v3/group.proto + google/monitoring/v3/group_service.proto + google/monitoring/v3/metric.proto + google/monitoring/v3/metric_service.proto + google/monitoring/v3/mutation_record.proto + google/monitoring/v3/notification.proto + google/monitoring/v3/notification_service.proto + google/monitoring/v3/uptime.proto + google/monitoring/v3/uptime_service.proto + +It has these top-level messages: + AlertPolicy + CreateAlertPolicyRequest + GetAlertPolicyRequest + ListAlertPoliciesRequest + ListAlertPoliciesResponse + UpdateAlertPolicyRequest + DeleteAlertPolicyRequest + TypedValue + TimeInterval + Aggregation + Group + ListGroupsRequest + ListGroupsResponse + GetGroupRequest + CreateGroupRequest + UpdateGroupRequest + DeleteGroupRequest + ListGroupMembersRequest + ListGroupMembersResponse + Point + TimeSeries + ListMonitoredResourceDescriptorsRequest + ListMonitoredResourceDescriptorsResponse + GetMonitoredResourceDescriptorRequest + ListMetricDescriptorsRequest + ListMetricDescriptorsResponse + GetMetricDescriptorRequest + CreateMetricDescriptorRequest + DeleteMetricDescriptorRequest + ListTimeSeriesRequest + ListTimeSeriesResponse + CreateTimeSeriesRequest + CreateTimeSeriesError + MutationRecord + NotificationChannelDescriptor + NotificationChannel + ListNotificationChannelDescriptorsRequest + ListNotificationChannelDescriptorsResponse + GetNotificationChannelDescriptorRequest + CreateNotificationChannelRequest + ListNotificationChannelsRequest + ListNotificationChannelsResponse + GetNotificationChannelRequest + UpdateNotificationChannelRequest + DeleteNotificationChannelRequest + SendNotificationChannelVerificationCodeRequest + GetNotificationChannelVerificationCodeRequest + GetNotificationChannelVerificationCodeResponse + VerifyNotificationChannelRequest + UptimeCheckConfig + UptimeCheckIp + ListUptimeCheckConfigsRequest + ListUptimeCheckConfigsResponse + GetUptimeCheckConfigRequest + CreateUptimeCheckConfigRequest + UpdateUptimeCheckConfigRequest + DeleteUptimeCheckConfigRequest + ListUptimeCheckIpsRequest + ListUptimeCheckIpsResponse +*/ +package monitoring + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf3 "github.com/golang/protobuf/ptypes/duration" +import google_protobuf4 "github.com/golang/protobuf/ptypes/wrappers" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Operators for combining conditions. +type AlertPolicy_ConditionCombinerType int32 + +const ( + // An unspecified combiner. 
+ AlertPolicy_COMBINE_UNSPECIFIED AlertPolicy_ConditionCombinerType = 0 + // Combine conditions using the logical `AND` operator. An + // incident is created only if all conditions are met + // simultaneously. This combiner is satisfied if all conditions are + // met, even if they are met on completely different resources. + AlertPolicy_AND AlertPolicy_ConditionCombinerType = 1 + // Combine conditions using the logical `OR` operator. An incident + // is created if any of the listed conditions is met. + AlertPolicy_OR AlertPolicy_ConditionCombinerType = 2 + // Combine conditions using logical `AND` operator, but unlike the regular + // `AND` option, an incident is created only if all conditions are met + // simultaneously on at least one resource. + AlertPolicy_AND_WITH_MATCHING_RESOURCE AlertPolicy_ConditionCombinerType = 3 +) + +var AlertPolicy_ConditionCombinerType_name = map[int32]string{ + 0: "COMBINE_UNSPECIFIED", + 1: "AND", + 2: "OR", + 3: "AND_WITH_MATCHING_RESOURCE", +} +var AlertPolicy_ConditionCombinerType_value = map[string]int32{ + "COMBINE_UNSPECIFIED": 0, + "AND": 1, + "OR": 2, + "AND_WITH_MATCHING_RESOURCE": 3, +} + +func (x AlertPolicy_ConditionCombinerType) String() string { + return proto.EnumName(AlertPolicy_ConditionCombinerType_name, int32(x)) +} +func (AlertPolicy_ConditionCombinerType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{0, 0} +} + +// A description of the conditions under which some aspect of your system is +// considered to be "unhealthy" and the ways to notify people or services about +// this state. For an overview of alert policies, see +// [Introduction to Alerting](/monitoring/alerts/). +type AlertPolicy struct { + // Required if the policy exists. The resource name for this policy. The + // syntax is: + // + // projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID] + // + // `[ALERT_POLICY_ID]` is assigned by Stackdriver Monitoring when the policy + // is created. When calling the + // [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy] + // method, do not include the `name` field in the alerting policy passed as + // part of the request. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // A short name or phrase used to identify the policy in dashboards, + // notifications, and incidents. To avoid confusion, don't use the same + // display name for multiple policies in the same project. The name is + // limited to 512 Unicode characters. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName" json:"display_name,omitempty"` + // Documentation that is included with notifications and incidents related to + // this policy. Best practice is for the documentation to include information + // to help responders understand, mitigate, escalate, and correct the + // underlying problems detected by the alerting policy. Notification channels + // that have limited capacity might not show this documentation. + Documentation *AlertPolicy_Documentation `protobuf:"bytes,13,opt,name=documentation" json:"documentation,omitempty"` + // User-supplied key/value data to be used for organizing and + // identifying the `AlertPolicy` objects. + // + // The field can contain up to 64 entries. Each key and value is limited to + // 63 Unicode characters or 128 bytes, whichever is smaller. Labels and + // values can contain only lowercase letters, numerals, underscores, and + // dashes. Keys must begin with a letter. 
+ UserLabels map[string]string `protobuf:"bytes,16,rep,name=user_labels,json=userLabels" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // A list of conditions for the policy. The conditions are combined by AND or + // OR according to the `combiner` field. If the combined conditions evaluate + // to true, then an incident is created. A policy can have from one to six + // conditions. + Conditions []*AlertPolicy_Condition `protobuf:"bytes,12,rep,name=conditions" json:"conditions,omitempty"` + // How to combine the results of multiple conditions + // to determine if an incident should be opened. + Combiner AlertPolicy_ConditionCombinerType `protobuf:"varint,6,opt,name=combiner,enum=google.monitoring.v3.AlertPolicy_ConditionCombinerType" json:"combiner,omitempty"` + // Whether or not the policy is enabled. On write, the default interpretation + // if unset is that the policy is enabled. On read, clients should not make + // any assumption about the state if it has not been populated. The + // field should always be populated on List and Get operations, unless + // a field projection has been specified that strips it out. + Enabled *google_protobuf4.BoolValue `protobuf:"bytes,17,opt,name=enabled" json:"enabled,omitempty"` + // Identifies the notification channels to which notifications should be sent + // when incidents are opened or closed or when new violations occur on + // an already opened incident. Each element of this array corresponds to + // the `name` field in each of the + // [`NotificationChannel`][google.monitoring.v3.NotificationChannel] + // objects that are returned from the [`ListNotificationChannels`] + // [google.monitoring.v3.NotificationChannelService.ListNotificationChannels] + // method. The syntax of the entries in this field is: + // + // projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID] + NotificationChannels []string `protobuf:"bytes,14,rep,name=notification_channels,json=notificationChannels" json:"notification_channels,omitempty"` + // A read-only record of the creation of the alerting policy. If provided + // in a call to create or update, this field will be ignored. + CreationRecord *MutationRecord `protobuf:"bytes,10,opt,name=creation_record,json=creationRecord" json:"creation_record,omitempty"` + // A read-only record of the most recent change to the alerting policy. If + // provided in a call to create or update, this field will be ignored. 
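A minimal sketch, not part of the vendored file, assembling an AlertPolicy literal from the fields declared above; the display name and label are hypothetical values chosen to respect the documented limits.

func examplePolicy() *AlertPolicy {
	return &AlertPolicy{
		DisplayName: "High error rate", // hypothetical; limited to 512 Unicode characters
		Combiner:    AlertPolicy_AND,   // an incident opens only when all conditions are met
		UserLabels: map[string]string{
			"team": "platform", // hypothetical; lowercase letters, numerals, underscores, dashes
		},
	}
}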
+ MutationRecord *MutationRecord `protobuf:"bytes,11,opt,name=mutation_record,json=mutationRecord" json:"mutation_record,omitempty"` +} + +func (m *AlertPolicy) Reset() { *m = AlertPolicy{} } +func (m *AlertPolicy) String() string { return proto.CompactTextString(m) } +func (*AlertPolicy) ProtoMessage() {} +func (*AlertPolicy) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *AlertPolicy) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AlertPolicy) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *AlertPolicy) GetDocumentation() *AlertPolicy_Documentation { + if m != nil { + return m.Documentation + } + return nil +} + +func (m *AlertPolicy) GetUserLabels() map[string]string { + if m != nil { + return m.UserLabels + } + return nil +} + +func (m *AlertPolicy) GetConditions() []*AlertPolicy_Condition { + if m != nil { + return m.Conditions + } + return nil +} + +func (m *AlertPolicy) GetCombiner() AlertPolicy_ConditionCombinerType { + if m != nil { + return m.Combiner + } + return AlertPolicy_COMBINE_UNSPECIFIED +} + +func (m *AlertPolicy) GetEnabled() *google_protobuf4.BoolValue { + if m != nil { + return m.Enabled + } + return nil +} + +func (m *AlertPolicy) GetNotificationChannels() []string { + if m != nil { + return m.NotificationChannels + } + return nil +} + +func (m *AlertPolicy) GetCreationRecord() *MutationRecord { + if m != nil { + return m.CreationRecord + } + return nil +} + +func (m *AlertPolicy) GetMutationRecord() *MutationRecord { + if m != nil { + return m.MutationRecord + } + return nil +} + +// A content string and a MIME type that describes the content string's +// format. +type AlertPolicy_Documentation struct { + // The text of the documentation, interpreted according to `mime_type`. + // The content may not exceed 8,192 Unicode characters and may not exceed + // more than 10,240 bytes when encoded in UTF-8 format, whichever is + // smaller. + Content string `protobuf:"bytes,1,opt,name=content" json:"content,omitempty"` + // The format of the `content` field. Presently, only the value + // `"text/markdown"` is supported. See + // [Markdown](https://en.wikipedia.org/wiki/Markdown) for more information. + MimeType string `protobuf:"bytes,2,opt,name=mime_type,json=mimeType" json:"mime_type,omitempty"` +} + +func (m *AlertPolicy_Documentation) Reset() { *m = AlertPolicy_Documentation{} } +func (m *AlertPolicy_Documentation) String() string { return proto.CompactTextString(m) } +func (*AlertPolicy_Documentation) ProtoMessage() {} +func (*AlertPolicy_Documentation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } + +func (m *AlertPolicy_Documentation) GetContent() string { + if m != nil { + return m.Content + } + return "" +} + +func (m *AlertPolicy_Documentation) GetMimeType() string { + if m != nil { + return m.MimeType + } + return "" +} + +// A condition is a true/false test that determines when an alerting policy +// should open an incident. If a condition evaluates to true, it signifies +// that something is wrong. +type AlertPolicy_Condition struct { + // Required if the condition exists. The unique resource name for this + // condition. Its syntax is: + // + // projects/[PROJECT_ID]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID] + // + // `[CONDITION_ID]` is assigned by Stackdriver Monitoring when the + // condition is created as part of a new or updated alerting policy. 
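A minimal sketch, not part of the vendored file, of the getter-chaining idiom the accessors above enable: each Get* method returns its zero value on a nil receiver, so nested reads need no intermediate nil checks. The helper name is hypothetical.

func policyMimeType(p *AlertPolicy) string {
	// Returns "" when p or p.Documentation is nil instead of panicking.
	return p.GetDocumentation().GetMimeType()
}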
+ // + // When calling the + // [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy] + // method, do not include the `name` field in the conditions of the + // requested alerting policy. Stackdriver Monitoring creates the + // condition identifiers and includes them in the new policy. + // + // When calling the + // [alertPolicies.update][google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy] + // method to update a policy, including a condition `name` causes the + // existing condition to be updated. Conditions without names are added to + // the updated policy. Existing conditions are deleted if they are not + // updated. + // + // Best practice is to preserve `[CONDITION_ID]` if you make only small + // changes, such as those to condition thresholds, durations, or trigger + // values. Otherwise, treat the change as a new condition and let the + // existing condition be deleted. + Name string `protobuf:"bytes,12,opt,name=name" json:"name,omitempty"` + // A short name or phrase used to identify the condition in dashboards, + // notifications, and incidents. To avoid confusion, don't use the same + // display name for multiple conditions in the same policy. + DisplayName string `protobuf:"bytes,6,opt,name=display_name,json=displayName" json:"display_name,omitempty"` + // Only one of the following condition types will be specified. + // + // Types that are valid to be assigned to Condition: + // *AlertPolicy_Condition_ConditionThreshold + // *AlertPolicy_Condition_ConditionAbsent + Condition isAlertPolicy_Condition_Condition `protobuf_oneof:"condition"` +} + +func (m *AlertPolicy_Condition) Reset() { *m = AlertPolicy_Condition{} } +func (m *AlertPolicy_Condition) String() string { return proto.CompactTextString(m) } +func (*AlertPolicy_Condition) ProtoMessage() {} +func (*AlertPolicy_Condition) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 1} } + +type isAlertPolicy_Condition_Condition interface { + isAlertPolicy_Condition_Condition() +} + +type AlertPolicy_Condition_ConditionThreshold struct { + ConditionThreshold *AlertPolicy_Condition_MetricThreshold `protobuf:"bytes,1,opt,name=condition_threshold,json=conditionThreshold,oneof"` +} +type AlertPolicy_Condition_ConditionAbsent struct { + ConditionAbsent *AlertPolicy_Condition_MetricAbsence `protobuf:"bytes,2,opt,name=condition_absent,json=conditionAbsent,oneof"` +} + +func (*AlertPolicy_Condition_ConditionThreshold) isAlertPolicy_Condition_Condition() {} +func (*AlertPolicy_Condition_ConditionAbsent) isAlertPolicy_Condition_Condition() {} + +func (m *AlertPolicy_Condition) GetCondition() isAlertPolicy_Condition_Condition { + if m != nil { + return m.Condition + } + return nil +} + +func (m *AlertPolicy_Condition) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AlertPolicy_Condition) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *AlertPolicy_Condition) GetConditionThreshold() *AlertPolicy_Condition_MetricThreshold { + if x, ok := m.GetCondition().(*AlertPolicy_Condition_ConditionThreshold); ok { + return x.ConditionThreshold + } + return nil +} + +func (m *AlertPolicy_Condition) GetConditionAbsent() *AlertPolicy_Condition_MetricAbsence { + if x, ok := m.GetCondition().(*AlertPolicy_Condition_ConditionAbsent); ok { + return x.ConditionAbsent + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
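A minimal sketch, not part of the vendored file: the condition oneof is normally inspected with a type switch over GetCondition() rather than through the XXX_OneofFuncs plumbing that follows. The helper name is hypothetical.

func conditionKind(c *AlertPolicy_Condition) string {
	switch c.GetCondition().(type) {
	case *AlertPolicy_Condition_ConditionThreshold:
		return "metric threshold"
	case *AlertPolicy_Condition_ConditionAbsent:
		return "metric absence"
	default:
		return "unset" // nil receiver, or no variant populated
	}
}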
+func (*AlertPolicy_Condition) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _AlertPolicy_Condition_OneofMarshaler, _AlertPolicy_Condition_OneofUnmarshaler, _AlertPolicy_Condition_OneofSizer, []interface{}{ + (*AlertPolicy_Condition_ConditionThreshold)(nil), + (*AlertPolicy_Condition_ConditionAbsent)(nil), + } +} + +func _AlertPolicy_Condition_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*AlertPolicy_Condition) + // condition + switch x := m.Condition.(type) { + case *AlertPolicy_Condition_ConditionThreshold: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ConditionThreshold); err != nil { + return err + } + case *AlertPolicy_Condition_ConditionAbsent: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ConditionAbsent); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("AlertPolicy_Condition.Condition has unexpected type %T", x) + } + return nil +} + +func _AlertPolicy_Condition_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*AlertPolicy_Condition) + switch tag { + case 1: // condition.condition_threshold + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(AlertPolicy_Condition_MetricThreshold) + err := b.DecodeMessage(msg) + m.Condition = &AlertPolicy_Condition_ConditionThreshold{msg} + return true, err + case 2: // condition.condition_absent + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(AlertPolicy_Condition_MetricAbsence) + err := b.DecodeMessage(msg) + m.Condition = &AlertPolicy_Condition_ConditionAbsent{msg} + return true, err + default: + return false, nil + } +} + +func _AlertPolicy_Condition_OneofSizer(msg proto.Message) (n int) { + m := msg.(*AlertPolicy_Condition) + // condition + switch x := m.Condition.(type) { + case *AlertPolicy_Condition_ConditionThreshold: + s := proto.Size(x.ConditionThreshold) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AlertPolicy_Condition_ConditionAbsent: + s := proto.Size(x.ConditionAbsent) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Specifies how many time series must fail a predicate to trigger a +// condition. If not specified, then a `{count: 1}` trigger is used. +type AlertPolicy_Condition_Trigger struct { + // A type of trigger. 
+ // + // Types that are valid to be assigned to Type: + // *AlertPolicy_Condition_Trigger_Count + // *AlertPolicy_Condition_Trigger_Percent + Type isAlertPolicy_Condition_Trigger_Type `protobuf_oneof:"type"` +} + +func (m *AlertPolicy_Condition_Trigger) Reset() { *m = AlertPolicy_Condition_Trigger{} } +func (m *AlertPolicy_Condition_Trigger) String() string { return proto.CompactTextString(m) } +func (*AlertPolicy_Condition_Trigger) ProtoMessage() {} +func (*AlertPolicy_Condition_Trigger) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{0, 1, 0} +} + +type isAlertPolicy_Condition_Trigger_Type interface { + isAlertPolicy_Condition_Trigger_Type() +} + +type AlertPolicy_Condition_Trigger_Count struct { + Count int32 `protobuf:"varint,1,opt,name=count,oneof"` +} +type AlertPolicy_Condition_Trigger_Percent struct { + Percent float64 `protobuf:"fixed64,2,opt,name=percent,oneof"` +} + +func (*AlertPolicy_Condition_Trigger_Count) isAlertPolicy_Condition_Trigger_Type() {} +func (*AlertPolicy_Condition_Trigger_Percent) isAlertPolicy_Condition_Trigger_Type() {} + +func (m *AlertPolicy_Condition_Trigger) GetType() isAlertPolicy_Condition_Trigger_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *AlertPolicy_Condition_Trigger) GetCount() int32 { + if x, ok := m.GetType().(*AlertPolicy_Condition_Trigger_Count); ok { + return x.Count + } + return 0 +} + +func (m *AlertPolicy_Condition_Trigger) GetPercent() float64 { + if x, ok := m.GetType().(*AlertPolicy_Condition_Trigger_Percent); ok { + return x.Percent + } + return 0 +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*AlertPolicy_Condition_Trigger) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _AlertPolicy_Condition_Trigger_OneofMarshaler, _AlertPolicy_Condition_Trigger_OneofUnmarshaler, _AlertPolicy_Condition_Trigger_OneofSizer, []interface{}{ + (*AlertPolicy_Condition_Trigger_Count)(nil), + (*AlertPolicy_Condition_Trigger_Percent)(nil), + } +} + +func _AlertPolicy_Condition_Trigger_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*AlertPolicy_Condition_Trigger) + // type + switch x := m.Type.(type) { + case *AlertPolicy_Condition_Trigger_Count: + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Count)) + case *AlertPolicy_Condition_Trigger_Percent: + b.EncodeVarint(2<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.Percent)) + case nil: + default: + return fmt.Errorf("AlertPolicy_Condition_Trigger.Type has unexpected type %T", x) + } + return nil +} + +func _AlertPolicy_Condition_Trigger_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*AlertPolicy_Condition_Trigger) + switch tag { + case 1: // type.count + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Type = &AlertPolicy_Condition_Trigger_Count{int32(x)} + return true, err + case 2: // type.percent + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Type = &AlertPolicy_Condition_Trigger_Percent{math.Float64frombits(x)} + return true, err + default: + return false, nil + } +} + +func _AlertPolicy_Condition_Trigger_OneofSizer(msg proto.Message) (n int) { + m := msg.(*AlertPolicy_Condition_Trigger) + // type + switch x := m.Type.(type) { + case 
*AlertPolicy_Condition_Trigger_Count: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Count)) + case *AlertPolicy_Condition_Trigger_Percent: + n += proto.SizeVarint(2<<3 | proto.WireFixed64) + n += 8 + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A condition type that compares a collection of time series +// against a threshold. +type AlertPolicy_Condition_MetricThreshold struct { + // A [filter](/monitoring/api/v3/filters) that + // identifies which time series should be compared with the threshold. + // + // The filter is similar to the one that is specified in the + // [`MetricService.ListTimeSeries` + // request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that + // call is useful to verify the time series that will be retrieved / + // processed) and must specify the metric type and optionally may contain + // restrictions on resource type, resource labels, and metric labels. + // This field may not exceed 2048 Unicode characters in length. + Filter string `protobuf:"bytes,2,opt,name=filter" json:"filter,omitempty"` + // Specifies the alignment of data points in individual time series as + // well as how to combine the retrieved time series together (such as + // when aggregating multiple streams on each resource to a single + // stream for each resource or when aggregating streams across all + // members of a group of resources). Multiple aggregations + // are applied in the order specified. + // + // This field is similar to the one in the + // [`MetricService.ListTimeSeries` request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). + // It is advisable to use the `ListTimeSeries` method when debugging this field. + Aggregations []*Aggregation `protobuf:"bytes,8,rep,name=aggregations" json:"aggregations,omitempty"` + // A [filter](/monitoring/api/v3/filters) that identifies a time + // series that should be used as the denominator of a ratio that will be + // compared with the threshold. If a `denominator_filter` is specified, + // the time series specified by the `filter` field will be used as the + // numerator. + // + // The filter is similar to the one that is specified in the + // [`MetricService.ListTimeSeries` + // request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that + // call is useful to verify the time series that will be retrieved / + // processed) and must specify the metric type and optionally may contain + // restrictions on resource type, resource labels, and metric labels. + // This field may not exceed 2048 Unicode characters in length. + DenominatorFilter string `protobuf:"bytes,9,opt,name=denominator_filter,json=denominatorFilter" json:"denominator_filter,omitempty"` + // Specifies the alignment of data points in individual time series + // selected by `denominatorFilter` as + // well as how to combine the retrieved time series together (such as + // when aggregating multiple streams on each resource to a single + // stream for each resource or when aggregating streams across all + // members of a group of resources). + // + // When computing ratios, the `aggregations` and + // `denominator_aggregations` fields must use the same alignment period + // and produce time series that have the same periodicity and labels. + // + // This field is similar to the one in the + // [`MetricService.ListTimeSeries` + // request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). 
It + // is advisable to use the `ListTimeSeries` method when debugging this + // field. + DenominatorAggregations []*Aggregation `protobuf:"bytes,10,rep,name=denominator_aggregations,json=denominatorAggregations" json:"denominator_aggregations,omitempty"` + // The comparison to apply between the time series (indicated by `filter` + // and `aggregation`) and the threshold (indicated by `threshold_value`). + // The comparison is applied on each time series, with the time series + // on the left-hand side and the threshold on the right-hand side. + // + // Only `COMPARISON_LT` and `COMPARISON_GT` are supported currently. + Comparison ComparisonType `protobuf:"varint,4,opt,name=comparison,enum=google.monitoring.v3.ComparisonType" json:"comparison,omitempty"` + // A value against which to compare the time series. + ThresholdValue float64 `protobuf:"fixed64,5,opt,name=threshold_value,json=thresholdValue" json:"threshold_value,omitempty"` + // The amount of time that a time series must violate the + // threshold to be considered failing. Currently, only values + // that are a multiple of a minute--e.g. 60, 120, or 300 + // seconds--are supported. If an invalid value is given, an + // error will be returned. The `Duration.nanos` field is + // ignored. When choosing a duration, it is useful to keep in mind the + // frequency of the underlying time series data (which may also be + // affected by any alignments specified in the `aggregation` field); + // a good duration is long enough so that a single outlier does not + // generate spurious alerts, but short enough that unhealthy states + // are detected and alerted on quickly. + Duration *google_protobuf3.Duration `protobuf:"bytes,6,opt,name=duration" json:"duration,omitempty"` + // The number/percent of time series for which the comparison must hold + // in order for the condition to trigger. If unspecified, then the + // condition will trigger if the comparison is true for any of the + // time series that have been identified by `filter` and `aggregations`, + // or by the ratio, if `denominator_filter` and `denominator_aggregations` + // are specified. 
+ Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,7,opt,name=trigger" json:"trigger,omitempty"` +} + +func (m *AlertPolicy_Condition_MetricThreshold) Reset() { *m = AlertPolicy_Condition_MetricThreshold{} } +func (m *AlertPolicy_Condition_MetricThreshold) String() string { return proto.CompactTextString(m) } +func (*AlertPolicy_Condition_MetricThreshold) ProtoMessage() {} +func (*AlertPolicy_Condition_MetricThreshold) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{0, 1, 1} +} + +func (m *AlertPolicy_Condition_MetricThreshold) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *AlertPolicy_Condition_MetricThreshold) GetAggregations() []*Aggregation { + if m != nil { + return m.Aggregations + } + return nil +} + +func (m *AlertPolicy_Condition_MetricThreshold) GetDenominatorFilter() string { + if m != nil { + return m.DenominatorFilter + } + return "" +} + +func (m *AlertPolicy_Condition_MetricThreshold) GetDenominatorAggregations() []*Aggregation { + if m != nil { + return m.DenominatorAggregations + } + return nil +} + +func (m *AlertPolicy_Condition_MetricThreshold) GetComparison() ComparisonType { + if m != nil { + return m.Comparison + } + return ComparisonType_COMPARISON_UNSPECIFIED +} + +func (m *AlertPolicy_Condition_MetricThreshold) GetThresholdValue() float64 { + if m != nil { + return m.ThresholdValue + } + return 0 +} + +func (m *AlertPolicy_Condition_MetricThreshold) GetDuration() *google_protobuf3.Duration { + if m != nil { + return m.Duration + } + return nil +} + +func (m *AlertPolicy_Condition_MetricThreshold) GetTrigger() *AlertPolicy_Condition_Trigger { + if m != nil { + return m.Trigger + } + return nil +} + +// A condition type that checks that monitored resources +// are reporting data. The configuration defines a metric and +// a set of monitored resources. The predicate is considered in violation +// when a time series for the specified metric of a monitored +// resource does not include any data in the specified `duration`. +type AlertPolicy_Condition_MetricAbsence struct { + // A [filter](/monitoring/api/v3/filters) that + // identifies which time series should be compared with the threshold. + // + // The filter is similar to the one that is specified in the + // [`MetricService.ListTimeSeries` + // request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that + // call is useful to verify the time series that will be retrieved / + // processed) and must specify the metric type and optionally may contain + // restrictions on resource type, resource labels, and metric labels. + // This field may not exceed 2048 Unicode characters in length. + Filter string `protobuf:"bytes,1,opt,name=filter" json:"filter,omitempty"` + // Specifies the alignment of data points in individual time series as + // well as how to combine the retrieved time series together (such as + // when aggregating multiple streams on each resource to a single + // stream for each resource or when aggregating streams across all + // members of a group of resources). Multiple aggregations + // are applied in the order specified. + // + // This field is similar to the + // one in the [`MetricService.ListTimeSeries` request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). + // It is advisable to use the `ListTimeSeries` method when debugging this field. 
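A minimal sketch, not part of the vendored file, populating the MetricThreshold condition defined above. The metric filter is hypothetical; the sketch assumes the usual generated name ComparisonType_COMPARISON_GT for the COMPARISON_GT value from common.proto, and honors the documented rules that the duration be a whole multiple of a minute and that only COMPARISON_GT / COMPARISON_LT are currently supported.

func exampleThreshold() *AlertPolicy_Condition_MetricThreshold {
	return &AlertPolicy_Condition_MetricThreshold{
		Filter:         `metric.type="compute.googleapis.com/instance/cpu/utilization"`, // hypothetical filter
		Comparison:     ComparisonType_COMPARISON_GT,
		ThresholdValue: 0.9,
		Duration:       &google_protobuf3.Duration{Seconds: 300}, // 5 minutes; Duration.nanos is ignored
		Trigger: &AlertPolicy_Condition_Trigger{
			// A single violating time series is enough to trigger.
			Type: &AlertPolicy_Condition_Trigger_Count{Count: 1},
		},
	}
}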
+ Aggregations []*Aggregation `protobuf:"bytes,5,rep,name=aggregations" json:"aggregations,omitempty"` + // The amount of time that a time series must fail to report new + // data to be considered failing. Currently, only values that + // are a multiple of a minute--e.g. 60, 120, or 300 + // seconds--are supported. If an invalid value is given, an + // error will be returned. The `Duration.nanos` field is + // ignored. + Duration *google_protobuf3.Duration `protobuf:"bytes,2,opt,name=duration" json:"duration,omitempty"` + // The number/percent of time series for which the comparison must hold + // in order for the condition to trigger. If unspecified, then the + // condition will trigger if the comparison is true for any of the + // time series that have been identified by `filter` and `aggregations`. + Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,3,opt,name=trigger" json:"trigger,omitempty"` +} + +func (m *AlertPolicy_Condition_MetricAbsence) Reset() { *m = AlertPolicy_Condition_MetricAbsence{} } +func (m *AlertPolicy_Condition_MetricAbsence) String() string { return proto.CompactTextString(m) } +func (*AlertPolicy_Condition_MetricAbsence) ProtoMessage() {} +func (*AlertPolicy_Condition_MetricAbsence) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{0, 1, 2} +} + +func (m *AlertPolicy_Condition_MetricAbsence) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *AlertPolicy_Condition_MetricAbsence) GetAggregations() []*Aggregation { + if m != nil { + return m.Aggregations + } + return nil +} + +func (m *AlertPolicy_Condition_MetricAbsence) GetDuration() *google_protobuf3.Duration { + if m != nil { + return m.Duration + } + return nil +} + +func (m *AlertPolicy_Condition_MetricAbsence) GetTrigger() *AlertPolicy_Condition_Trigger { + if m != nil { + return m.Trigger + } + return nil +} + +func init() { + proto.RegisterType((*AlertPolicy)(nil), "google.monitoring.v3.AlertPolicy") + proto.RegisterType((*AlertPolicy_Documentation)(nil), "google.monitoring.v3.AlertPolicy.Documentation") + proto.RegisterType((*AlertPolicy_Condition)(nil), "google.monitoring.v3.AlertPolicy.Condition") + proto.RegisterType((*AlertPolicy_Condition_Trigger)(nil), "google.monitoring.v3.AlertPolicy.Condition.Trigger") + proto.RegisterType((*AlertPolicy_Condition_MetricThreshold)(nil), "google.monitoring.v3.AlertPolicy.Condition.MetricThreshold") + proto.RegisterType((*AlertPolicy_Condition_MetricAbsence)(nil), "google.monitoring.v3.AlertPolicy.Condition.MetricAbsence") + proto.RegisterEnum("google.monitoring.v3.AlertPolicy_ConditionCombinerType", AlertPolicy_ConditionCombinerType_name, AlertPolicy_ConditionCombinerType_value) +} + +func init() { proto.RegisterFile("google/monitoring/v3/alert.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 941 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xeb, 0x6e, 0xe3, 0x44, + 0x14, 0xae, 0x93, 0xe6, 0x76, 0xd2, 0x36, 0xd9, 0xd9, 0xee, 0xae, 0x31, 0x68, 0x95, 0xae, 0x90, + 0x88, 0x40, 0x38, 0x22, 0x01, 0x71, 0x59, 0x81, 0x94, 0x5b, 0x37, 0x11, 0x24, 0xad, 0xa6, 0x69, + 0x91, 0x50, 0x25, 0xcb, 0x71, 0xa6, 0xae, 0x85, 0x3d, 0x63, 0x4d, 0xec, 0xa2, 0xbc, 0x0e, 0x3f, + 0x79, 0x14, 0x1e, 0x81, 0x7f, 0xbc, 0x02, 0xe2, 0x01, 0x90, 0xc7, 0x63, 0xc7, 0xe9, 0xa6, 0xbb, + 0x64, 0xf7, 0x5f, 0xce, 0x9c, 0xef, 0x7c, 0xe7, 0xf6, 0xcd, 0x38, 0xd0, 0xb0, 0x19, 0xb3, 0x5d, + 0xd2, 0xf2, 0x18, 0x75, 0x02, 0xc6, 0x1d, 0x6a, 0xb7, 0xee, 0x3a, 0x2d, 0xd3, 0x25, 
0x3c, 0xd0, + 0x7d, 0xce, 0x02, 0x86, 0x8e, 0x63, 0x84, 0xbe, 0x46, 0xe8, 0x77, 0x1d, 0xed, 0x23, 0x19, 0x67, + 0xfa, 0x4e, 0xcb, 0xa4, 0x94, 0x05, 0x66, 0xe0, 0x30, 0xba, 0x8c, 0x63, 0xb4, 0x93, 0xad, 0xac, + 0x16, 0xf3, 0x3c, 0x46, 0x25, 0xe4, 0xd3, 0xad, 0x10, 0x2f, 0x8c, 0x89, 0x0c, 0x4e, 0x2c, 0xc6, + 0x17, 0x12, 0xfb, 0x5c, 0x62, 0x85, 0x35, 0x0f, 0x6f, 0x5a, 0x8b, 0x90, 0x0b, 0xd8, 0x43, 0xfe, + 0xdf, 0xb8, 0xe9, 0xfb, 0x84, 0xcb, 0x72, 0x5e, 0xfc, 0x5d, 0x83, 0x6a, 0x37, 0x6a, 0xe9, 0x9c, + 0xb9, 0x8e, 0xb5, 0x42, 0x08, 0xf6, 0xa9, 0xe9, 0x11, 0x55, 0x69, 0x28, 0xcd, 0x0a, 0x16, 0xbf, + 0xd1, 0x09, 0x1c, 0x2c, 0x9c, 0xa5, 0xef, 0x9a, 0x2b, 0x43, 0xf8, 0x72, 0xc2, 0x57, 0x95, 0x67, + 0xd3, 0x08, 0x72, 0x09, 0x87, 0x0b, 0x66, 0x85, 0x1e, 0xa1, 0x71, 0x91, 0xea, 0x61, 0x43, 0x69, + 0x56, 0xdb, 0x2d, 0x7d, 0xdb, 0x84, 0xf4, 0x4c, 0x42, 0x7d, 0x90, 0x0d, 0xc3, 0x9b, 0x2c, 0x08, + 0x43, 0x35, 0x5c, 0x12, 0x6e, 0xb8, 0xe6, 0x9c, 0xb8, 0x4b, 0xb5, 0xde, 0xc8, 0x37, 0xab, 0xed, + 0x2f, 0xde, 0x4e, 0x7a, 0xb9, 0x24, 0xfc, 0x27, 0x11, 0x33, 0xa4, 0x01, 0x5f, 0x61, 0x08, 0xd3, + 0x03, 0xf4, 0x23, 0x80, 0xc5, 0xe8, 0xc2, 0x11, 0x4b, 0x51, 0x0f, 0x04, 0xe5, 0x67, 0x6f, 0xa7, + 0xec, 0x27, 0x31, 0x38, 0x13, 0x8e, 0x2e, 0xa0, 0x6c, 0x31, 0x6f, 0xee, 0x50, 0xc2, 0xd5, 0x62, + 0x43, 0x69, 0x1e, 0xb5, 0xbf, 0xde, 0x81, 0xaa, 0x2f, 0x43, 0x67, 0x2b, 0x9f, 0xe0, 0x94, 0x08, + 0x7d, 0x09, 0x25, 0x42, 0xcd, 0xb9, 0x4b, 0x16, 0xea, 0x23, 0x31, 0x46, 0x2d, 0xe1, 0x4c, 0xb6, + 0xa8, 0xf7, 0x18, 0x73, 0xaf, 0x4c, 0x37, 0x24, 0x38, 0x81, 0xa2, 0x0e, 0x3c, 0xa1, 0x2c, 0x70, + 0x6e, 0x1c, 0x2b, 0x96, 0x89, 0x75, 0x6b, 0x52, 0x1a, 0x4d, 0xed, 0xa8, 0x91, 0x6f, 0x56, 0xf0, + 0x71, 0xd6, 0xd9, 0x97, 0x3e, 0x34, 0x81, 0x9a, 0xc5, 0x49, 0x56, 0x57, 0x2a, 0x88, 0x94, 0x1f, + 0x6f, 0x6f, 0x63, 0x22, 0x45, 0x88, 0x05, 0x16, 0x1f, 0x25, 0xc1, 0xb1, 0x1d, 0xd1, 0xdd, 0x93, + 0xa9, 0x5a, 0xdd, 0x85, 0xce, 0xdb, 0xb0, 0xb5, 0x53, 0x38, 0xdc, 0x90, 0x07, 0x52, 0xa1, 0x64, + 0x31, 0x1a, 0x10, 0x1a, 0x48, 0x81, 0x26, 0x26, 0xfa, 0x10, 0x2a, 0x9e, 0xe3, 0x11, 0x23, 0x58, + 0xf9, 0x89, 0x40, 0xcb, 0xd1, 0x41, 0x34, 0x5a, 0xed, 0xaf, 0x32, 0x54, 0xd2, 0xa1, 0xa7, 0x12, + 0x3f, 0x78, 0x83, 0xc4, 0x8b, 0xaf, 0x4b, 0x9c, 0xc2, 0xe3, 0x74, 0xf1, 0x46, 0x70, 0xcb, 0xc9, + 0xf2, 0x96, 0xb9, 0x0b, 0x51, 0x47, 0xb5, 0xfd, 0x72, 0x87, 0xad, 0xeb, 0x13, 0x12, 0x70, 0xc7, + 0x9a, 0x25, 0x14, 0xa3, 0x3d, 0x8c, 0x52, 0xe6, 0xf4, 0x14, 0xdd, 0x40, 0x7d, 0x9d, 0xcf, 0x9c, + 0x2f, 0xa3, 0xa6, 0x73, 0x22, 0xd9, 0xb7, 0xbb, 0x27, 0xeb, 0x46, 0xf1, 0x16, 0x19, 0xed, 0xe1, + 0x5a, 0x4a, 0x2a, 0xce, 0x02, 0x6d, 0x08, 0xa5, 0x19, 0x77, 0x6c, 0x9b, 0x70, 0xf4, 0x14, 0x0a, + 0x16, 0x0b, 0xe5, 0x70, 0x0b, 0xa3, 0x3d, 0x1c, 0x9b, 0x48, 0x83, 0x92, 0x4f, 0xb8, 0x95, 0x54, + 0xa0, 0x8c, 0xf6, 0x70, 0x72, 0xd0, 0x2b, 0xc2, 0x7e, 0x34, 0x73, 0xed, 0x9f, 0x3c, 0xd4, 0xee, + 0x35, 0x86, 0x9e, 0x42, 0xf1, 0xc6, 0x71, 0x03, 0xc2, 0xe5, 0x46, 0xa4, 0x85, 0x86, 0x70, 0x60, + 0xda, 0x36, 0x27, 0x76, 0xfc, 0x32, 0xaa, 0x65, 0x71, 0x09, 0x4f, 0x1e, 0x68, 0x6b, 0x8d, 0xc4, + 0x1b, 0x61, 0xe8, 0x73, 0x40, 0x0b, 0x42, 0x99, 0xe7, 0x50, 0x33, 0x60, 0xdc, 0x90, 0xa9, 0x2a, + 0x22, 0xd5, 0xa3, 0x8c, 0xe7, 0x34, 0xce, 0x7a, 0x0d, 0x6a, 0x16, 0xbe, 0x51, 0x01, 0xfc, 0xdf, + 0x0a, 0x9e, 0x65, 0x28, 0xba, 0xd9, 0x62, 0x06, 0xd1, 0xb3, 0xe2, 0xf9, 0x26, 0x77, 0x96, 0x8c, + 0xaa, 0xfb, 0xe2, 0x2d, 0x78, 0x40, 0xf5, 0xfd, 0x14, 0x27, 0x2e, 0x7e, 0x26, 0x0e, 0x7d, 0x02, + 0xb5, 0x54, 0x5a, 0xc6, 0x5d, 0x74, 0xc1, 0xd5, 0x42, 0x34, 0x71, 0x7c, 0x94, 0x1e, 0x8b, 0x6b, + 0x8f, 0xbe, 
0x82, 0x72, 0xf2, 0xd2, 0x0b, 0xb1, 0x56, 0xdb, 0x1f, 0xbc, 0xf6, 0x48, 0x0c, 0x24, + 0x00, 0xa7, 0x50, 0x34, 0x81, 0x52, 0x10, 0x2f, 0x5b, 0x2d, 0x89, 0xa8, 0xce, 0x2e, 0x5a, 0x92, + 0x3a, 0xc1, 0x09, 0x87, 0xf6, 0xaf, 0x02, 0x87, 0x1b, 0x02, 0xcb, 0xac, 0x5c, 0x79, 0xe3, 0xca, + 0x0b, 0xef, 0xb6, 0xf2, 0x6c, 0xdb, 0xb9, 0x77, 0x6a, 0x3b, 0xff, 0xfe, 0x6d, 0xf7, 0xaa, 0x50, + 0x49, 0x6f, 0x91, 0xf6, 0x3d, 0xd4, 0xee, 0x7d, 0x6e, 0x50, 0x1d, 0xf2, 0xbf, 0x92, 0x95, 0x9c, + 0x40, 0xf4, 0x13, 0x1d, 0x43, 0x21, 0xde, 0x66, 0x7c, 0x11, 0x62, 0xe3, 0xbb, 0xdc, 0x37, 0xca, + 0x0b, 0x13, 0x9e, 0x6c, 0xfd, 0x1e, 0xa0, 0x67, 0xf0, 0xb8, 0x7f, 0x36, 0xe9, 0x8d, 0xa7, 0x43, + 0xe3, 0x72, 0x7a, 0x71, 0x3e, 0xec, 0x8f, 0x4f, 0xc7, 0xc3, 0x41, 0x7d, 0x0f, 0x95, 0x20, 0xdf, + 0x9d, 0x0e, 0xea, 0x0a, 0x2a, 0x42, 0xee, 0x0c, 0xd7, 0x73, 0xe8, 0x39, 0x68, 0xdd, 0xe9, 0xc0, + 0xf8, 0x79, 0x3c, 0x1b, 0x19, 0x93, 0xee, 0xac, 0x3f, 0x1a, 0x4f, 0x5f, 0x19, 0x78, 0x78, 0x71, + 0x76, 0x89, 0xfb, 0xc3, 0x7a, 0xbe, 0xf7, 0xbb, 0x02, 0xaa, 0xc5, 0xbc, 0xad, 0x2d, 0xf7, 0x20, + 0xee, 0x39, 0x1a, 0xde, 0xb9, 0xf2, 0xcb, 0x0f, 0x12, 0x63, 0x33, 0xd7, 0xa4, 0xb6, 0xce, 0xb8, + 0xdd, 0xb2, 0x09, 0x15, 0xa3, 0x6d, 0xc5, 0x2e, 0xd3, 0x77, 0x96, 0x9b, 0xff, 0x4c, 0x5e, 0xae, + 0xad, 0x3f, 0x72, 0xda, 0xab, 0x98, 0xa0, 0xef, 0xb2, 0x70, 0xa1, 0x4f, 0xd6, 0xa9, 0xae, 0x3a, + 0x7f, 0x26, 0xce, 0x6b, 0xe1, 0xbc, 0x5e, 0x3b, 0xaf, 0xaf, 0x3a, 0xf3, 0xa2, 0x48, 0xd2, 0xf9, + 0x2f, 0x00, 0x00, 0xff, 0xff, 0x66, 0xb5, 0x16, 0x64, 0x76, 0x09, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go new file mode 100644 index 000000000..0b51478ab --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go @@ -0,0 +1,527 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/alert_service.proto + +package monitoring + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf5 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf6 "google.golang.org/genproto/protobuf/field_mask" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// The protocol for the `CreateAlertPolicy` request. +type CreateAlertPolicyRequest struct { + // The project in which to create the alerting policy. The format is + // `projects/[PROJECT_ID]`. + // + // Note that this field names the parent container in which the alerting + // policy will be written, not the name of the created policy. The alerting + // policy that is returned will have a name that contains a normalized + // representation of this name as a prefix but adds a suffix of the form + // `/alertPolicies/[POLICY_ID]`, identifying the policy in the container. + Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` + // The requested alerting policy. You should omit the `name` field in this + // policy. The name will be returned in the new policy, including + // a new [ALERT_POLICY_ID] value. 
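[Editor's note: a hedged sketch of issuing the CreateAlertPolicyRequest described above, assuming the AlertPolicyServiceClient defined later in this file; "projects/my-project" is a placeholder, and the policy's own `name` is deliberately left unset as the comment above requires.]

package alertdemo

import (
	context "golang.org/x/net/context"
	monitoring "google.golang.org/genproto/googleapis/monitoring/v3"
)

// createPolicy writes a new alerting policy under the given project. The
// returned policy carries the server-assigned name, including the
// /alertPolicies/[POLICY_ID] suffix.
func createPolicy(ctx context.Context, client monitoring.AlertPolicyServiceClient, policy *monitoring.AlertPolicy) (*monitoring.AlertPolicy, error) {
	return client.CreateAlertPolicy(ctx, &monitoring.CreateAlertPolicyRequest{
		Name:        "projects/my-project",
		AlertPolicy: policy, // policy.Name left empty; the service assigns one
	})
}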
+ AlertPolicy *AlertPolicy `protobuf:"bytes,2,opt,name=alert_policy,json=alertPolicy" json:"alert_policy,omitempty"` +} + +func (m *CreateAlertPolicyRequest) Reset() { *m = CreateAlertPolicyRequest{} } +func (m *CreateAlertPolicyRequest) String() string { return proto.CompactTextString(m) } +func (*CreateAlertPolicyRequest) ProtoMessage() {} +func (*CreateAlertPolicyRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *CreateAlertPolicyRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateAlertPolicyRequest) GetAlertPolicy() *AlertPolicy { + if m != nil { + return m.AlertPolicy + } + return nil +} + +// The protocol for the `GetAlertPolicy` request. +type GetAlertPolicyRequest struct { + // The alerting policy to retrieve. The format is + // + // projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID] + Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` +} + +func (m *GetAlertPolicyRequest) Reset() { *m = GetAlertPolicyRequest{} } +func (m *GetAlertPolicyRequest) String() string { return proto.CompactTextString(m) } +func (*GetAlertPolicyRequest) ProtoMessage() {} +func (*GetAlertPolicyRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *GetAlertPolicyRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The protocol for the `ListAlertPolicies` request. +type ListAlertPoliciesRequest struct { + // The project whose alert policies are to be listed. The format is + // + // projects/[PROJECT_ID] + // + // Note that this field names the parent container in which the alerting + // policies to be listed are stored. To retrieve a single alerting policy + // by name, use the + // [GetAlertPolicy][google.monitoring.v3.AlertPolicyService.GetAlertPolicy] + // operation, instead. + Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + // If provided, this field specifies the criteria that must be met by + // alert policies to be included in the response. + // + // For more details, see [sorting and + // filtering](/monitoring/api/v3/sorting-and-filtering). + Filter string `protobuf:"bytes,5,opt,name=filter" json:"filter,omitempty"` + // A comma-separated list of fields by which to sort the result. Supports + // the same set of field references as the `filter` field. Entries can be + // prefixed with a minus sign to sort by the field in descending order. + // + // For more details, see [sorting and + // filtering](/monitoring/api/v3/sorting-and-filtering). + OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy" json:"order_by,omitempty"` + // The maximum number of results to return in a single response. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return more results from the previous method call. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListAlertPoliciesRequest) Reset() { *m = ListAlertPoliciesRequest{} } +func (m *ListAlertPoliciesRequest) String() string { return proto.CompactTextString(m) } +func (*ListAlertPoliciesRequest) ProtoMessage() {} +func (*ListAlertPoliciesRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *ListAlertPoliciesRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListAlertPoliciesRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListAlertPoliciesRequest) GetOrderBy() string { + if m != nil { + return m.OrderBy + } + return "" +} + +func (m *ListAlertPoliciesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListAlertPoliciesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The protocol for the `ListAlertPolicies` response. +type ListAlertPoliciesResponse struct { + // The returned alert policies. + AlertPolicies []*AlertPolicy `protobuf:"bytes,3,rep,name=alert_policies,json=alertPolicies" json:"alert_policies,omitempty"` + // If there might be more results than were returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `pageToken` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListAlertPoliciesResponse) Reset() { *m = ListAlertPoliciesResponse{} } +func (m *ListAlertPoliciesResponse) String() string { return proto.CompactTextString(m) } +func (*ListAlertPoliciesResponse) ProtoMessage() {} +func (*ListAlertPoliciesResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *ListAlertPoliciesResponse) GetAlertPolicies() []*AlertPolicy { + if m != nil { + return m.AlertPolicies + } + return nil +} + +func (m *ListAlertPoliciesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The protocol for the `UpdateAlertPolicy` request. +type UpdateAlertPolicyRequest struct { + // Optional. A list of alerting policy field names. If this field is not + // empty, each listed field in the existing alerting policy is set to the + // value of the corresponding field in the supplied policy (`alert_policy`), + // or to the field's default value if the field is not in the supplied + // alerting policy. Fields not listed retain their previous value. + // + // Examples of valid field masks include `display_name`, `documentation`, + // `documentation.content`, `documentation.mime_type`, `user_labels`, + // `user_label.nameofkey`, `enabled`, `conditions`, `combiner`, etc. + // + // If this field is empty, then the supplied alerting policy replaces the + // existing policy. It is the same as deleting the existing policy and + // adding the supplied policy, except for the following: + // + // + The new policy will have the same `[ALERT_POLICY_ID]` as the former + // policy. This gives you continuity with the former policy in your + // notifications and incidents. + // + Conditions in the new policy will keep their former `[CONDITION_ID]` if + // the supplied condition includes the `name` field with that + // `[CONDITION_ID]`. If the supplied condition omits the `name` field, + // then a new `[CONDITION_ID]` is created. 
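[Editor's note: the ListAlertPolicies request/response pair above follows the usual genproto page-token pattern; a sketch under the same assumptions as before (placeholder client and project name).]

package alertdemo

import (
	context "golang.org/x/net/context"
	monitoring "google.golang.org/genproto/googleapis/monitoring/v3"
)

// listAllPolicies drains every page of ListAlertPolicies.
func listAllPolicies(ctx context.Context, client monitoring.AlertPolicyServiceClient) ([]*monitoring.AlertPolicy, error) {
	var all []*monitoring.AlertPolicy
	req := &monitoring.ListAlertPoliciesRequest{Name: "projects/my-project", PageSize: 100}
	for {
		resp, err := client.ListAlertPolicies(ctx, req)
		if err != nil {
			return nil, err
		}
		all = append(all, resp.GetAlertPolicies()...)
		// An empty next_page_token signals the final page.
		if resp.GetNextPageToken() == "" {
			return all, nil
		}
		req.PageToken = resp.GetNextPageToken()
	}
}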
+ UpdateMask *google_protobuf6.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` + // Required. The updated alerting policy or the updated values for the + // fields listed in `update_mask`. + // If `update_mask` is not empty, any fields in this policy that are + // not in `update_mask` are ignored. + AlertPolicy *AlertPolicy `protobuf:"bytes,3,opt,name=alert_policy,json=alertPolicy" json:"alert_policy,omitempty"` +} + +func (m *UpdateAlertPolicyRequest) Reset() { *m = UpdateAlertPolicyRequest{} } +func (m *UpdateAlertPolicyRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateAlertPolicyRequest) ProtoMessage() {} +func (*UpdateAlertPolicyRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +func (m *UpdateAlertPolicyRequest) GetUpdateMask() *google_protobuf6.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateAlertPolicyRequest) GetAlertPolicy() *AlertPolicy { + if m != nil { + return m.AlertPolicy + } + return nil +} + +// The protocol for the `DeleteAlertPolicy` request. +type DeleteAlertPolicyRequest struct { + // The alerting policy to delete. The format is: + // + // projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID] + // + // For more information, see [AlertPolicy][google.monitoring.v3.AlertPolicy]. + Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteAlertPolicyRequest) Reset() { *m = DeleteAlertPolicyRequest{} } +func (m *DeleteAlertPolicyRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteAlertPolicyRequest) ProtoMessage() {} +func (*DeleteAlertPolicyRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } + +func (m *DeleteAlertPolicyRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func init() { + proto.RegisterType((*CreateAlertPolicyRequest)(nil), "google.monitoring.v3.CreateAlertPolicyRequest") + proto.RegisterType((*GetAlertPolicyRequest)(nil), "google.monitoring.v3.GetAlertPolicyRequest") + proto.RegisterType((*ListAlertPoliciesRequest)(nil), "google.monitoring.v3.ListAlertPoliciesRequest") + proto.RegisterType((*ListAlertPoliciesResponse)(nil), "google.monitoring.v3.ListAlertPoliciesResponse") + proto.RegisterType((*UpdateAlertPolicyRequest)(nil), "google.monitoring.v3.UpdateAlertPolicyRequest") + proto.RegisterType((*DeleteAlertPolicyRequest)(nil), "google.monitoring.v3.DeleteAlertPolicyRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for AlertPolicyService service + +type AlertPolicyServiceClient interface { + // Lists the existing alerting policies for the project. + ListAlertPolicies(ctx context.Context, in *ListAlertPoliciesRequest, opts ...grpc.CallOption) (*ListAlertPoliciesResponse, error) + // Gets a single alerting policy. + GetAlertPolicy(ctx context.Context, in *GetAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) + // Creates a new alerting policy. + CreateAlertPolicy(ctx context.Context, in *CreateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) + // Deletes an alerting policy. 
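[Editor's note: the update-mask semantics documented above are easiest to see in a sparse update; a sketch assuming the `Name` and `DisplayName` fields of the AlertPolicy message defined earlier in this file, with placeholder arguments.]

package alertdemo

import (
	context "golang.org/x/net/context"
	monitoring "google.golang.org/genproto/googleapis/monitoring/v3"
	field_mask "google.golang.org/genproto/protobuf/field_mask"
)

// renamePolicy updates only display_name; every other field of the existing
// policy is retained because it is absent from the mask.
func renamePolicy(ctx context.Context, client monitoring.AlertPolicyServiceClient, policyName, newTitle string) (*monitoring.AlertPolicy, error) {
	return client.UpdateAlertPolicy(ctx, &monitoring.UpdateAlertPolicyRequest{
		UpdateMask:  &field_mask.FieldMask{Paths: []string{"display_name"}},
		AlertPolicy: &monitoring.AlertPolicy{Name: policyName, DisplayName: newTitle},
	})
}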
+ DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) + // Updates an alerting policy. You can either replace the entire policy with + // a new one or replace only certain fields in the current alerting policy by + // specifying the fields to be updated via `updateMask`. Returns the + // updated alerting policy. + UpdateAlertPolicy(ctx context.Context, in *UpdateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) +} + +type alertPolicyServiceClient struct { + cc *grpc.ClientConn +} + +func NewAlertPolicyServiceClient(cc *grpc.ClientConn) AlertPolicyServiceClient { + return &alertPolicyServiceClient{cc} +} + +func (c *alertPolicyServiceClient) ListAlertPolicies(ctx context.Context, in *ListAlertPoliciesRequest, opts ...grpc.CallOption) (*ListAlertPoliciesResponse, error) { + out := new(ListAlertPoliciesResponse) + err := grpc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *alertPolicyServiceClient) GetAlertPolicy(ctx context.Context, in *GetAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) { + out := new(AlertPolicy) + err := grpc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *alertPolicyServiceClient) CreateAlertPolicy(ctx context.Context, in *CreateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) { + out := new(AlertPolicy) + err := grpc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *alertPolicyServiceClient) DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) { + out := new(google_protobuf5.Empty) + err := grpc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *alertPolicyServiceClient) UpdateAlertPolicy(ctx context.Context, in *UpdateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) { + out := new(AlertPolicy) + err := grpc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for AlertPolicyService service + +type AlertPolicyServiceServer interface { + // Lists the existing alerting policies for the project. + ListAlertPolicies(context.Context, *ListAlertPoliciesRequest) (*ListAlertPoliciesResponse, error) + // Gets a single alerting policy. + GetAlertPolicy(context.Context, *GetAlertPolicyRequest) (*AlertPolicy, error) + // Creates a new alerting policy. + CreateAlertPolicy(context.Context, *CreateAlertPolicyRequest) (*AlertPolicy, error) + // Deletes an alerting policy. + DeleteAlertPolicy(context.Context, *DeleteAlertPolicyRequest) (*google_protobuf5.Empty, error) + // Updates an alerting policy. You can either replace the entire policy with + // a new one or replace only certain fields in the current alerting policy by + // specifying the fields to be updated via `updateMask`. Returns the + // updated alerting policy. 
+ UpdateAlertPolicy(context.Context, *UpdateAlertPolicyRequest) (*AlertPolicy, error) +} + +func RegisterAlertPolicyServiceServer(s *grpc.Server, srv AlertPolicyServiceServer) { + s.RegisterService(&_AlertPolicyService_serviceDesc, srv) +} + +func _AlertPolicyService_ListAlertPolicies_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListAlertPoliciesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).ListAlertPolicies(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).ListAlertPolicies(ctx, req.(*ListAlertPoliciesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AlertPolicyService_GetAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetAlertPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).GetAlertPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).GetAlertPolicy(ctx, req.(*GetAlertPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AlertPolicyService_CreateAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateAlertPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).CreateAlertPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).CreateAlertPolicy(ctx, req.(*CreateAlertPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AlertPolicyService_DeleteAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteAlertPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).DeleteAlertPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).DeleteAlertPolicy(ctx, req.(*DeleteAlertPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AlertPolicyService_UpdateAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateAlertPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).UpdateAlertPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ 
+ Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).UpdateAlertPolicy(ctx, req.(*UpdateAlertPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _AlertPolicyService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.AlertPolicyService", + HandlerType: (*AlertPolicyServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListAlertPolicies", + Handler: _AlertPolicyService_ListAlertPolicies_Handler, + }, + { + MethodName: "GetAlertPolicy", + Handler: _AlertPolicyService_GetAlertPolicy_Handler, + }, + { + MethodName: "CreateAlertPolicy", + Handler: _AlertPolicyService_CreateAlertPolicy_Handler, + }, + { + MethodName: "DeleteAlertPolicy", + Handler: _AlertPolicyService_DeleteAlertPolicy_Handler, + }, + { + MethodName: "UpdateAlertPolicy", + Handler: _AlertPolicyService_UpdateAlertPolicy_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/alert_service.proto", +} + +func init() { proto.RegisterFile("google/monitoring/v3/alert_service.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 656 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0x41, 0x6f, 0xd3, 0x4c, + 0x10, 0x95, 0x93, 0x36, 0x5f, 0xbb, 0xfd, 0x5a, 0x94, 0x15, 0x54, 0xae, 0x0b, 0x52, 0x30, 0x2a, + 0x54, 0xad, 0xb0, 0xa5, 0xf8, 0x04, 0x15, 0x48, 0xa4, 0x85, 0xf6, 0x40, 0xa5, 0x28, 0x85, 0x1e, + 0x50, 0xa4, 0x68, 0x93, 0x4c, 0xac, 0x25, 0x8e, 0xd7, 0x78, 0x37, 0x11, 0x29, 0xea, 0x85, 0x23, + 0x12, 0xe2, 0xc0, 0x99, 0x03, 0x47, 0x38, 0x20, 0x7e, 0x07, 0x57, 0xfe, 0x02, 0x3f, 0x04, 0x79, + 0xed, 0x34, 0x76, 0x6d, 0xab, 0x16, 0xb7, 0xcc, 0xce, 0xdb, 0x99, 0xb7, 0x6f, 0xde, 0x38, 0x68, + 0xdb, 0x66, 0xcc, 0x76, 0xc0, 0x1c, 0x31, 0x97, 0x0a, 0xe6, 0x53, 0xd7, 0x36, 0x27, 0x96, 0x49, + 0x1c, 0xf0, 0x45, 0x87, 0x83, 0x3f, 0xa1, 0x3d, 0x30, 0x3c, 0x9f, 0x09, 0x86, 0xaf, 0x87, 0x48, + 0x63, 0x8e, 0x34, 0x26, 0x96, 0x76, 0x33, 0xba, 0x4f, 0x3c, 0x6a, 0x12, 0xd7, 0x65, 0x82, 0x08, + 0xca, 0x5c, 0x1e, 0xde, 0xd1, 0x6a, 0xf9, 0xd5, 0x23, 0xc4, 0x66, 0x84, 0x90, 0x51, 0x77, 0x3c, + 0x30, 0x61, 0xe4, 0x89, 0xe9, 0xa5, 0xeb, 0x17, 0xc9, 0x01, 0x05, 0xa7, 0xdf, 0x19, 0x11, 0x3e, + 0x0c, 0x11, 0xba, 0x40, 0xea, 0xbe, 0x0f, 0x44, 0xc0, 0x93, 0xa0, 0x66, 0x93, 0x39, 0xb4, 0x37, + 0x6d, 0xc1, 0x9b, 0x31, 0x70, 0x81, 0x31, 0x5a, 0x70, 0xc9, 0x08, 0xd4, 0x72, 0x4d, 0xd9, 0x5e, + 0x6e, 0xc9, 0xdf, 0xf8, 0x00, 0xfd, 0x1f, 0xbe, 0xcd, 0x93, 0x50, 0xb5, 0x54, 0x53, 0xb6, 0x57, + 0xea, 0xb7, 0x8d, 0xac, 0xb7, 0x19, 0xf1, 0x9a, 0x2b, 0x64, 0x1e, 0xe8, 0xbb, 0xe8, 0xc6, 0x21, + 0x88, 0x62, 0x2d, 0xf5, 0x2f, 0x0a, 0x52, 0x9f, 0x53, 0x1e, 0x83, 0x53, 0xe0, 0x97, 0x2f, 0x2c, + 0xc4, 0x38, 0xae, 0xa3, 0xca, 0x80, 0x3a, 0x02, 0x7c, 0x75, 0x51, 0x9e, 0x46, 0x11, 0xde, 0x40, + 0x4b, 0xcc, 0xef, 0x83, 0xdf, 0xe9, 0x4e, 0xd5, 0x8a, 0xcc, 0xfc, 0x27, 0xe3, 0xc6, 0x14, 0x6f, + 0xa2, 0x65, 0x8f, 0xd8, 0xd0, 0xe1, 0xf4, 0x0c, 0xe4, 0x9b, 0x16, 0x5b, 0x4b, 0xc1, 0xc1, 0x09, + 0x3d, 0x03, 0x7c, 0x0b, 0x21, 0x99, 0x14, 0x6c, 0x08, 0x6e, 0x44, 0x4d, 0xc2, 0x5f, 0x04, 0x07, + 0xfa, 0x47, 0x05, 0x6d, 0x64, 0xf0, 0xe3, 0x1e, 0x73, 0x39, 0xe0, 0x23, 0xb4, 0x16, 0x13, 0x8c, + 0x02, 0x57, 0xcb, 0xb5, 0x72, 0x31, 0xc9, 0x56, 0x49, 0xbc, 0x22, 0xbe, 0x8b, 0xae, 0xb9, 0xf0, + 0x56, 0x74, 0x62, 0x5c, 0x4a, 0x92, 0xcb, 0x6a, 0x70, 0xdc, 0xbc, 0xe0, 0x13, 0xe8, 0xf5, 0xd2, + 
0xeb, 0x67, 0xcf, 0x74, 0x0f, 0xad, 0x8c, 0x65, 0x4e, 0x9a, 0x20, 0x1a, 0x9f, 0x36, 0xe3, 0x32, + 0xf3, 0x89, 0xf1, 0x2c, 0xf0, 0xc9, 0x31, 0xe1, 0xc3, 0x16, 0x0a, 0xe1, 0xc1, 0xef, 0xd4, 0xf0, + 0xcb, 0xff, 0x34, 0x7c, 0x03, 0xa9, 0x07, 0xe0, 0x40, 0x51, 0xcb, 0xd5, 0x7f, 0x54, 0x10, 0x8e, + 0x41, 0x4f, 0xc2, 0xa5, 0xc2, 0x5f, 0x15, 0x54, 0x4d, 0xc9, 0x8e, 0x8d, 0x6c, 0x32, 0x79, 0xfe, + 0xd1, 0xcc, 0xc2, 0xf8, 0x70, 0x9e, 0xfa, 0xee, 0xfb, 0xdf, 0x7f, 0x3e, 0x97, 0xb6, 0xf0, 0x9d, + 0x60, 0x11, 0xdf, 0x05, 0x04, 0x1f, 0x79, 0x3e, 0x7b, 0x0d, 0x3d, 0xc1, 0xcd, 0x9d, 0x73, 0x33, + 0x39, 0xb2, 0x4f, 0x0a, 0x5a, 0x4b, 0x1a, 0x1d, 0xef, 0x66, 0x37, 0xcc, 0x5c, 0x07, 0xed, 0x6a, + 0x69, 0xf5, 0xfb, 0x92, 0xcf, 0x3d, 0xbc, 0x95, 0xc5, 0x27, 0x49, 0xc7, 0xdc, 0x39, 0x97, 0xaa, + 0xa5, 0x16, 0x3e, 0x4f, 0xb5, 0xbc, 0x2f, 0x43, 0x11, 0x5e, 0x0f, 0x24, 0x2f, 0x4b, 0x2f, 0xa2, + 0xd3, 0xc3, 0x84, 0xad, 0xf0, 0x07, 0x05, 0x55, 0x53, 0x0e, 0xc9, 0xe3, 0x98, 0x67, 0x25, 0x6d, + 0x3d, 0x65, 0xea, 0xa7, 0xc1, 0x97, 0x71, 0x26, 0xd8, 0x4e, 0x41, 0xc1, 0x7e, 0x2a, 0xa8, 0x9a, + 0xda, 0xa6, 0x3c, 0x32, 0x79, 0x6b, 0x57, 0x44, 0xb0, 0x23, 0xc9, 0xab, 0x51, 0xaf, 0x4b, 0x5e, + 0x71, 0x41, 0x8c, 0xab, 0x48, 0x26, 0xf5, 0x6b, 0x7c, 0x53, 0x90, 0xda, 0x63, 0xa3, 0xcc, 0x96, + 0x8d, 0xaa, 0xec, 0x19, 0x2d, 0x51, 0x33, 0x90, 0xa6, 0xa9, 0xbc, 0x7a, 0x1c, 0x41, 0x6d, 0xe6, + 0x10, 0xd7, 0x36, 0x98, 0x6f, 0x9b, 0x36, 0xb8, 0x52, 0x38, 0x33, 0x4c, 0x11, 0x8f, 0xf2, 0xe4, + 0xbf, 0xd0, 0xde, 0x3c, 0xfa, 0x5e, 0xd2, 0x0e, 0xc3, 0x02, 0xfb, 0x0e, 0x1b, 0xf7, 0x8d, 0xe3, + 0x79, 0xc7, 0x53, 0xeb, 0xd7, 0x2c, 0xd9, 0x96, 0xc9, 0xf6, 0x3c, 0xd9, 0x3e, 0xb5, 0xba, 0x15, + 0xd9, 0xc4, 0xfa, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x6f, 0x1f, 0xe6, 0xf0, 0x47, 0x07, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go index 4d967f529..602399671 100644 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go @@ -1,61 +1,12 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/monitoring/v3/common.proto -/* -Package monitoring is a generated protocol buffer package. 
- -It is generated from these files: - google/monitoring/v3/common.proto - google/monitoring/v3/group.proto - google/monitoring/v3/group_service.proto - google/monitoring/v3/metric.proto - google/monitoring/v3/metric_service.proto - google/monitoring/v3/uptime.proto - google/monitoring/v3/uptime_service.proto - -It has these top-level messages: - TypedValue - TimeInterval - Aggregation - Group - ListGroupsRequest - ListGroupsResponse - GetGroupRequest - CreateGroupRequest - UpdateGroupRequest - DeleteGroupRequest - ListGroupMembersRequest - ListGroupMembersResponse - Point - TimeSeries - ListMonitoredResourceDescriptorsRequest - ListMonitoredResourceDescriptorsResponse - GetMonitoredResourceDescriptorRequest - ListMetricDescriptorsRequest - ListMetricDescriptorsResponse - GetMetricDescriptorRequest - CreateMetricDescriptorRequest - DeleteMetricDescriptorRequest - ListTimeSeriesRequest - ListTimeSeriesResponse - CreateTimeSeriesRequest - CreateTimeSeriesError - UptimeCheckConfig - UptimeCheckIp - ListUptimeCheckConfigsRequest - ListUptimeCheckConfigsResponse - GetUptimeCheckConfigRequest - CreateUptimeCheckConfigRequest - UpdateUptimeCheckConfigRequest - DeleteUptimeCheckConfigRequest - ListUptimeCheckIpsRequest - ListUptimeCheckIpsResponse -*/ package monitoring import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" import google_api2 "google.golang.org/genproto/googleapis/api/distribution" import google_protobuf3 "github.com/golang/protobuf/ptypes/duration" import google_protobuf2 "github.com/golang/protobuf/ptypes/timestamp" @@ -65,11 +16,87 @@ var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +// Specifies an ordering relationship on two arguments, here called left and +// right. +type ComparisonType int32 + +const ( + // No ordering relationship is specified. + ComparisonType_COMPARISON_UNSPECIFIED ComparisonType = 0 + // The left argument is greater than the right argument. + ComparisonType_COMPARISON_GT ComparisonType = 1 + // The left argument is greater than or equal to the right argument. + ComparisonType_COMPARISON_GE ComparisonType = 2 + // The left argument is less than the right argument. + ComparisonType_COMPARISON_LT ComparisonType = 3 + // The left argument is less than or equal to the right argument. + ComparisonType_COMPARISON_LE ComparisonType = 4 + // The left argument is equal to the right argument. + ComparisonType_COMPARISON_EQ ComparisonType = 5 + // The left argument is not equal to the right argument. 
+	ComparisonType_COMPARISON_NE ComparisonType = 6
+)
+
+var ComparisonType_name = map[int32]string{
+	0: "COMPARISON_UNSPECIFIED",
+	1: "COMPARISON_GT",
+	2: "COMPARISON_GE",
+	3: "COMPARISON_LT",
+	4: "COMPARISON_LE",
+	5: "COMPARISON_EQ",
+	6: "COMPARISON_NE",
+}
+var ComparisonType_value = map[string]int32{
+	"COMPARISON_UNSPECIFIED": 0,
+	"COMPARISON_GT":          1,
+	"COMPARISON_GE":          2,
+	"COMPARISON_LT":          3,
+	"COMPARISON_LE":          4,
+	"COMPARISON_EQ":          5,
+	"COMPARISON_NE":          6,
+}
+
+func (x ComparisonType) String() string {
+	return proto.EnumName(ComparisonType_name, int32(x))
+}
+func (ComparisonType) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{0} }
+
+// The tier of service for a Stackdriver account. Please see the
+// [service tiers documentation](https://cloud.google.com/monitoring/accounts/tiers)
+// for more details.
+type ServiceTier int32
+
+const (
+	// An invalid sentinel value, used to indicate that a tier has not
+	// been provided explicitly.
+	ServiceTier_SERVICE_TIER_UNSPECIFIED ServiceTier = 0
+	// The Stackdriver Basic tier, a free tier of service that provides basic
+	// features, a moderate allotment of logs, and access to built-in metrics.
+	// A number of features are not available in this tier. For more details,
+	// see [the service tiers documentation](https://cloud.google.com/monitoring/accounts/tiers).
+	ServiceTier_SERVICE_TIER_BASIC ServiceTier = 1
+	// The Stackdriver Premium tier, a higher, more expensive tier of service
+	// that provides access to all Stackdriver features, lets you use Stackdriver
+	// with AWS accounts, and has larger allotments for logs and metrics. For
+	// more details, see [the service tiers documentation](https://cloud.google.com/monitoring/accounts/tiers).
+	ServiceTier_SERVICE_TIER_PREMIUM ServiceTier = 2
+)
+
+var ServiceTier_name = map[int32]string{
+	0: "SERVICE_TIER_UNSPECIFIED",
+	1: "SERVICE_TIER_BASIC",
+	2: "SERVICE_TIER_PREMIUM",
+}
+var ServiceTier_value = map[string]int32{
+	"SERVICE_TIER_UNSPECIFIED": 0,
+	"SERVICE_TIER_BASIC":       1,
+	"SERVICE_TIER_PREMIUM":     2,
+}
+
+func (x ServiceTier) String() string {
+	return proto.EnumName(ServiceTier_name, int32(x))
+}
+func (ServiceTier) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{1} }
 
 // The Aligner describes how to bring the data points in a single
 // time series into temporal alignment.
@@ -85,11 +112,22 @@ const (
 	// delta metric to a delta metric requires that the alignment
 	// period be increased. The value type of the result is the same
 	// as the value type of the input.
+	//
+	// One can think of this aligner as a rate but without time units; that
+	// is, the output is conceptually (second_point - first_point).
 	Aggregation_ALIGN_DELTA Aggregation_Aligner = 1
 	// Align and convert to a rate. This alignment is valid for
 	// cumulative metrics and delta metrics with numeric values. The output is a
 	// gauge metric with value type
 	// [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+	//
+	// One can think of this aligner as conceptually providing the slope of
+	// the line that passes through the value at the start and end of the
+	// window. In other words, this is conceptually ((y1 - y0)/(t1 - t0)),
+	// and the output unit is one that has a "/time" dimension.
+	//
+	// If, by rate, you are looking for percentage change, see the
+	// `ALIGN_PERCENT_CHANGE` aligner option.
 	Aggregation_ALIGN_RATE Aggregation_Aligner = 2
 	// Align by interpolating between adjacent points around the
 	// period boundary. This alignment is valid for gauge
@@ -144,6 +182,12 @@ const (
 	// [INT64][google.api.MetricDescriptor.ValueType.INT64].
 	Aggregation_ALIGN_COUNT_TRUE Aggregation_Aligner = 16
 	// Align time series via aggregation. The resulting data point in
+	// the alignment period is the count of False-valued data points in the
+	// period. This alignment is valid for gauge metrics with
+	// Boolean values. The value type of the output is
+	// [INT64][google.api.MetricDescriptor.ValueType.INT64].
+	Aggregation_ALIGN_COUNT_FALSE Aggregation_Aligner = 24
+	// Align time series via aggregation. The resulting data point in
 	// the alignment period is the fraction of True-valued data points in the
 	// period. This alignment is valid for gauge metrics with Boolean values.
 	// The output value is in the range [0, 1] and has value type
@@ -173,6 +217,22 @@ const (
 	// with distribution values. The output is a gauge metric with value type
 	// [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
 	Aggregation_ALIGN_PERCENTILE_05 Aggregation_Aligner = 21
+	// Align and convert to a percentage change. This alignment is valid for
+	// gauge and delta metrics with numeric values. This alignment conceptually
+	// computes the equivalent of "((current - previous)/previous)*100",
+	// where the previous value is determined based on the alignmentPeriod.
+	// In the event that previous is 0, the calculated value is infinity, with the
+	// exception that if both (current - previous) and previous are 0 the
+	// calculated value is 0.
+	// A 10-minute moving mean is computed at each point of the time window
+	// prior to the above calculation to smooth the metric and prevent false
+	// positives from very short-lived spikes.
+	// Only applicable for data that is >= 0. Any values < 0 are treated as
+	// no data. While delta metrics are accepted by this alignment, special care
+	// should be taken that the values for the metric will always be positive.
+	// The output is a gauge metric with value type
+	// [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+ Aggregation_ALIGN_PERCENT_CHANGE Aggregation_Aligner = 23 ) var Aggregation_Aligner_name = map[int32]string{ @@ -188,36 +248,40 @@ var Aggregation_Aligner_name = map[int32]string{ 14: "ALIGN_SUM", 15: "ALIGN_STDDEV", 16: "ALIGN_COUNT_TRUE", + 24: "ALIGN_COUNT_FALSE", 17: "ALIGN_FRACTION_TRUE", 18: "ALIGN_PERCENTILE_99", 19: "ALIGN_PERCENTILE_95", 20: "ALIGN_PERCENTILE_50", 21: "ALIGN_PERCENTILE_05", + 23: "ALIGN_PERCENT_CHANGE", } var Aggregation_Aligner_value = map[string]int32{ - "ALIGN_NONE": 0, - "ALIGN_DELTA": 1, - "ALIGN_RATE": 2, - "ALIGN_INTERPOLATE": 3, - "ALIGN_NEXT_OLDER": 4, - "ALIGN_MIN": 10, - "ALIGN_MAX": 11, - "ALIGN_MEAN": 12, - "ALIGN_COUNT": 13, - "ALIGN_SUM": 14, - "ALIGN_STDDEV": 15, - "ALIGN_COUNT_TRUE": 16, - "ALIGN_FRACTION_TRUE": 17, - "ALIGN_PERCENTILE_99": 18, - "ALIGN_PERCENTILE_95": 19, - "ALIGN_PERCENTILE_50": 20, - "ALIGN_PERCENTILE_05": 21, + "ALIGN_NONE": 0, + "ALIGN_DELTA": 1, + "ALIGN_RATE": 2, + "ALIGN_INTERPOLATE": 3, + "ALIGN_NEXT_OLDER": 4, + "ALIGN_MIN": 10, + "ALIGN_MAX": 11, + "ALIGN_MEAN": 12, + "ALIGN_COUNT": 13, + "ALIGN_SUM": 14, + "ALIGN_STDDEV": 15, + "ALIGN_COUNT_TRUE": 16, + "ALIGN_COUNT_FALSE": 24, + "ALIGN_FRACTION_TRUE": 17, + "ALIGN_PERCENTILE_99": 18, + "ALIGN_PERCENTILE_95": 19, + "ALIGN_PERCENTILE_50": 20, + "ALIGN_PERCENTILE_05": 21, + "ALIGN_PERCENT_CHANGE": 23, } func (x Aggregation_Aligner) String() string { return proto.EnumName(Aggregation_Aligner_name, int32(x)) } -func (Aggregation_Aligner) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } +func (Aggregation_Aligner) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{2, 0} } // A Reducer describes how to aggregate data points from multiple // time series into a single time series. @@ -263,6 +327,11 @@ const ( // and gauge metrics of Boolean value type. The value type of // the output is [INT64][google.api.MetricDescriptor.ValueType.INT64]. Aggregation_REDUCE_COUNT_TRUE Aggregation_Reducer = 7 + // Reduce by computing the count of False-valued data points across time + // series for each alignment period. This reducer is valid for delta + // and gauge metrics of Boolean value type. The value type of + // the output is [INT64][google.api.MetricDescriptor.ValueType.INT64]. + Aggregation_REDUCE_COUNT_FALSE Aggregation_Reducer = 15 // Reduce by computing the fraction of True-valued data points across time // series for each alignment period. This reducer is valid for delta // and gauge metrics of Boolean value type. The output value is in the @@ -300,6 +369,7 @@ var Aggregation_Reducer_name = map[int32]string{ 5: "REDUCE_STDDEV", 6: "REDUCE_COUNT", 7: "REDUCE_COUNT_TRUE", + 15: "REDUCE_COUNT_FALSE", 8: "REDUCE_FRACTION_TRUE", 9: "REDUCE_PERCENTILE_99", 10: "REDUCE_PERCENTILE_95", @@ -315,6 +385,7 @@ var Aggregation_Reducer_value = map[string]int32{ "REDUCE_STDDEV": 5, "REDUCE_COUNT": 6, "REDUCE_COUNT_TRUE": 7, + "REDUCE_COUNT_FALSE": 15, "REDUCE_FRACTION_TRUE": 8, "REDUCE_PERCENTILE_99": 9, "REDUCE_PERCENTILE_95": 10, @@ -325,7 +396,7 @@ var Aggregation_Reducer_value = map[string]int32{ func (x Aggregation_Reducer) String() string { return proto.EnumName(Aggregation_Reducer_name, int32(x)) } -func (Aggregation_Reducer) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 1} } +func (Aggregation_Reducer) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{2, 1} } // A single strongly-typed value. 
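[Editor's note: to see how an aligner and reducer pair up in an Aggregation, here is a hedged sketch under the same vendored-import assumption; the one-minute window and zone grouping are illustrative choices, not defaults.]

package alertdemo

import (
	duration "github.com/golang/protobuf/ptypes/duration"
	monitoring "google.golang.org/genproto/googleapis/monitoring/v3"
)

// rateByZone turns a cumulative metric into a per-minute rate for each
// stream, then averages the aligned streams within each zone.
func rateByZone() *monitoring.Aggregation {
	return &monitoring.Aggregation{
		// Bucket each series into one-minute windows before combining.
		AlignmentPeriod:    &duration.Duration{Seconds: 60},
		PerSeriesAligner:   monitoring.Aggregation_ALIGN_RATE,
		CrossSeriesReducer: monitoring.Aggregation_REDUCE_MEAN,
		GroupByFields:      []string{"resource.label.zone"},
	}
}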
type TypedValue struct { @@ -343,7 +414,7 @@ type TypedValue struct { func (m *TypedValue) Reset() { *m = TypedValue{} } func (m *TypedValue) String() string { return proto.CompactTextString(m) } func (*TypedValue) ProtoMessage() {} -func (*TypedValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*TypedValue) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } type isTypedValue_Value interface { isTypedValue_Value() @@ -544,7 +615,7 @@ type TimeInterval struct { func (m *TimeInterval) Reset() { *m = TimeInterval{} } func (m *TimeInterval) String() string { return proto.CompactTextString(m) } func (*TimeInterval) ProtoMessage() {} -func (*TimeInterval) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (*TimeInterval) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } func (m *TimeInterval) GetEndTime() *google_protobuf2.Timestamp { if m != nil { @@ -562,8 +633,9 @@ func (m *TimeInterval) GetStartTime() *google_protobuf2.Timestamp { // Describes how to combine multiple time series to provide different views of // the data. Aggregation consists of an alignment step on individual time -// series (`per_series_aligner`) followed by an optional reduction of the data -// across different time series (`cross_series_reducer`). For more details, see +// series (`alignment_period` and `per_series_aligner`) followed by an optional +// reduction step of the data across the aligned time series +// (`cross_series_reducer` and `group_by_fields`). For more details, see // [Aggregation](/monitoring/api/learn_more#aggregation). type Aggregation struct { // The alignment period for per-[time series][google.monitoring.v3.TimeSeries] @@ -619,7 +691,7 @@ type Aggregation struct { func (m *Aggregation) Reset() { *m = Aggregation{} } func (m *Aggregation) String() string { return proto.CompactTextString(m) } func (*Aggregation) ProtoMessage() {} -func (*Aggregation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (*Aggregation) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } func (m *Aggregation) GetAlignmentPeriod() *google_protobuf3.Duration { if m != nil { @@ -653,62 +725,74 @@ func init() { proto.RegisterType((*TypedValue)(nil), "google.monitoring.v3.TypedValue") proto.RegisterType((*TimeInterval)(nil), "google.monitoring.v3.TimeInterval") proto.RegisterType((*Aggregation)(nil), "google.monitoring.v3.Aggregation") + proto.RegisterEnum("google.monitoring.v3.ComparisonType", ComparisonType_name, ComparisonType_value) + proto.RegisterEnum("google.monitoring.v3.ServiceTier", ServiceTier_name, ServiceTier_value) proto.RegisterEnum("google.monitoring.v3.Aggregation_Aligner", Aggregation_Aligner_name, Aggregation_Aligner_value) proto.RegisterEnum("google.monitoring.v3.Aggregation_Reducer", Aggregation_Reducer_name, Aggregation_Reducer_value) } -func init() { proto.RegisterFile("google/monitoring/v3/common.proto", fileDescriptor0) } +func init() { proto.RegisterFile("google/monitoring/v3/common.proto", fileDescriptor2) } -var fileDescriptor0 = []byte{ - // 792 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0xdd, 0x6a, 0xe3, 0x46, - 0x14, 0xb6, 0xec, 0x64, 0x1d, 0x1f, 0x39, 0xf1, 0x64, 0xd6, 0x4b, 0xdd, 0x40, 0xbb, 0x5e, 0x17, - 0x8a, 0x7b, 0x23, 0x87, 0xb8, 0x2e, 0x84, 0x42, 0x41, 0xb1, 0xb5, 0x1b, 0x83, 0x23, 0x9b, 0x59, - 0x25, 0x0d, 0x6d, 0x40, 0xc8, 0xd1, 0xac, 0x10, 0xc8, 0x1a, 0x31, 0x92, 0x0d, 0xb9, 0xeb, 0x6b, - 0xf4, 0xba, 
0x77, 0xfb, 0x28, 0x7d, 0x84, 0x3e, 0x44, 0x9f, 0xa1, 0x68, 0x66, 0xb4, 0x52, 0x5a, - 0x97, 0xf6, 0xf2, 0xfb, 0x39, 0xdf, 0xe8, 0x7c, 0x23, 0xd9, 0xf0, 0x26, 0x60, 0x2c, 0x88, 0xe8, - 0x68, 0xc3, 0xe2, 0x30, 0x63, 0x3c, 0x8c, 0x83, 0xd1, 0x6e, 0x3c, 0x7a, 0x64, 0x9b, 0x0d, 0x8b, - 0x8d, 0x84, 0xb3, 0x8c, 0xe1, 0xae, 0xb4, 0x18, 0xa5, 0xc5, 0xd8, 0x8d, 0xcf, 0xbe, 0x50, 0x83, - 0x5e, 0x12, 0x8e, 0xfc, 0x30, 0xcd, 0x78, 0xb8, 0xde, 0x66, 0x61, 0x31, 0x74, 0xf6, 0xa5, 0x92, - 0x05, 0x5a, 0x6f, 0x3f, 0x8c, 0xfc, 0x2d, 0xf7, 0x2a, 0xfa, 0xeb, 0xbf, 0xeb, 0x59, 0xb8, 0xa1, - 0x69, 0xe6, 0x6d, 0x12, 0x69, 0x18, 0xfc, 0xa9, 0x01, 0x38, 0x4f, 0x09, 0xf5, 0xef, 0xbc, 0x68, - 0x4b, 0xf1, 0x6b, 0x80, 0x35, 0x63, 0x91, 0xbb, 0xcb, 0x51, 0x4f, 0xeb, 0x6b, 0xc3, 0xa3, 0xeb, - 0x1a, 0x69, 0xe5, 0x9c, 0x34, 0xbc, 0x01, 0x3d, 0x8c, 0xb3, 0xef, 0xbe, 0x55, 0x8e, 0x7a, 0x5f, - 0x1b, 0x36, 0xae, 0x6b, 0x04, 0x04, 0x29, 0x2d, 0x5f, 0x41, 0xdb, 0x67, 0xdb, 0x75, 0x44, 0x95, - 0xa7, 0xd1, 0xd7, 0x86, 0xda, 0x75, 0x8d, 0xe8, 0x92, 0xfd, 0x64, 0xca, 0x97, 0x89, 0x03, 0x65, - 0x3a, 0xe8, 0x6b, 0xc3, 0x56, 0x6e, 0x92, 0xac, 0x34, 0xcd, 0x01, 0x57, 0x77, 0x56, 0xd6, 0xc3, - 0xbe, 0x36, 0xd4, 0x2f, 0x7a, 0x86, 0xea, 0xcb, 0x4b, 0x42, 0x63, 0x56, 0x71, 0x5d, 0xd7, 0xc8, - 0x69, 0x75, 0x4a, 0x44, 0x5d, 0x35, 0xe1, 0x50, 0x4c, 0x0f, 0x7e, 0xd1, 0xa0, 0xed, 0x84, 0x1b, - 0x3a, 0x8f, 0x33, 0xca, 0x77, 0x5e, 0x84, 0x27, 0x70, 0x44, 0x63, 0xdf, 0xcd, 0x8b, 0x11, 0xeb, - 0xe8, 0x17, 0x67, 0x45, 0x74, 0xd1, 0x9a, 0xe1, 0x14, 0xad, 0x91, 0x26, 0x8d, 0xfd, 0x1c, 0xe1, - 0x4b, 0x80, 0x34, 0xf3, 0x78, 0x26, 0x07, 0xb5, 0xff, 0x1c, 0x6c, 0x09, 0x77, 0x8e, 0x07, 0x1f, - 0x9b, 0xa0, 0x9b, 0x41, 0xc0, 0x69, 0x20, 0xae, 0x0a, 0xcf, 0x00, 0x79, 0x51, 0x18, 0xc4, 0x1b, - 0x1a, 0x67, 0x6e, 0x42, 0x79, 0xc8, 0x7c, 0x15, 0xf8, 0xf9, 0x3f, 0x02, 0x67, 0xea, 0x7e, 0x49, - 0xe7, 0xd3, 0xc8, 0x4a, 0x4c, 0xe0, 0x1f, 0x01, 0x27, 0x94, 0xbb, 0x29, 0xe5, 0x21, 0x4d, 0x5d, - 0xa1, 0x52, 0x2e, 0x36, 0x3a, 0xb9, 0xf8, 0xc6, 0xd8, 0xf7, 0x72, 0x19, 0x95, 0x87, 0x30, 0x4c, - 0x39, 0x40, 0x50, 0x42, 0xf9, 0x7b, 0x91, 0xa1, 0x18, 0xfc, 0x33, 0x74, 0x1f, 0x39, 0x4b, 0xd3, - 0x22, 0x9a, 0x53, 0x7f, 0xfb, 0x48, 0xb9, 0xb8, 0xb2, 0xff, 0x15, 0x4d, 0xe4, 0x00, 0xc1, 0x22, - 0x46, 0x86, 0x2b, 0x0e, 0x7f, 0x0d, 0x9d, 0x80, 0xb3, 0x6d, 0xe2, 0xae, 0x9f, 0xdc, 0x0f, 0x21, - 0x8d, 0xfc, 0xb4, 0x77, 0xd8, 0x6f, 0x0c, 0x5b, 0xe4, 0x58, 0xd0, 0x57, 0x4f, 0x6f, 0x05, 0x39, - 0xf8, 0xa3, 0x0e, 0xcd, 0xe2, 0x81, 0x4e, 0x00, 0xcc, 0xc5, 0xfc, 0x9d, 0xed, 0xda, 0x4b, 0xdb, - 0x42, 0x35, 0xdc, 0x01, 0x5d, 0xe2, 0x99, 0xb5, 0x70, 0x4c, 0xa4, 0x95, 0x06, 0x62, 0x3a, 0x16, - 0xaa, 0xe3, 0x57, 0x70, 0x2a, 0xf1, 0xdc, 0x76, 0x2c, 0xb2, 0x5a, 0x2e, 0x72, 0xba, 0x81, 0xbb, - 0x80, 0x54, 0x8e, 0x75, 0xef, 0xb8, 0xcb, 0xc5, 0xcc, 0x22, 0xe8, 0x00, 0x1f, 0x43, 0x4b, 0xb2, - 0x37, 0x73, 0x1b, 0x41, 0x05, 0x9a, 0xf7, 0x48, 0x2f, 0xa3, 0x6f, 0x2c, 0xd3, 0x46, 0xed, 0xf2, - 0xec, 0xe9, 0xf2, 0xd6, 0x76, 0xd0, 0x71, 0xe9, 0x7f, 0x7f, 0x7b, 0x83, 0x4e, 0x30, 0x82, 0xb6, - 0x82, 0xce, 0x6c, 0x66, 0xdd, 0xa1, 0x4e, 0x79, 0xaa, 0x98, 0x70, 0x1d, 0x72, 0x6b, 0x21, 0x84, - 0x3f, 0x83, 0x97, 0x92, 0x7d, 0x4b, 0xcc, 0xa9, 0x33, 0x5f, 0xda, 0x52, 0x38, 0x2d, 0x85, 0x95, - 0x45, 0xa6, 0x96, 0xed, 0xcc, 0x17, 0x96, 0x7b, 0x79, 0x89, 0xf0, 0x7e, 0x61, 0x82, 0x5e, 0xee, - 0x15, 0x26, 0xe7, 0xa8, 0xbb, 0x57, 0x38, 0x9f, 0xa0, 0x57, 0x83, 0x5f, 0xeb, 0xd0, 0x2c, 0x2e, - 0xa4, 0x03, 0x3a, 0xb1, 0x66, 0xb7, 0x53, 0xab, 0xd2, 0xae, 0x22, 0xc4, 0xca, 0xa2, 0xdd, 0x82, - 0x98, 0xdb, 0xa8, 0x5e, 0xc5, 0xe6, 
0x3d, 0x6a, 0x54, 0x70, 0x5e, 0xc1, 0x01, 0x3e, 0x85, 0xe3, - 0x02, 0xcb, 0x0e, 0x0e, 0xf3, 0x56, 0x14, 0x25, 0x6b, 0x7b, 0x91, 0x5f, 0x51, 0x95, 0x91, 0xdb, - 0x37, 0x71, 0x0f, 0xba, 0x8a, 0x7e, 0xde, 0xcb, 0x51, 0x45, 0x79, 0x5e, 0x4c, 0xeb, 0x5f, 0x94, - 0x09, 0x82, 0xfd, 0xca, 0xe4, 0x1c, 0xe9, 0xfb, 0x95, 0xf3, 0x09, 0x6a, 0x5f, 0xfd, 0xa6, 0x41, - 0xef, 0x91, 0x6d, 0xf6, 0xbe, 0xe5, 0x57, 0xfa, 0x54, 0xfc, 0x82, 0xaf, 0xf2, 0xaf, 0x73, 0xa5, - 0xfd, 0xf4, 0x83, 0x32, 0x05, 0x2c, 0xf2, 0xe2, 0xc0, 0x60, 0x3c, 0x18, 0x05, 0x34, 0x16, 0xdf, - 0xee, 0x48, 0x4a, 0x5e, 0x12, 0xa6, 0xcf, 0xff, 0x04, 0xbe, 0x2f, 0xd1, 0xc7, 0xfa, 0xd9, 0x3b, - 0x19, 0x30, 0x8d, 0xd8, 0xd6, 0x37, 0x6e, 0xca, 0xb3, 0xee, 0xc6, 0xbf, 0x17, 0xe2, 0x83, 0x10, - 0x1f, 0x4a, 0xf1, 0xe1, 0x6e, 0xbc, 0x7e, 0x21, 0x0e, 0x19, 0xff, 0x15, 0x00, 0x00, 0xff, 0xff, - 0x57, 0xa4, 0xb9, 0xce, 0x68, 0x06, 0x00, 0x00, +var fileDescriptor2 = []byte{ + // 954 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x95, 0xc1, 0x6e, 0xe3, 0x44, + 0x18, 0xc7, 0xe3, 0xa4, 0x6d, 0x9a, 0xcf, 0x6d, 0x33, 0x9d, 0xed, 0x76, 0x43, 0xb5, 0xb0, 0xd9, + 0x22, 0xa1, 0xb0, 0x07, 0xa7, 0x6a, 0x09, 0x52, 0x85, 0x84, 0xe4, 0x3a, 0xd3, 0xd6, 0x52, 0xe2, + 0x84, 0x89, 0x53, 0x2a, 0x28, 0xb2, 0x9c, 0x66, 0xd6, 0xb2, 0x94, 0x78, 0x2c, 0xdb, 0xa9, 0xd4, + 0x1b, 0x77, 0xde, 0x81, 0x0b, 0x37, 0x6e, 0xbc, 0x06, 0x0f, 0xc3, 0x85, 0x17, 0x40, 0x9e, 0x71, + 0xd6, 0x4e, 0x08, 0x62, 0x8f, 0xdf, 0xef, 0xff, 0xff, 0xbe, 0x99, 0xf9, 0x8f, 0x35, 0x86, 0xb7, + 0x1e, 0xe7, 0xde, 0x8c, 0xb5, 0xe7, 0x3c, 0xf0, 0x13, 0x1e, 0xf9, 0x81, 0xd7, 0x7e, 0xba, 0x68, + 0x3f, 0xf2, 0xf9, 0x9c, 0x07, 0x5a, 0x18, 0xf1, 0x84, 0xe3, 0x23, 0x69, 0xd1, 0x72, 0x8b, 0xf6, + 0x74, 0x71, 0xf2, 0x3a, 0x6b, 0x74, 0x43, 0xbf, 0xed, 0x06, 0x01, 0x4f, 0xdc, 0xc4, 0xe7, 0x41, + 0x2c, 0x7b, 0x4e, 0x3e, 0x2d, 0xa8, 0x53, 0x3f, 0x4e, 0x22, 0x7f, 0xb2, 0x48, 0xf5, 0x4c, 0xfe, + 0x2c, 0x93, 0x45, 0x35, 0x59, 0xbc, 0x6f, 0x4f, 0x17, 0x91, 0x5b, 0xd0, 0xdf, 0xac, 0xeb, 0x89, + 0x3f, 0x67, 0x71, 0xe2, 0xce, 0x43, 0x69, 0x38, 0xfd, 0x4b, 0x01, 0xb0, 0x9f, 0x43, 0x36, 0xbd, + 0x73, 0x67, 0x0b, 0x86, 0xdf, 0x00, 0x4c, 0x38, 0x9f, 0x39, 0x4f, 0x69, 0xd5, 0x50, 0x9a, 0x4a, + 0x6b, 0xf7, 0xb6, 0x44, 0x6b, 0x29, 0x93, 0x86, 0xb7, 0xa0, 0xfa, 0x41, 0xf2, 0xf5, 0x57, 0x99, + 0xa3, 0xdc, 0x54, 0x5a, 0x95, 0xdb, 0x12, 0x05, 0x01, 0xa5, 0xe5, 0x73, 0xd8, 0x9b, 0xf2, 0xc5, + 0x64, 0xc6, 0x32, 0x4f, 0xa5, 0xa9, 0xb4, 0x94, 0xdb, 0x12, 0x55, 0x25, 0xfd, 0x60, 0x4a, 0x0f, + 0x13, 0x78, 0x99, 0x69, 0xab, 0xa9, 0xb4, 0x6a, 0xa9, 0x49, 0x52, 0x69, 0x32, 0x01, 0x17, 0xcf, + 0x9c, 0x59, 0xb7, 0x9b, 0x4a, 0x4b, 0x3d, 0x6f, 0x68, 0x59, 0x9a, 0x6e, 0xe8, 0x6b, 0xdd, 0x82, + 0xeb, 0xb6, 0x44, 0x0f, 0x8b, 0x5d, 0x62, 0xd4, 0x55, 0x15, 0xb6, 0x45, 0xf7, 0xe9, 0xcf, 0x0a, + 0xec, 0xd9, 0xfe, 0x9c, 0x99, 0x41, 0xc2, 0xa2, 0x27, 0x77, 0x86, 0x3b, 0xb0, 0xcb, 0x82, 0xa9, + 0x93, 0x06, 0x23, 0x8e, 0xa3, 0x9e, 0x9f, 0x2c, 0x47, 0x2f, 0x53, 0xd3, 0xec, 0x65, 0x6a, 0xb4, + 0xca, 0x82, 0x69, 0x5a, 0xe1, 0x4b, 0x80, 0x38, 0x71, 0xa3, 0x44, 0x36, 0x2a, 0xff, 0xdb, 0x58, + 0x13, 0xee, 0xb4, 0x3e, 0xfd, 0xbb, 0x0a, 0xaa, 0xee, 0x79, 0x11, 0xf3, 0xc4, 0x55, 0xe1, 0x2e, + 0x20, 0x77, 0xe6, 0x7b, 0xc1, 0x9c, 0x05, 0x89, 0x13, 0xb2, 0xc8, 0xe7, 0xd3, 0x6c, 0xe0, 0x27, + 0xff, 0x1a, 0xd8, 0xcd, 0xee, 0x97, 0xd6, 0x3f, 0xb4, 0x0c, 0x45, 0x07, 0xfe, 0x1e, 0x70, 0xc8, + 0x22, 0x27, 0x66, 0x91, 0xcf, 0x62, 0x47, 0xa8, 0x2c, 0x12, 0x27, 0x3a, 0x38, 0xff, 0x52, 0xdb, + 0xf4, 0xe9, 0x69, 0x85, 0x4d, 
0x68, 0xba, 0x6c, 0xa0, 0x28, 0x64, 0xd1, 0x48, 0xcc, 0xc8, 0x08, + 0xfe, 0x11, 0x8e, 0x1e, 0x23, 0x1e, 0xc7, 0xcb, 0xd1, 0x11, 0x9b, 0x2e, 0x1e, 0x59, 0x24, 0xae, + 0xec, 0xa3, 0x46, 0x53, 0xd9, 0x40, 0xb1, 0x18, 0x23, 0x87, 0x67, 0x0c, 0x7f, 0x01, 0x75, 0x2f, + 0xe2, 0x8b, 0xd0, 0x99, 0x3c, 0x3b, 0xef, 0x7d, 0x36, 0x9b, 0xc6, 0x8d, 0xed, 0x66, 0xa5, 0x55, + 0xa3, 0xfb, 0x02, 0x5f, 0x3d, 0x5f, 0x0b, 0x78, 0xfa, 0x4b, 0x05, 0xaa, 0xcb, 0x0d, 0x1d, 0x00, + 0xe8, 0x3d, 0xf3, 0xc6, 0x72, 0xac, 0x81, 0x45, 0x50, 0x09, 0xd7, 0x41, 0x95, 0x75, 0x97, 0xf4, + 0x6c, 0x1d, 0x29, 0xb9, 0x81, 0xea, 0x36, 0x41, 0x65, 0xfc, 0x12, 0x0e, 0x65, 0x6d, 0x5a, 0x36, + 0xa1, 0xc3, 0x41, 0x2f, 0xc5, 0x15, 0x7c, 0x04, 0x28, 0x9b, 0x43, 0xee, 0x6d, 0x67, 0xd0, 0xeb, + 0x12, 0x8a, 0xb6, 0xf0, 0x3e, 0xd4, 0x24, 0xed, 0x9b, 0x16, 0x82, 0x42, 0xa9, 0xdf, 0x23, 0x35, + 0x1f, 0xdd, 0x27, 0xba, 0x85, 0xf6, 0xf2, 0xb5, 0x8d, 0xc1, 0xd8, 0xb2, 0xd1, 0x7e, 0xee, 0x1f, + 0x8d, 0xfb, 0xe8, 0x00, 0x23, 0xd8, 0xcb, 0x4a, 0xbb, 0xdb, 0x25, 0x77, 0xa8, 0x9e, 0xaf, 0x2a, + 0x3a, 0x1c, 0x9b, 0x8e, 0x09, 0x42, 0xf9, 0x16, 0x25, 0xbd, 0xd6, 0x7b, 0x23, 0x82, 0x1a, 0xf8, + 0x15, 0xbc, 0x90, 0xf8, 0x9a, 0xea, 0x86, 0x6d, 0x0e, 0x2c, 0xe9, 0x3f, 0xcc, 0x85, 0x21, 0xa1, + 0x06, 0xb1, 0x6c, 0xb3, 0x47, 0x9c, 0xcb, 0x4b, 0x84, 0x37, 0x0b, 0x1d, 0xf4, 0x62, 0xa3, 0xd0, + 0x39, 0x43, 0x47, 0x1b, 0x85, 0xb3, 0x0e, 0x7a, 0x89, 0x1b, 0x70, 0xb4, 0x22, 0x38, 0xc6, 0xad, + 0x6e, 0xdd, 0x10, 0xf4, 0xea, 0xf4, 0x8f, 0x32, 0x54, 0x97, 0x37, 0x58, 0x07, 0x95, 0x92, 0xee, + 0xd8, 0x20, 0x85, 0xeb, 0xc8, 0x80, 0xc8, 0x48, 0x5c, 0xc7, 0x12, 0x98, 0x16, 0x2a, 0x17, 0x6b, + 0xfd, 0x1e, 0x55, 0x0a, 0x75, 0x9a, 0xd9, 0x16, 0x3e, 0x84, 0xfd, 0x65, 0x2d, 0x43, 0xdb, 0x4e, + 0x63, 0xcc, 0x90, 0xcc, 0x79, 0x27, 0x0d, 0xac, 0x48, 0x64, 0x2e, 0x55, 0x7c, 0x0c, 0x78, 0x05, + 0xcb, 0x20, 0xeb, 0xe9, 0x59, 0x32, 0xbe, 0x9a, 0xe4, 0x6e, 0x41, 0x59, 0x8d, 0xb2, 0xf6, 0x1f, + 0x4a, 0x07, 0xc1, 0x66, 0xa5, 0x73, 0x86, 0xd4, 0xcd, 0xca, 0x59, 0x07, 0xed, 0xbd, 0xfb, 0x55, + 0x81, 0x03, 0x83, 0xcf, 0x43, 0x37, 0xf2, 0x63, 0x1e, 0xa4, 0x6f, 0x2e, 0x3e, 0x81, 0x63, 0x63, + 0xd0, 0x1f, 0xea, 0xd4, 0x1c, 0x0d, 0x2c, 0x67, 0x6c, 0x8d, 0x86, 0xc4, 0x30, 0xaf, 0x4d, 0xd2, + 0x45, 0xa5, 0x34, 0x84, 0x82, 0x76, 0x63, 0x23, 0x65, 0x1d, 0xa5, 0x5f, 0xf6, 0x2a, 0xea, 0xd9, + 0xa8, 0xb2, 0x8e, 0x88, 0x0c, 0xb4, 0x80, 0xc8, 0x77, 0x68, 0x7b, 0x0d, 0x59, 0x04, 0xed, 0xbc, + 0xfb, 0x09, 0xd4, 0x11, 0x8b, 0x9e, 0xfc, 0x47, 0x66, 0xfb, 0x2c, 0xc2, 0xaf, 0xa1, 0x31, 0x22, + 0xf4, 0xce, 0x34, 0x88, 0x63, 0x9b, 0x84, 0xae, 0x6d, 0xef, 0x18, 0xf0, 0x8a, 0x7a, 0xa5, 0x8f, + 0x4c, 0x03, 0x29, 0xe9, 0xf9, 0x57, 0xf8, 0x90, 0x92, 0xbe, 0x39, 0xee, 0xa3, 0xf2, 0xd5, 0x6f, + 0x0a, 0x34, 0x1e, 0xf9, 0x7c, 0xe3, 0x73, 0x71, 0xa5, 0x1a, 0xe2, 0x47, 0x39, 0x4c, 0x9f, 0xb9, + 0xa1, 0xf2, 0xc3, 0xb7, 0x99, 0xc9, 0xe3, 0x33, 0x37, 0xf0, 0x34, 0x1e, 0x79, 0x6d, 0x8f, 0x05, + 0xe2, 0x11, 0x6c, 0x4b, 0xc9, 0x0d, 0xfd, 0x78, 0xf5, 0x5f, 0xfb, 0x4d, 0x5e, 0xfd, 0x5e, 0x3e, + 0xb9, 0x91, 0x03, 0x8c, 0x19, 0x5f, 0x4c, 0xb5, 0x7e, 0xbe, 0xd6, 0xdd, 0xc5, 0x9f, 0x4b, 0xf1, + 0x41, 0x88, 0x0f, 0xb9, 0xf8, 0x70, 0x77, 0x31, 0xd9, 0x11, 0x8b, 0x5c, 0xfc, 0x13, 0x00, 0x00, + 0xff, 0xff, 0xe2, 0x9f, 0x67, 0xb2, 0xcf, 0x07, 0x00, 0x00, } diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go index a9c0dd811..0afad7a68 100644 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go +++ 
b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go @@ -61,7 +61,7 @@ type Group struct { func (m *Group) Reset() { *m = Group{} } func (m *Group) String() string { return proto.CompactTextString(m) } func (*Group) ProtoMessage() {} -func (*Group) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } +func (*Group) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } func (m *Group) GetName() string { if m != nil { @@ -102,9 +102,9 @@ func init() { proto.RegisterType((*Group)(nil), "google.monitoring.v3.Group") } -func init() { proto.RegisterFile("google/monitoring/v3/group.proto", fileDescriptor1) } +func init() { proto.RegisterFile("google/monitoring/v3/group.proto", fileDescriptor3) } -var fileDescriptor1 = []byte{ +var fileDescriptor3 = []byte{ // 261 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xcf, 0x4a, 0x2b, 0x31, 0x14, 0x87, 0x49, 0xef, 0xed, 0x60, 0x4f, 0x5d, 0x0d, 0x22, 0x83, 0x20, 0x8e, 0xae, 0xba, 0xca, diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go index 14750fa97..de212a50b 100644 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go @@ -8,7 +8,7 @@ import fmt "fmt" import math "math" import _ "google.golang.org/genproto/googleapis/api/annotations" import google_api4 "google.golang.org/genproto/googleapis/api/monitoredres" -import google_protobuf4 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf5 "github.com/golang/protobuf/ptypes/empty" import ( context "golang.org/x/net/context" @@ -45,7 +45,7 @@ type ListGroupsRequest struct { func (m *ListGroupsRequest) Reset() { *m = ListGroupsRequest{} } func (m *ListGroupsRequest) String() string { return proto.CompactTextString(m) } func (*ListGroupsRequest) ProtoMessage() {} -func (*ListGroupsRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } +func (*ListGroupsRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} } type isListGroupsRequest_Filter interface { isListGroupsRequest_Filter() @@ -208,7 +208,7 @@ type ListGroupsResponse struct { func (m *ListGroupsResponse) Reset() { *m = ListGroupsResponse{} } func (m *ListGroupsResponse) String() string { return proto.CompactTextString(m) } func (*ListGroupsResponse) ProtoMessage() {} -func (*ListGroupsResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } +func (*ListGroupsResponse) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{1} } func (m *ListGroupsResponse) GetGroup() []*Group { if m != nil { @@ -234,7 +234,7 @@ type GetGroupRequest struct { func (m *GetGroupRequest) Reset() { *m = GetGroupRequest{} } func (m *GetGroupRequest) String() string { return proto.CompactTextString(m) } func (*GetGroupRequest) ProtoMessage() {} -func (*GetGroupRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } +func (*GetGroupRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{2} } func (m *GetGroupRequest) GetName() string { if m != nil { @@ -258,7 +258,7 @@ type CreateGroupRequest struct { func (m *CreateGroupRequest) Reset() { *m = CreateGroupRequest{} } func (m *CreateGroupRequest) String() string { return proto.CompactTextString(m) } func (*CreateGroupRequest) ProtoMessage() {} -func (*CreateGroupRequest) Descriptor() 
([]byte, []int) { return fileDescriptor2, []int{3} } +func (*CreateGroupRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{3} } func (m *CreateGroupRequest) GetName() string { if m != nil { @@ -293,7 +293,7 @@ type UpdateGroupRequest struct { func (m *UpdateGroupRequest) Reset() { *m = UpdateGroupRequest{} } func (m *UpdateGroupRequest) String() string { return proto.CompactTextString(m) } func (*UpdateGroupRequest) ProtoMessage() {} -func (*UpdateGroupRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{4} } +func (*UpdateGroupRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{4} } func (m *UpdateGroupRequest) GetGroup() *Group { if m != nil { @@ -319,7 +319,7 @@ type DeleteGroupRequest struct { func (m *DeleteGroupRequest) Reset() { *m = DeleteGroupRequest{} } func (m *DeleteGroupRequest) String() string { return proto.CompactTextString(m) } func (*DeleteGroupRequest) ProtoMessage() {} -func (*DeleteGroupRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{5} } +func (*DeleteGroupRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{5} } func (m *DeleteGroupRequest) GetName() string { if m != nil { @@ -357,7 +357,7 @@ type ListGroupMembersRequest struct { func (m *ListGroupMembersRequest) Reset() { *m = ListGroupMembersRequest{} } func (m *ListGroupMembersRequest) String() string { return proto.CompactTextString(m) } func (*ListGroupMembersRequest) ProtoMessage() {} -func (*ListGroupMembersRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{6} } +func (*ListGroupMembersRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{6} } func (m *ListGroupMembersRequest) GetName() string { if m != nil { @@ -409,7 +409,7 @@ type ListGroupMembersResponse struct { func (m *ListGroupMembersResponse) Reset() { *m = ListGroupMembersResponse{} } func (m *ListGroupMembersResponse) String() string { return proto.CompactTextString(m) } func (*ListGroupMembersResponse) ProtoMessage() {} -func (*ListGroupMembersResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{7} } +func (*ListGroupMembersResponse) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{7} } func (m *ListGroupMembersResponse) GetMembers() []*google_api4.MonitoredResource { if m != nil { @@ -464,7 +464,7 @@ type GroupServiceClient interface { // You can change any group attributes except `name`. UpdateGroup(ctx context.Context, in *UpdateGroupRequest, opts ...grpc.CallOption) (*Group, error) // Deletes an existing group. - DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) + DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) // Lists the monitored resources that are members of a group. 
ListGroupMembers(ctx context.Context, in *ListGroupMembersRequest, opts ...grpc.CallOption) (*ListGroupMembersResponse, error) } @@ -513,8 +513,8 @@ func (c *groupServiceClient) UpdateGroup(ctx context.Context, in *UpdateGroupReq return out, nil } -func (c *groupServiceClient) DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) { - out := new(google_protobuf4.Empty) +func (c *groupServiceClient) DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) { + out := new(google_protobuf5.Empty) err := grpc.Invoke(ctx, "/google.monitoring.v3.GroupService/DeleteGroup", in, out, c.cc, opts...) if err != nil { return nil, err @@ -544,7 +544,7 @@ type GroupServiceServer interface { // You can change any group attributes except `name`. UpdateGroup(context.Context, *UpdateGroupRequest) (*Group, error) // Deletes an existing group. - DeleteGroup(context.Context, *DeleteGroupRequest) (*google_protobuf4.Empty, error) + DeleteGroup(context.Context, *DeleteGroupRequest) (*google_protobuf5.Empty, error) // Lists the monitored resources that are members of a group. ListGroupMembers(context.Context, *ListGroupMembersRequest) (*ListGroupMembersResponse, error) } @@ -694,9 +694,9 @@ var _GroupService_serviceDesc = grpc.ServiceDesc{ Metadata: "google/monitoring/v3/group_service.proto", } -func init() { proto.RegisterFile("google/monitoring/v3/group_service.proto", fileDescriptor2) } +func init() { proto.RegisterFile("google/monitoring/v3/group_service.proto", fileDescriptor4) } -var fileDescriptor2 = []byte{ +var fileDescriptor4 = []byte{ // 826 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0x4d, 0x6f, 0xd3, 0x4c, 0x10, 0x7e, 0xdd, 0xa4, 0x69, 0xb2, 0x69, 0xd5, 0x76, 0x55, 0xf5, 0x8d, 0xdc, 0x0f, 0x05, 0xf7, diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go index 533b5373c..99e745139 100644 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go @@ -32,7 +32,7 @@ type Point struct { func (m *Point) Reset() { *m = Point{} } func (m *Point) String() string { return proto.CompactTextString(m) } func (*Point) ProtoMessage() {} -func (*Point) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } +func (*Point) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{0} } func (m *Point) GetInterval() *TimeInterval { if m != nil { @@ -90,7 +90,7 @@ type TimeSeries struct { func (m *TimeSeries) Reset() { *m = TimeSeries{} } func (m *TimeSeries) String() string { return proto.CompactTextString(m) } func (*TimeSeries) ProtoMessage() {} -func (*TimeSeries) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} } +func (*TimeSeries) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{1} } func (m *TimeSeries) GetMetric() *google_api5.Metric { if m != nil { @@ -132,9 +132,9 @@ func init() { proto.RegisterType((*TimeSeries)(nil), "google.monitoring.v3.TimeSeries") } -func init() { proto.RegisterFile("google/monitoring/v3/metric.proto", fileDescriptor3) } +func init() { proto.RegisterFile("google/monitoring/v3/metric.proto", fileDescriptor5) } -var fileDescriptor3 = []byte{ +var fileDescriptor5 = []byte{ // 396 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 
0xff, 0x7c, 0x92, 0xc1, 0x4a, 0xeb, 0x40, 0x14, 0x86, 0x49, 0x7b, 0x5b, 0x7a, 0x27, 0x70, 0x17, 0xc3, 0x05, 0x43, 0x45, 0x88, 0x15, 0xb4, diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go index 77840f1cf..1673fecb8 100644 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go @@ -9,7 +9,7 @@ import math "math" import _ "google.golang.org/genproto/googleapis/api/annotations" import google_api5 "google.golang.org/genproto/googleapis/api/metric" import google_api4 "google.golang.org/genproto/googleapis/api/monitoredres" -import google_protobuf4 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf5 "github.com/golang/protobuf/ptypes/empty" import google_rpc "google.golang.org/genproto/googleapis/rpc/status" import ( @@ -47,7 +47,7 @@ func (x ListTimeSeriesRequest_TimeSeriesView) String() string { return proto.EnumName(ListTimeSeriesRequest_TimeSeriesView_name, int32(x)) } func (ListTimeSeriesRequest_TimeSeriesView) EnumDescriptor() ([]byte, []int) { - return fileDescriptor4, []int{8, 0} + return fileDescriptor6, []int{8, 0} } // The `ListMonitoredResourceDescriptors` request. @@ -77,7 +77,7 @@ func (m *ListMonitoredResourceDescriptorsRequest) Reset() { func (m *ListMonitoredResourceDescriptorsRequest) String() string { return proto.CompactTextString(m) } func (*ListMonitoredResourceDescriptorsRequest) ProtoMessage() {} func (*ListMonitoredResourceDescriptorsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor4, []int{0} + return fileDescriptor6, []int{0} } func (m *ListMonitoredResourceDescriptorsRequest) GetName() string { @@ -125,7 +125,7 @@ func (m *ListMonitoredResourceDescriptorsResponse) Reset() { func (m *ListMonitoredResourceDescriptorsResponse) String() string { return proto.CompactTextString(m) } func (*ListMonitoredResourceDescriptorsResponse) ProtoMessage() {} func (*ListMonitoredResourceDescriptorsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor4, []int{1} + return fileDescriptor6, []int{1} } func (m *ListMonitoredResourceDescriptorsResponse) GetResourceDescriptors() []*google_api4.MonitoredResourceDescriptor { @@ -155,7 +155,7 @@ func (m *GetMonitoredResourceDescriptorRequest) Reset() { *m = GetMonito func (m *GetMonitoredResourceDescriptorRequest) String() string { return proto.CompactTextString(m) } func (*GetMonitoredResourceDescriptorRequest) ProtoMessage() {} func (*GetMonitoredResourceDescriptorRequest) Descriptor() ([]byte, []int) { - return fileDescriptor4, []int{2} + return fileDescriptor6, []int{2} } func (m *GetMonitoredResourceDescriptorRequest) GetName() string { @@ -190,7 +190,7 @@ type ListMetricDescriptorsRequest struct { func (m *ListMetricDescriptorsRequest) Reset() { *m = ListMetricDescriptorsRequest{} } func (m *ListMetricDescriptorsRequest) String() string { return proto.CompactTextString(m) } func (*ListMetricDescriptorsRequest) ProtoMessage() {} -func (*ListMetricDescriptorsRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{3} } +func (*ListMetricDescriptorsRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{3} } func (m *ListMetricDescriptorsRequest) GetName() string { if m != nil { @@ -234,7 +234,7 @@ type ListMetricDescriptorsResponse struct { func (m *ListMetricDescriptorsResponse) Reset() { *m = ListMetricDescriptorsResponse{} } func (m 
*ListMetricDescriptorsResponse) String() string { return proto.CompactTextString(m) } func (*ListMetricDescriptorsResponse) ProtoMessage() {} -func (*ListMetricDescriptorsResponse) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{4} } +func (*ListMetricDescriptorsResponse) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{4} } func (m *ListMetricDescriptorsResponse) GetMetricDescriptors() []*google_api5.MetricDescriptor { if m != nil { @@ -262,7 +262,7 @@ type GetMetricDescriptorRequest struct { func (m *GetMetricDescriptorRequest) Reset() { *m = GetMetricDescriptorRequest{} } func (m *GetMetricDescriptorRequest) String() string { return proto.CompactTextString(m) } func (*GetMetricDescriptorRequest) ProtoMessage() {} -func (*GetMetricDescriptorRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{5} } +func (*GetMetricDescriptorRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{5} } func (m *GetMetricDescriptorRequest) GetName() string { if m != nil { @@ -284,7 +284,7 @@ type CreateMetricDescriptorRequest struct { func (m *CreateMetricDescriptorRequest) Reset() { *m = CreateMetricDescriptorRequest{} } func (m *CreateMetricDescriptorRequest) String() string { return proto.CompactTextString(m) } func (*CreateMetricDescriptorRequest) ProtoMessage() {} -func (*CreateMetricDescriptorRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{6} } +func (*CreateMetricDescriptorRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{6} } func (m *CreateMetricDescriptorRequest) GetName() string { if m != nil { @@ -312,7 +312,7 @@ type DeleteMetricDescriptorRequest struct { func (m *DeleteMetricDescriptorRequest) Reset() { *m = DeleteMetricDescriptorRequest{} } func (m *DeleteMetricDescriptorRequest) String() string { return proto.CompactTextString(m) } func (*DeleteMetricDescriptorRequest) ProtoMessage() {} -func (*DeleteMetricDescriptorRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{7} } +func (*DeleteMetricDescriptorRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{7} } func (m *DeleteMetricDescriptorRequest) GetName() string { if m != nil { @@ -362,7 +362,7 @@ type ListTimeSeriesRequest struct { func (m *ListTimeSeriesRequest) Reset() { *m = ListTimeSeriesRequest{} } func (m *ListTimeSeriesRequest) String() string { return proto.CompactTextString(m) } func (*ListTimeSeriesRequest) ProtoMessage() {} -func (*ListTimeSeriesRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{8} } +func (*ListTimeSeriesRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{8} } func (m *ListTimeSeriesRequest) GetName() string { if m != nil { @@ -433,7 +433,7 @@ type ListTimeSeriesResponse struct { func (m *ListTimeSeriesResponse) Reset() { *m = ListTimeSeriesResponse{} } func (m *ListTimeSeriesResponse) String() string { return proto.CompactTextString(m) } func (*ListTimeSeriesResponse) ProtoMessage() {} -func (*ListTimeSeriesResponse) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{9} } +func (*ListTimeSeriesResponse) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{9} } func (m *ListTimeSeriesResponse) GetTimeSeries() []*TimeSeries { if m != nil { @@ -465,7 +465,7 @@ type CreateTimeSeriesRequest struct { func (m *CreateTimeSeriesRequest) Reset() { *m = CreateTimeSeriesRequest{} } func (m *CreateTimeSeriesRequest) String() string { return proto.CompactTextString(m) } func (*CreateTimeSeriesRequest) ProtoMessage() {} -func 
(*CreateTimeSeriesRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{10} } +func (*CreateTimeSeriesRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{10} } func (m *CreateTimeSeriesRequest) GetName() string { if m != nil { @@ -495,7 +495,7 @@ type CreateTimeSeriesError struct { func (m *CreateTimeSeriesError) Reset() { *m = CreateTimeSeriesError{} } func (m *CreateTimeSeriesError) String() string { return proto.CompactTextString(m) } func (*CreateTimeSeriesError) ProtoMessage() {} -func (*CreateTimeSeriesError) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{11} } +func (*CreateTimeSeriesError) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{11} } func (m *CreateTimeSeriesError) GetTimeSeries() *TimeSeries { if m != nil { @@ -552,14 +552,14 @@ type MetricServiceClient interface { CreateMetricDescriptor(ctx context.Context, in *CreateMetricDescriptorRequest, opts ...grpc.CallOption) (*google_api5.MetricDescriptor, error) // Deletes a metric descriptor. Only user-created // [custom metrics](/monitoring/custom-metrics) can be deleted. - DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) + DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) // Lists time series that match a filter. This method does not require a Stackdriver account. ListTimeSeries(ctx context.Context, in *ListTimeSeriesRequest, opts ...grpc.CallOption) (*ListTimeSeriesResponse, error) // Creates or adds data to one or more time series. // The response is empty if all time series in the request were written. // If any time series could not be written, a corresponding failure message is // included in the error response. - CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) + CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) } type metricServiceClient struct { @@ -615,8 +615,8 @@ func (c *metricServiceClient) CreateMetricDescriptor(ctx context.Context, in *Cr return out, nil } -func (c *metricServiceClient) DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) { - out := new(google_protobuf4.Empty) +func (c *metricServiceClient) DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) { + out := new(google_protobuf5.Empty) err := grpc.Invoke(ctx, "/google.monitoring.v3.MetricService/DeleteMetricDescriptor", in, out, c.cc, opts...) if err != nil { return nil, err @@ -633,8 +633,8 @@ func (c *metricServiceClient) ListTimeSeries(ctx context.Context, in *ListTimeSe return out, nil } -func (c *metricServiceClient) CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) { - out := new(google_protobuf4.Empty) +func (c *metricServiceClient) CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) { + out := new(google_protobuf5.Empty) err := grpc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateTimeSeries", in, out, c.cc, opts...) 
if err != nil { return nil, err @@ -659,14 +659,14 @@ type MetricServiceServer interface { CreateMetricDescriptor(context.Context, *CreateMetricDescriptorRequest) (*google_api5.MetricDescriptor, error) // Deletes a metric descriptor. Only user-created // [custom metrics](/monitoring/custom-metrics) can be deleted. - DeleteMetricDescriptor(context.Context, *DeleteMetricDescriptorRequest) (*google_protobuf4.Empty, error) + DeleteMetricDescriptor(context.Context, *DeleteMetricDescriptorRequest) (*google_protobuf5.Empty, error) // Lists time series that match a filter. This method does not require a Stackdriver account. ListTimeSeries(context.Context, *ListTimeSeriesRequest) (*ListTimeSeriesResponse, error) // Creates or adds data to one or more time series. // The response is empty if all time series in the request were written. // If any time series could not be written, a corresponding failure message is // included in the error response. - CreateTimeSeries(context.Context, *CreateTimeSeriesRequest) (*google_protobuf4.Empty, error) + CreateTimeSeries(context.Context, *CreateTimeSeriesRequest) (*google_protobuf5.Empty, error) } func RegisterMetricServiceServer(s *grpc.Server, srv MetricServiceServer) { @@ -858,9 +858,9 @@ var _MetricService_serviceDesc = grpc.ServiceDesc{ Metadata: "google/monitoring/v3/metric_service.proto", } -func init() { proto.RegisterFile("google/monitoring/v3/metric_service.proto", fileDescriptor4) } +func init() { proto.RegisterFile("google/monitoring/v3/metric_service.proto", fileDescriptor6) } -var fileDescriptor4 = []byte{ +var fileDescriptor6 = []byte{ // 1011 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x57, 0x4d, 0x6f, 0x1b, 0x45, 0x18, 0x66, 0x92, 0x34, 0x1f, 0xaf, 0xd5, 0x90, 0x4e, 0x5b, 0xd7, 0x6c, 0x13, 0xe4, 0x2e, 0x2a, diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go new file mode 100644 index 000000000..32f983d04 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go @@ -0,0 +1,67 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/mutation_record.proto + +package monitoring + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf2 "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Describes a change made to a configuration. +type MutationRecord struct { + // When the change occurred. + MutateTime *google_protobuf2.Timestamp `protobuf:"bytes,1,opt,name=mutate_time,json=mutateTime" json:"mutate_time,omitempty"` + // The email address of the user making the change. 
+ MutatedBy string `protobuf:"bytes,2,opt,name=mutated_by,json=mutatedBy" json:"mutated_by,omitempty"` +} + +func (m *MutationRecord) Reset() { *m = MutationRecord{} } +func (m *MutationRecord) String() string { return proto.CompactTextString(m) } +func (*MutationRecord) ProtoMessage() {} +func (*MutationRecord) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{0} } + +func (m *MutationRecord) GetMutateTime() *google_protobuf2.Timestamp { + if m != nil { + return m.MutateTime + } + return nil +} + +func (m *MutationRecord) GetMutatedBy() string { + if m != nil { + return m.MutatedBy + } + return "" +} + +func init() { + proto.RegisterType((*MutationRecord)(nil), "google.monitoring.v3.MutationRecord") +} + +func init() { proto.RegisterFile("google/monitoring/v3/mutation_record.proto", fileDescriptor7) } + +var fileDescriptor7 = []byte{ + // 251 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4a, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0xcf, 0xcd, 0xcf, 0xcb, 0x2c, 0xc9, 0x2f, 0xca, 0xcc, 0x4b, 0xd7, 0x2f, 0x33, + 0xd6, 0xcf, 0x2d, 0x2d, 0x49, 0x2c, 0xc9, 0xcc, 0xcf, 0x8b, 0x2f, 0x4a, 0x4d, 0xce, 0x2f, 0x4a, + 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x81, 0xa8, 0xd5, 0x43, 0xa8, 0xd5, 0x2b, 0x33, + 0x96, 0x92, 0x87, 0x9a, 0x00, 0x56, 0x93, 0x54, 0x9a, 0xa6, 0x5f, 0x92, 0x99, 0x9b, 0x5a, 0x5c, + 0x92, 0x98, 0x5b, 0x00, 0xd1, 0xa6, 0x94, 0xc3, 0xc5, 0xe7, 0x0b, 0x35, 0x2f, 0x08, 0x6c, 0x9c, + 0x90, 0x35, 0x17, 0x37, 0xd8, 0x86, 0xd4, 0x78, 0x90, 0x5a, 0x09, 0x46, 0x05, 0x46, 0x0d, 0x6e, + 0x23, 0x29, 0x3d, 0xa8, 0xf1, 0x30, 0x83, 0xf4, 0x42, 0x60, 0x06, 0x05, 0x71, 0x41, 0x94, 0x83, + 0x04, 0x84, 0x64, 0xb9, 0xa0, 0xbc, 0x94, 0xf8, 0xa4, 0x4a, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xce, + 0x20, 0x4e, 0xa8, 0x88, 0x53, 0xa5, 0xd3, 0x6a, 0x46, 0x2e, 0x89, 0xe4, 0xfc, 0x5c, 0x3d, 0x6c, + 0x6e, 0x75, 0x12, 0x46, 0x75, 0x48, 0x00, 0xc8, 0xa6, 0x00, 0xc6, 0x28, 0x3b, 0xa8, 0xe2, 0xf4, + 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74, 0xfd, 0xf4, 0xd4, 0x3c, 0xb0, 0x3b, 0xf4, + 0x21, 0x52, 0x89, 0x05, 0x99, 0xc5, 0xa8, 0x61, 0x64, 0x8d, 0xe0, 0xad, 0x62, 0x92, 0x72, 0x87, + 0x18, 0xe0, 0x9c, 0x93, 0x5f, 0x9a, 0xa2, 0xe7, 0x8b, 0xb0, 0x33, 0xcc, 0xf8, 0x14, 0x4c, 0x32, + 0x06, 0x2c, 0x19, 0x83, 0x90, 0x8c, 0x09, 0x33, 0x4e, 0x62, 0x03, 0x5b, 0x62, 0x0c, 0x08, 0x00, + 0x00, 0xff, 0xff, 0x95, 0xa7, 0xf3, 0xbd, 0x87, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go new file mode 100644 index 000000000..6d0534efb --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go @@ -0,0 +1,313 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/notification.proto + +package monitoring + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_api3 "google.golang.org/genproto/googleapis/api/label" +import google_protobuf4 "github.com/golang/protobuf/ptypes/wrappers" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Indicates whether the channel has been verified or not. 
It is illegal +// to specify this field in a +// [`CreateNotificationChannel`][google.monitoring.v3.NotificationChannelService.CreateNotificationChannel] +// or an +// [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel] +// operation. +type NotificationChannel_VerificationStatus int32 + +const ( + // Sentinel value used to indicate that the state is unknown, omitted, or + // is not applicable (as in the case of channels that neither support + // nor require verification in order to function). + NotificationChannel_VERIFICATION_STATUS_UNSPECIFIED NotificationChannel_VerificationStatus = 0 + // The channel has yet to be verified and requires verification to function. + // Note that this state also applies to the case where the verification + // process has been initiated by sending a verification code but where + // the verification code has not been submitted to complete the process. + NotificationChannel_UNVERIFIED NotificationChannel_VerificationStatus = 1 + // It has been proven that notifications can be received on this + // notification channel and that someone on the project has access + // to messages that are delivered to that channel. + NotificationChannel_VERIFIED NotificationChannel_VerificationStatus = 2 +) + +var NotificationChannel_VerificationStatus_name = map[int32]string{ + 0: "VERIFICATION_STATUS_UNSPECIFIED", + 1: "UNVERIFIED", + 2: "VERIFIED", +} +var NotificationChannel_VerificationStatus_value = map[string]int32{ + "VERIFICATION_STATUS_UNSPECIFIED": 0, + "UNVERIFIED": 1, + "VERIFIED": 2, +} + +func (x NotificationChannel_VerificationStatus) String() string { + return proto.EnumName(NotificationChannel_VerificationStatus_name, int32(x)) +} +func (NotificationChannel_VerificationStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor8, []int{1, 0} +} + +// A description of a notification channel. The descriptor includes +// the properties of the channel and the set of labels or fields that +// must be specified to configure channels of a given type. +type NotificationChannelDescriptor struct { + // The full REST resource name for this descriptor. The syntax is: + // + // projects/[PROJECT_ID]/notificationChannelDescriptors/[TYPE] + // + // In the above, `[TYPE]` is the value of the `type` field. + Name string `protobuf:"bytes,6,opt,name=name" json:"name,omitempty"` + // The type of notification channel, such as "email", "sms", etc. + // Notification channel types are globally unique. + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + // A human-readable name for the notification channel type. This + // form of the name is suitable for a user interface. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName" json:"display_name,omitempty"` + // A human-readable description of the notification channel + // type. The description may include a description of the properties + // of the channel and pointers to external documentation. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The set of labels that must be defined to identify a particular + // channel of the corresponding type. Each label includes a + // description for how that field should be populated. + Labels []*google_api3.LabelDescriptor `protobuf:"bytes,4,rep,name=labels" json:"labels,omitempty"` + // The tiers that support this notification channel; the project service tier + // must be one of the supported_tiers. 
+ SupportedTiers []ServiceTier `protobuf:"varint,5,rep,packed,name=supported_tiers,json=supportedTiers,enum=google.monitoring.v3.ServiceTier" json:"supported_tiers,omitempty"` +} + +func (m *NotificationChannelDescriptor) Reset() { *m = NotificationChannelDescriptor{} } +func (m *NotificationChannelDescriptor) String() string { return proto.CompactTextString(m) } +func (*NotificationChannelDescriptor) ProtoMessage() {} +func (*NotificationChannelDescriptor) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{0} } + +func (m *NotificationChannelDescriptor) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NotificationChannelDescriptor) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *NotificationChannelDescriptor) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *NotificationChannelDescriptor) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *NotificationChannelDescriptor) GetLabels() []*google_api3.LabelDescriptor { + if m != nil { + return m.Labels + } + return nil +} + +func (m *NotificationChannelDescriptor) GetSupportedTiers() []ServiceTier { + if m != nil { + return m.SupportedTiers + } + return nil +} + +// A `NotificationChannel` is a medium through which an alert is +// delivered when a policy violation is detected. Examples of channels +// include email, SMS, and third-party messaging applications. Fields +// containing sensitive information like authentication tokens or +// contact info are only partially populated on retrieval. +type NotificationChannel struct { + // The type of the notification channel. This field matches the + // value of the [NotificationChannelDescriptor.type][google.monitoring.v3.NotificationChannelDescriptor.type] field. + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + // The full REST resource name for this channel. The syntax is: + // + // projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID] + // + // The `[CHANNEL_ID]` is automatically assigned by the server on creation. + Name string `protobuf:"bytes,6,opt,name=name" json:"name,omitempty"` + // An optional human-readable name for this notification channel. It is + // recommended that you specify a non-empty and unique name in order to + // make it easier to identify the channels in your project, though this is + // not enforced. The display name is limited to 512 Unicode characters. + DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName" json:"display_name,omitempty"` + // An optional human-readable description of this notification channel. This + // description may provide additional details, beyond the display + // name, for the channel. This may not exceeed 1024 Unicode characters. + Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` + // Configuration fields that define the channel and its behavior. The + // permissible and required labels are specified in the + // [NotificationChannelDescriptor.labels][google.monitoring.v3.NotificationChannelDescriptor.labels] of the + // `NotificationChannelDescriptor` corresponding to the `type` field. 
+ Labels map[string]string `protobuf:"bytes,5,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // User-supplied key/value data that does not need to conform to + // the corresponding `NotificationChannelDescriptor`'s schema, unlike + // the `labels` field. This field is intended to be used for organizing + // and identifying the `NotificationChannel` objects. + // + // The field can contain up to 64 entries. Each key and value is limited to + // 63 Unicode characters or 128 bytes, whichever is smaller. Labels and + // values can contain only lowercase letters, numerals, underscores, and + // dashes. Keys must begin with a letter. + UserLabels map[string]string `protobuf:"bytes,8,rep,name=user_labels,json=userLabels" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Indicates whether this channel has been verified or not. On a + // [`ListNotificationChannels`][google.monitoring.v3.NotificationChannelService.ListNotificationChannels] + // or + // [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel] + // operation, this field is expected to be populated. + // + // If the value is `UNVERIFIED`, then it indicates that the channel is + // non-functioning (it both requires verification and lacks verification); + // otherwise, it is assumed that the channel works. + // + // If the channel is neither `VERIFIED` nor `UNVERIFIED`, it implies that + // the channel is of a type that does not require verification or that + // this specific channel has been exempted from verification because it was + // created prior to verification being required for channels of this type. + // + // This field cannot be modified using a standard + // [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel] + // operation. To change the value of this field, you must call + // [`VerifyNotificationChannel`][google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel]. + VerificationStatus NotificationChannel_VerificationStatus `protobuf:"varint,9,opt,name=verification_status,json=verificationStatus,enum=google.monitoring.v3.NotificationChannel_VerificationStatus" json:"verification_status,omitempty"` + // Whether notifications are forwarded to the described channel. This makes + // it possible to disable delivery of notifications to a particular channel + // without removing the channel from all alerting policies that reference + // the channel. This is a more convenient approach when the change is + // temporary and you want to receive notifications from the same set + // of alerting policies on the channel at some point in the future. 
+ Enabled *google_protobuf4.BoolValue `protobuf:"bytes,11,opt,name=enabled" json:"enabled,omitempty"` +} + +func (m *NotificationChannel) Reset() { *m = NotificationChannel{} } +func (m *NotificationChannel) String() string { return proto.CompactTextString(m) } +func (*NotificationChannel) ProtoMessage() {} +func (*NotificationChannel) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{1} } + +func (m *NotificationChannel) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *NotificationChannel) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NotificationChannel) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *NotificationChannel) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *NotificationChannel) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *NotificationChannel) GetUserLabels() map[string]string { + if m != nil { + return m.UserLabels + } + return nil +} + +func (m *NotificationChannel) GetVerificationStatus() NotificationChannel_VerificationStatus { + if m != nil { + return m.VerificationStatus + } + return NotificationChannel_VERIFICATION_STATUS_UNSPECIFIED +} + +func (m *NotificationChannel) GetEnabled() *google_protobuf4.BoolValue { + if m != nil { + return m.Enabled + } + return nil +} + +func init() { + proto.RegisterType((*NotificationChannelDescriptor)(nil), "google.monitoring.v3.NotificationChannelDescriptor") + proto.RegisterType((*NotificationChannel)(nil), "google.monitoring.v3.NotificationChannel") + proto.RegisterEnum("google.monitoring.v3.NotificationChannel_VerificationStatus", NotificationChannel_VerificationStatus_name, NotificationChannel_VerificationStatus_value) +} + +func init() { proto.RegisterFile("google/monitoring/v3/notification.proto", fileDescriptor8) } + +var fileDescriptor8 = []byte{ + // 599 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0xdd, 0x6e, 0xd3, 0x30, + 0x14, 0xc7, 0x49, 0xbb, 0x8e, 0xcd, 0x99, 0xba, 0xe1, 0x4d, 0x28, 0x0a, 0x5f, 0xdd, 0xb8, 0xa0, + 0x57, 0x89, 0xd4, 0x82, 0xc4, 0xf8, 0x92, 0xb6, 0xae, 0x43, 0x45, 0xac, 0x4c, 0xfd, 0x42, 0x9a, + 0x26, 0x55, 0x6e, 0xeb, 0x05, 0x8b, 0xc4, 0x8e, 0x6c, 0x27, 0xa8, 0x0f, 0xc1, 0x63, 0x70, 0x01, + 0x8f, 0xc2, 0x53, 0xa1, 0x38, 0x6e, 0x12, 0xb6, 0x48, 0x8c, 0x3b, 0x9f, 0x73, 0xfe, 0xe7, 0x7f, + 0xce, 0xf9, 0x35, 0x2a, 0x78, 0xe6, 0x31, 0xe6, 0xf9, 0xd8, 0x0d, 0x18, 0x25, 0x92, 0x71, 0x42, + 0x3d, 0x37, 0x6e, 0xbb, 0x94, 0x49, 0x72, 0x45, 0xe6, 0x48, 0x12, 0x46, 0x9d, 0x90, 0x33, 0xc9, + 0xe0, 0x5e, 0x2a, 0x74, 0x72, 0xa1, 0x13, 0xb7, 0xed, 0x87, 0xba, 0x1d, 0x85, 0xc4, 0x45, 0x94, + 0x32, 0xa9, 0x5a, 0x44, 0xda, 0x63, 0xdf, 0x2f, 0x54, 0x7d, 0x34, 0xc3, 0xbe, 0xce, 0xef, 0x97, + 0x0e, 0x9d, 0xb3, 0x20, 0x58, 0x8d, 0xb3, 0x1f, 0x6b, 0x89, 0x8a, 0x66, 0xd1, 0x95, 0xfb, 0x8d, + 0xa3, 0x30, 0xc4, 0x5c, 0x5b, 0x1f, 0x7c, 0xaf, 0x80, 0x47, 0xfd, 0xc2, 0x96, 0x9d, 0x2f, 0x88, + 0x52, 0xec, 0x9f, 0x60, 0x31, 0xe7, 0x24, 0x94, 0x8c, 0x43, 0x08, 0xd6, 0x28, 0x0a, 0xb0, 0xb5, + 0xde, 0x30, 0x9a, 0x9b, 0x03, 0xf5, 0x4e, 0x72, 0x72, 0x19, 0x62, 0xcb, 0x48, 0x73, 0xc9, 0x1b, + 0xee, 0x83, 0xad, 0x05, 0x11, 0xa1, 0x8f, 0x96, 0x53, 0xa5, 0xaf, 0xa8, 0x9a, 0xa9, 0x73, 0xfd, + 0xa4, 0xad, 0x01, 0xcc, 0x85, 0x36, 0x26, 0x8c, 0x5a, 0x55, 0xad, 0xc8, 0x53, 0xb0, 0x0d, 0xd6, + 0xd5, 0x81, 0xc2, 0x5a, 0x6b, 0x54, 0x9b, 0x66, 0xeb, 0x81, 0xa3, 
0x71, 0xa1, 0x90, 0x38, 0x1f, + 0x93, 0x4a, 0xbe, 0xd9, 0x40, 0x4b, 0xe1, 0x07, 0xb0, 0x2d, 0xa2, 0x30, 0x64, 0x5c, 0xe2, 0xc5, + 0x54, 0x12, 0xcc, 0x85, 0x55, 0x6b, 0x54, 0x9b, 0xf5, 0xd6, 0xbe, 0x53, 0x06, 0xdb, 0x19, 0x62, + 0x1e, 0x93, 0x39, 0x1e, 0x11, 0xcc, 0x07, 0xf5, 0xac, 0x33, 0x09, 0xc5, 0xc1, 0x8f, 0x1a, 0xd8, + 0x2d, 0xe1, 0x51, 0x7a, 0x71, 0x19, 0x99, 0xeb, 0x14, 0xaa, 0xff, 0xa4, 0xb0, 0x76, 0x93, 0xc2, + 0x59, 0x46, 0xa1, 0xa6, 0x28, 0xbc, 0x28, 0xbf, 0xa3, 0x64, 0xcf, 0x94, 0x91, 0xe8, 0x52, 0xc9, + 0x97, 0x19, 0x9f, 0x0b, 0x60, 0x46, 0x02, 0xf3, 0xa9, 0xf6, 0xdc, 0x50, 0x9e, 0x87, 0xb7, 0xf7, + 0x1c, 0x0b, 0xcc, 0x8b, 0xbe, 0x20, 0xca, 0x12, 0x30, 0x00, 0xbb, 0x31, 0xe6, 0x59, 0xcb, 0x54, + 0x48, 0x24, 0x23, 0x61, 0x6d, 0x36, 0x8c, 0x66, 0xbd, 0xf5, 0xe6, 0xf6, 0x33, 0x26, 0x05, 0x93, + 0xa1, 0xf2, 0x18, 0xc0, 0xf8, 0x46, 0x0e, 0x3e, 0x07, 0x77, 0x31, 0x45, 0x33, 0x1f, 0x2f, 0x2c, + 0xb3, 0x61, 0x34, 0xcd, 0x96, 0xbd, 0x1a, 0xb1, 0xfa, 0xc0, 0x9d, 0x63, 0xc6, 0xfc, 0x09, 0xf2, + 0x23, 0x3c, 0x58, 0x49, 0xed, 0x43, 0x60, 0x16, 0xf6, 0x87, 0x3b, 0xa0, 0xfa, 0x15, 0x2f, 0xf5, + 0x4f, 0x99, 0x3c, 0xe1, 0x1e, 0xa8, 0xc5, 0x49, 0x8b, 0xfe, 0x68, 0xd3, 0xe0, 0x55, 0xe5, 0xa5, + 0x61, 0xbf, 0x05, 0xdb, 0xd7, 0xce, 0xff, 0x9f, 0xf6, 0x83, 0xcf, 0x00, 0xde, 0xbc, 0x0c, 0x3e, + 0x05, 0x4f, 0x26, 0xdd, 0x41, 0xef, 0xb4, 0xd7, 0x39, 0x1a, 0xf5, 0x3e, 0xf5, 0xa7, 0xc3, 0xd1, + 0xd1, 0x68, 0x3c, 0x9c, 0x8e, 0xfb, 0xc3, 0xf3, 0x6e, 0xa7, 0x77, 0xda, 0xeb, 0x9e, 0xec, 0xdc, + 0x81, 0x75, 0x00, 0xc6, 0xfd, 0x54, 0xd6, 0x3d, 0xd9, 0x31, 0xe0, 0x16, 0xd8, 0xc8, 0xa2, 0xca, + 0xf1, 0x4f, 0x03, 0x58, 0x73, 0x16, 0x94, 0x02, 0x3e, 0xbe, 0x57, 0x24, 0x7c, 0x9e, 0x80, 0x39, + 0x37, 0x2e, 0xde, 0x69, 0xa9, 0xc7, 0x7c, 0x44, 0x3d, 0x87, 0x71, 0xcf, 0xf5, 0x30, 0x55, 0xd8, + 0xdc, 0xb4, 0x84, 0x42, 0x22, 0xfe, 0xfe, 0x2f, 0x79, 0x9d, 0x47, 0xbf, 0x2a, 0xf6, 0xfb, 0xd4, + 0xa0, 0xe3, 0xb3, 0x68, 0xe1, 0x9c, 0xe5, 0x13, 0x27, 0xed, 0xdf, 0xab, 0xe2, 0xa5, 0x2a, 0x5e, + 0xe6, 0xc5, 0xcb, 0x49, 0x7b, 0xb6, 0xae, 0x86, 0xb4, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0xdf, + 0xb9, 0x3f, 0x8b, 0x24, 0x05, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go new file mode 100644 index 000000000..ea5cf925c --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go @@ -0,0 +1,1035 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/notification_service.proto + +package monitoring + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf5 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf6 "google.golang.org/genproto/protobuf/field_mask" +import google_protobuf2 "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// The `ListNotificationChannelDescriptors` request. +type ListNotificationChannelDescriptorsRequest struct { + // The REST resource name of the parent from which to retrieve + // the notification channel descriptors. 
The expected syntax is: + // + // projects/[PROJECT_ID] + // + // Note that this names the parent container in which to look for the + // descriptors; to retrieve a single descriptor by name, use the + // [GetNotificationChannelDescriptor][google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor] + // operation, instead. + Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + // The maximum number of results to return in a single response. If + // not set to a positive number, a reasonable value will be chosen by the + // service. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // If non-empty, `page_token` must contain a value returned as the + // `next_page_token` in a previous response to request the next set + // of results. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListNotificationChannelDescriptorsRequest) Reset() { + *m = ListNotificationChannelDescriptorsRequest{} +} +func (m *ListNotificationChannelDescriptorsRequest) String() string { return proto.CompactTextString(m) } +func (*ListNotificationChannelDescriptorsRequest) ProtoMessage() {} +func (*ListNotificationChannelDescriptorsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor9, []int{0} +} + +func (m *ListNotificationChannelDescriptorsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListNotificationChannelDescriptorsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListNotificationChannelDescriptorsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The `ListNotificationChannelDescriptors` response. +type ListNotificationChannelDescriptorsResponse struct { + // The monitored resource descriptors supported for the specified + // project, optionally filtered. + ChannelDescriptors []*NotificationChannelDescriptor `protobuf:"bytes,1,rep,name=channel_descriptors,json=channelDescriptors" json:"channel_descriptors,omitempty"` + // If not empty, indicates that there may be more results that match + // the request. Use the value in the `page_token` field in a + // subsequent request to fetch the next set of results. If empty, + // all results have been returned. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListNotificationChannelDescriptorsResponse) Reset() { + *m = ListNotificationChannelDescriptorsResponse{} +} +func (m *ListNotificationChannelDescriptorsResponse) String() string { + return proto.CompactTextString(m) +} +func (*ListNotificationChannelDescriptorsResponse) ProtoMessage() {} +func (*ListNotificationChannelDescriptorsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor9, []int{1} +} + +func (m *ListNotificationChannelDescriptorsResponse) GetChannelDescriptors() []*NotificationChannelDescriptor { + if m != nil { + return m.ChannelDescriptors + } + return nil +} + +func (m *ListNotificationChannelDescriptorsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The `GetNotificationChannelDescriptor` response. +type GetNotificationChannelDescriptorRequest struct { + // The channel type for which to execute the request. The format is + // `projects/[PROJECT_ID]/notificationChannelDescriptors/{channel_type}`. 
+ Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` +} + +func (m *GetNotificationChannelDescriptorRequest) Reset() { + *m = GetNotificationChannelDescriptorRequest{} +} +func (m *GetNotificationChannelDescriptorRequest) String() string { return proto.CompactTextString(m) } +func (*GetNotificationChannelDescriptorRequest) ProtoMessage() {} +func (*GetNotificationChannelDescriptorRequest) Descriptor() ([]byte, []int) { + return fileDescriptor9, []int{2} +} + +func (m *GetNotificationChannelDescriptorRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `CreateNotificationChannel` request. +type CreateNotificationChannelRequest struct { + // The project on which to execute the request. The format is: + // + // projects/[PROJECT_ID] + // + // Note that this names the container into which the channel will be + // written. This does not name the newly created channel. The resulting + // channel's name will have a normalized version of this field as a prefix, + // but will add `/notificationChannels/[CHANNEL_ID]` to identify the channel. + Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` + // The definition of the `NotificationChannel` to create. + NotificationChannel *NotificationChannel `protobuf:"bytes,2,opt,name=notification_channel,json=notificationChannel" json:"notification_channel,omitempty"` +} + +func (m *CreateNotificationChannelRequest) Reset() { *m = CreateNotificationChannelRequest{} } +func (m *CreateNotificationChannelRequest) String() string { return proto.CompactTextString(m) } +func (*CreateNotificationChannelRequest) ProtoMessage() {} +func (*CreateNotificationChannelRequest) Descriptor() ([]byte, []int) { + return fileDescriptor9, []int{3} +} + +func (m *CreateNotificationChannelRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateNotificationChannelRequest) GetNotificationChannel() *NotificationChannel { + if m != nil { + return m.NotificationChannel + } + return nil +} + +// The `ListNotificationChannels` request. +type ListNotificationChannelsRequest struct { + // The project on which to execute the request. The format is + // `projects/[PROJECT_ID]`. That is, this names the container + // in which to look for the notification channels; it does not name a + // specific channel. To query a specific channel by REST resource name, use + // the + // [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel] operation. + Name string `protobuf:"bytes,5,opt,name=name" json:"name,omitempty"` + // If provided, this field specifies the criteria that must be met by + // notification channels to be included in the response. + // + // For more details, see [sorting and + // filtering](/monitoring/api/v3/sorting-and-filtering). + Filter string `protobuf:"bytes,6,opt,name=filter" json:"filter,omitempty"` + // A comma-separated list of fields by which to sort the result. Supports + // the same set of fields as in `filter`. Entries can be prefixed with + // a minus sign to sort in descending rather than ascending order. + // + // For more details, see [sorting and + // filtering](/monitoring/api/v3/sorting-and-filtering). + OrderBy string `protobuf:"bytes,7,opt,name=order_by,json=orderBy" json:"order_by,omitempty"` + // The maximum number of results to return in a single response. If + // not set to a positive number, a reasonable value will be chosen by the + // service. 
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // If non-empty, `page_token` must contain a value returned as the + // `next_page_token` in a previous response to request the next set + // of results. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListNotificationChannelsRequest) Reset() { *m = ListNotificationChannelsRequest{} } +func (m *ListNotificationChannelsRequest) String() string { return proto.CompactTextString(m) } +func (*ListNotificationChannelsRequest) ProtoMessage() {} +func (*ListNotificationChannelsRequest) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{4} } + +func (m *ListNotificationChannelsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListNotificationChannelsRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListNotificationChannelsRequest) GetOrderBy() string { + if m != nil { + return m.OrderBy + } + return "" +} + +func (m *ListNotificationChannelsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListNotificationChannelsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The `ListNotificationChannels` response. +type ListNotificationChannelsResponse struct { + // The notification channels defined for the specified project. + NotificationChannels []*NotificationChannel `protobuf:"bytes,3,rep,name=notification_channels,json=notificationChannels" json:"notification_channels,omitempty"` + // If not empty, indicates that there may be more results that match + // the request. Use the value in the `page_token` field in a + // subsequent request to fetch the next set of results. If empty, + // all results have been returned. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListNotificationChannelsResponse) Reset() { *m = ListNotificationChannelsResponse{} } +func (m *ListNotificationChannelsResponse) String() string { return proto.CompactTextString(m) } +func (*ListNotificationChannelsResponse) ProtoMessage() {} +func (*ListNotificationChannelsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor9, []int{5} +} + +func (m *ListNotificationChannelsResponse) GetNotificationChannels() []*NotificationChannel { + if m != nil { + return m.NotificationChannels + } + return nil +} + +func (m *ListNotificationChannelsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The `GetNotificationChannel` request. +type GetNotificationChannelRequest struct { + // The channel for which to execute the request. The format is + // `projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]`. + Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` +} + +func (m *GetNotificationChannelRequest) Reset() { *m = GetNotificationChannelRequest{} } +func (m *GetNotificationChannelRequest) String() string { return proto.CompactTextString(m) } +func (*GetNotificationChannelRequest) ProtoMessage() {} +func (*GetNotificationChannelRequest) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{6} } + +func (m *GetNotificationChannelRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `UpdateNotificationChannel` request. +type UpdateNotificationChannelRequest struct { + // The fields to update. 
+ UpdateMask *google_protobuf6.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` + // A description of the changes to be applied to the specified + // notification channel. The description must provide a definition for + // fields to be updated; the names of these fields should also be + // included in the `update_mask`. + NotificationChannel *NotificationChannel `protobuf:"bytes,3,opt,name=notification_channel,json=notificationChannel" json:"notification_channel,omitempty"` +} + +func (m *UpdateNotificationChannelRequest) Reset() { *m = UpdateNotificationChannelRequest{} } +func (m *UpdateNotificationChannelRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateNotificationChannelRequest) ProtoMessage() {} +func (*UpdateNotificationChannelRequest) Descriptor() ([]byte, []int) { + return fileDescriptor9, []int{7} +} + +func (m *UpdateNotificationChannelRequest) GetUpdateMask() *google_protobuf6.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateNotificationChannelRequest) GetNotificationChannel() *NotificationChannel { + if m != nil { + return m.NotificationChannel + } + return nil +} + +// The `DeleteNotificationChannel` request. +type DeleteNotificationChannelRequest struct { + // The channel for which to execute the request. The format is + // `projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]`. + Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` + // If true, the notification channel will be deleted regardless of its + // use in alert policies (the policies will be updated to remove the + // channel). If false, channels that are still referenced by an existing + // alerting policy will fail to be deleted in a delete operation. + Force bool `protobuf:"varint,5,opt,name=force" json:"force,omitempty"` +} + +func (m *DeleteNotificationChannelRequest) Reset() { *m = DeleteNotificationChannelRequest{} } +func (m *DeleteNotificationChannelRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteNotificationChannelRequest) ProtoMessage() {} +func (*DeleteNotificationChannelRequest) Descriptor() ([]byte, []int) { + return fileDescriptor9, []int{8} +} + +func (m *DeleteNotificationChannelRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *DeleteNotificationChannelRequest) GetForce() bool { + if m != nil { + return m.Force + } + return false +} + +// The `SendNotificationChannelVerificationCode` request. +type SendNotificationChannelVerificationCodeRequest struct { + // The notification channel to which to send a verification code. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *SendNotificationChannelVerificationCodeRequest) Reset() { + *m = SendNotificationChannelVerificationCodeRequest{} +} +func (m *SendNotificationChannelVerificationCodeRequest) String() string { + return proto.CompactTextString(m) +} +func (*SendNotificationChannelVerificationCodeRequest) ProtoMessage() {} +func (*SendNotificationChannelVerificationCodeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor9, []int{9} +} + +func (m *SendNotificationChannelVerificationCodeRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `GetNotificationChannelVerificationCode` request. +type GetNotificationChannelVerificationCodeRequest struct { + // The notification channel for which a verification code is to be generated + // and retrieved. 
This must name a channel that is already verified; if + // the specified channel is not verified, the request will fail. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The desired expiration time. If specified, the API will guarantee that + // the returned code will not be valid after the specified timestamp; + // however, the API cannot guarantee that the returned code will be + // valid for at least as long as the requested time (the API puts an upper + // bound on the amount of time for which a code may be valid). If omitted, + // a default expiration will be used, which may be less than the max + // permissible expiration (so specifying an expiration may extend the + // code's lifetime over omitting an expiration, even though the API does + // impose an upper limit on the maximum expiration that is permitted). + ExpireTime *google_protobuf2.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime" json:"expire_time,omitempty"` +} + +func (m *GetNotificationChannelVerificationCodeRequest) Reset() { + *m = GetNotificationChannelVerificationCodeRequest{} +} +func (m *GetNotificationChannelVerificationCodeRequest) String() string { + return proto.CompactTextString(m) +} +func (*GetNotificationChannelVerificationCodeRequest) ProtoMessage() {} +func (*GetNotificationChannelVerificationCodeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor9, []int{10} +} + +func (m *GetNotificationChannelVerificationCodeRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GetNotificationChannelVerificationCodeRequest) GetExpireTime() *google_protobuf2.Timestamp { + if m != nil { + return m.ExpireTime + } + return nil +} + +// The `GetNotificationChannelVerificationCode` response. +type GetNotificationChannelVerificationCodeResponse struct { + // The verification code, which may be used to verify other channels + // that have an equivalent identity (i.e. other channels of the same + // type with the same fingerprint such as other email channels with + // the same email address or other sms channels with the same number). + Code string `protobuf:"bytes,1,opt,name=code" json:"code,omitempty"` + // The expiration time associated with the code that was returned. If + // an expiration was provided in the request, this is the minimum of the + // requested expiration in the request and the max permitted expiration. + ExpireTime *google_protobuf2.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime" json:"expire_time,omitempty"` +} + +func (m *GetNotificationChannelVerificationCodeResponse) Reset() { + *m = GetNotificationChannelVerificationCodeResponse{} +} +func (m *GetNotificationChannelVerificationCodeResponse) String() string { + return proto.CompactTextString(m) +} +func (*GetNotificationChannelVerificationCodeResponse) ProtoMessage() {} +func (*GetNotificationChannelVerificationCodeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor9, []int{11} +} + +func (m *GetNotificationChannelVerificationCodeResponse) GetCode() string { + if m != nil { + return m.Code + } + return "" +} + +func (m *GetNotificationChannelVerificationCodeResponse) GetExpireTime() *google_protobuf2.Timestamp { + if m != nil { + return m.ExpireTime + } + return nil +} + +// The `VerifyNotificationChannel` request. +type VerifyNotificationChannelRequest struct { + // The notification channel to verify.
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The verification code that was delivered to the channel as + // a result of invoking the `SendNotificationChannelVerificationCode` API + // method or that was retrieved from a verified channel via + // `GetNotificationChannelVerificationCode`. For example, one might have + // "G-123456" or "TKNZGhhd2EyN3I1MnRnMjRv" (in general, one is only + // guaranteed that the code is valid UTF-8; one should not + // make any assumptions regarding the structure or format of the code). + Code string `protobuf:"bytes,2,opt,name=code" json:"code,omitempty"` +} + +func (m *VerifyNotificationChannelRequest) Reset() { *m = VerifyNotificationChannelRequest{} } +func (m *VerifyNotificationChannelRequest) String() string { return proto.CompactTextString(m) } +func (*VerifyNotificationChannelRequest) ProtoMessage() {} +func (*VerifyNotificationChannelRequest) Descriptor() ([]byte, []int) { + return fileDescriptor9, []int{12} +} + +func (m *VerifyNotificationChannelRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *VerifyNotificationChannelRequest) GetCode() string { + if m != nil { + return m.Code + } + return "" +} + +func init() { + proto.RegisterType((*ListNotificationChannelDescriptorsRequest)(nil), "google.monitoring.v3.ListNotificationChannelDescriptorsRequest") + proto.RegisterType((*ListNotificationChannelDescriptorsResponse)(nil), "google.monitoring.v3.ListNotificationChannelDescriptorsResponse") + proto.RegisterType((*GetNotificationChannelDescriptorRequest)(nil), "google.monitoring.v3.GetNotificationChannelDescriptorRequest") + proto.RegisterType((*CreateNotificationChannelRequest)(nil), "google.monitoring.v3.CreateNotificationChannelRequest") + proto.RegisterType((*ListNotificationChannelsRequest)(nil), "google.monitoring.v3.ListNotificationChannelsRequest") + proto.RegisterType((*ListNotificationChannelsResponse)(nil), "google.monitoring.v3.ListNotificationChannelsResponse") + proto.RegisterType((*GetNotificationChannelRequest)(nil), "google.monitoring.v3.GetNotificationChannelRequest") + proto.RegisterType((*UpdateNotificationChannelRequest)(nil), "google.monitoring.v3.UpdateNotificationChannelRequest") + proto.RegisterType((*DeleteNotificationChannelRequest)(nil), "google.monitoring.v3.DeleteNotificationChannelRequest") + proto.RegisterType((*SendNotificationChannelVerificationCodeRequest)(nil), "google.monitoring.v3.SendNotificationChannelVerificationCodeRequest") + proto.RegisterType((*GetNotificationChannelVerificationCodeRequest)(nil), "google.monitoring.v3.GetNotificationChannelVerificationCodeRequest") + proto.RegisterType((*GetNotificationChannelVerificationCodeResponse)(nil), "google.monitoring.v3.GetNotificationChannelVerificationCodeResponse") + proto.RegisterType((*VerifyNotificationChannelRequest)(nil), "google.monitoring.v3.VerifyNotificationChannelRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for NotificationChannelService service + +type NotificationChannelServiceClient interface { + // Lists the descriptors for supported channel types. The use of descriptors + // makes it possible for new channel types to be dynamically added. 
+ ListNotificationChannelDescriptors(ctx context.Context, in *ListNotificationChannelDescriptorsRequest, opts ...grpc.CallOption) (*ListNotificationChannelDescriptorsResponse, error) + // Gets a single channel descriptor. The descriptor indicates which fields + // are expected / permitted for a notification channel of the given type. + GetNotificationChannelDescriptor(ctx context.Context, in *GetNotificationChannelDescriptorRequest, opts ...grpc.CallOption) (*NotificationChannelDescriptor, error) + // Lists the notification channels that have been created for the project. + ListNotificationChannels(ctx context.Context, in *ListNotificationChannelsRequest, opts ...grpc.CallOption) (*ListNotificationChannelsResponse, error) + // Gets a single notification channel. The channel includes the relevant + // configuration details with which the channel was created. However, the + // response may truncate or omit passwords, API keys, or other private key + // matter and thus the response may not be 100% identical to the information + // that was supplied in the call to the create method. + GetNotificationChannel(ctx context.Context, in *GetNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) + // Creates a new notification channel, representing a single notification + // endpoint such as an email address, SMS number, or pagerduty service. + CreateNotificationChannel(ctx context.Context, in *CreateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) + // Updates a notification channel. Fields not specified in the field mask + // remain unchanged. + UpdateNotificationChannel(ctx context.Context, in *UpdateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) + // Deletes a notification channel. + DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) + // Causes a verification code to be delivered to the channel. The code + // can then be supplied in `VerifyNotificationChannel` to verify the channel. + SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) + // Requests a verification code for an already verified channel that can then + // be used in a call to VerifyNotificationChannel() on a different channel + // with an equivalent identity in the same or in a different project. This + // makes it possible to copy a channel between projects without requiring + // manual reverification of the channel. If the channel is not in the + // verified state, this method will fail (in other words, this may only be + // used if the SendNotificationChannelVerificationCode and + // VerifyNotificationChannel paths have already been used to put the given + // channel into the verified state). + // + // There is no guarantee that the verification codes returned by this method + // will be of a similar structure or form as the ones that are delivered + // to the channel via SendNotificationChannelVerificationCode; while + // VerifyNotificationChannel() will recognize both the codes delivered via + // SendNotificationChannelVerificationCode() and returned from + // GetNotificationChannelVerificationCode(), it is typically the case that + // the verification codes delivered via + // SendNotificationChannelVerificationCode() will be shorter and also + // have a shorter expiration (e.g. 
codes such as "G-123456") whereas + // GetVerificationCode() will typically return a much longer, websafe base + // 64 encoded string that has a longer expiration time. + GetNotificationChannelVerificationCode(ctx context.Context, in *GetNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*GetNotificationChannelVerificationCodeResponse, error) + // Verifies a `NotificationChannel` by proving receipt of the code + // delivered to the channel as a result of calling + // `SendNotificationChannelVerificationCode`. + VerifyNotificationChannel(ctx context.Context, in *VerifyNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) +} + +type notificationChannelServiceClient struct { + cc *grpc.ClientConn +} + +func NewNotificationChannelServiceClient(cc *grpc.ClientConn) NotificationChannelServiceClient { + return ¬ificationChannelServiceClient{cc} +} + +func (c *notificationChannelServiceClient) ListNotificationChannelDescriptors(ctx context.Context, in *ListNotificationChannelDescriptorsRequest, opts ...grpc.CallOption) (*ListNotificationChannelDescriptorsResponse, error) { + out := new(ListNotificationChannelDescriptorsResponse) + err := grpc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) GetNotificationChannelDescriptor(ctx context.Context, in *GetNotificationChannelDescriptorRequest, opts ...grpc.CallOption) (*NotificationChannelDescriptor, error) { + out := new(NotificationChannelDescriptor) + err := grpc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) ListNotificationChannels(ctx context.Context, in *ListNotificationChannelsRequest, opts ...grpc.CallOption) (*ListNotificationChannelsResponse, error) { + out := new(ListNotificationChannelsResponse) + err := grpc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/ListNotificationChannels", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) GetNotificationChannel(ctx context.Context, in *GetNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { + out := new(NotificationChannel) + err := grpc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) CreateNotificationChannel(ctx context.Context, in *CreateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { + out := new(NotificationChannel) + err := grpc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) UpdateNotificationChannel(ctx context.Context, in *UpdateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { + out := new(NotificationChannel) + err := grpc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) { + out := new(google_protobuf5.Empty) + err := grpc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) { + out := new(google_protobuf5.Empty) + err := grpc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) GetNotificationChannelVerificationCode(ctx context.Context, in *GetNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*GetNotificationChannelVerificationCodeResponse, error) { + out := new(GetNotificationChannelVerificationCodeResponse) + err := grpc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) VerifyNotificationChannel(ctx context.Context, in *VerifyNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { + out := new(NotificationChannel) + err := grpc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for NotificationChannelService service + +type NotificationChannelServiceServer interface { + // Lists the descriptors for supported channel types. The use of descriptors + // makes it possible for new channel types to be dynamically added. + ListNotificationChannelDescriptors(context.Context, *ListNotificationChannelDescriptorsRequest) (*ListNotificationChannelDescriptorsResponse, error) + // Gets a single channel descriptor. The descriptor indicates which fields + // are expected / permitted for a notification channel of the given type. + GetNotificationChannelDescriptor(context.Context, *GetNotificationChannelDescriptorRequest) (*NotificationChannelDescriptor, error) + // Lists the notification channels that have been created for the project. + ListNotificationChannels(context.Context, *ListNotificationChannelsRequest) (*ListNotificationChannelsResponse, error) + // Gets a single notification channel. The channel includes the relevant + // configuration details with which the channel was created. However, the + // response may truncate or omit passwords, API keys, or other private key + // matter and thus the response may not be 100% identical to the information + // that was supplied in the call to the create method. + GetNotificationChannel(context.Context, *GetNotificationChannelRequest) (*NotificationChannel, error) + // Creates a new notification channel, representing a single notification + // endpoint such as an email address, SMS number, or pagerduty service. + CreateNotificationChannel(context.Context, *CreateNotificationChannelRequest) (*NotificationChannel, error) + // Updates a notification channel. 
Fields not specified in the field mask + // remain unchanged. + UpdateNotificationChannel(context.Context, *UpdateNotificationChannelRequest) (*NotificationChannel, error) + // Deletes a notification channel. + DeleteNotificationChannel(context.Context, *DeleteNotificationChannelRequest) (*google_protobuf5.Empty, error) + // Causes a verification code to be delivered to the channel. The code + // can then be supplied in `VerifyNotificationChannel` to verify the channel. + SendNotificationChannelVerificationCode(context.Context, *SendNotificationChannelVerificationCodeRequest) (*google_protobuf5.Empty, error) + // Requests a verification code for an already verified channel that can then + // be used in a call to VerifyNotificationChannel() on a different channel + // with an equivalent identity in the same or in a different project. This + // makes it possible to copy a channel between projects without requiring + // manual reverification of the channel. If the channel is not in the + // verified state, this method will fail (in other words, this may only be + // used if the SendNotificationChannelVerificationCode and + // VerifyNotificationChannel paths have already been used to put the given + // channel into the verified state). + // + // There is no guarantee that the verification codes returned by this method + // will be of a similar structure or form as the ones that are delivered + // to the channel via SendNotificationChannelVerificationCode; while + // VerifyNotificationChannel() will recognize both the codes delivered via + // SendNotificationChannelVerificationCode() and returned from + // GetNotificationChannelVerificationCode(), it is typically the case that + // the verification codes delivered via + // SendNotificationChannelVerificationCode() will be shorter and also + // have a shorter expiration (e.g. codes such as "G-123456") whereas + // GetVerificationCode() will typically return a much longer, websafe base + // 64 encoded string that has a longer expiration time. + GetNotificationChannelVerificationCode(context.Context, *GetNotificationChannelVerificationCodeRequest) (*GetNotificationChannelVerificationCodeResponse, error) + // Verifies a `NotificationChannel` by proving receipt of the code + // delivered to the channel as a result of calling + // `SendNotificationChannelVerificationCode`. 
+ VerifyNotificationChannel(context.Context, *VerifyNotificationChannelRequest) (*NotificationChannel, error) +} + +func RegisterNotificationChannelServiceServer(s *grpc.Server, srv NotificationChannelServiceServer) { + s.RegisterService(&_NotificationChannelService_serviceDesc, srv) +} + +func _NotificationChannelService_ListNotificationChannelDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNotificationChannelDescriptorsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).ListNotificationChannelDescriptors(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).ListNotificationChannelDescriptors(ctx, req.(*ListNotificationChannelDescriptorsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_GetNotificationChannelDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNotificationChannelDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).GetNotificationChannelDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).GetNotificationChannelDescriptor(ctx, req.(*GetNotificationChannelDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_ListNotificationChannels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNotificationChannelsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).ListNotificationChannels(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/ListNotificationChannels", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).ListNotificationChannels(ctx, req.(*ListNotificationChannelsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_GetNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).GetNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).GetNotificationChannel(ctx, req.(*GetNotificationChannelRequest)) + 
} + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_CreateNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).CreateNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).CreateNotificationChannel(ctx, req.(*CreateNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_UpdateNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).UpdateNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).UpdateNotificationChannel(ctx, req.(*UpdateNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_DeleteNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).DeleteNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).DeleteNotificationChannel(ctx, req.(*DeleteNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_SendNotificationChannelVerificationCode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SendNotificationChannelVerificationCodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).SendNotificationChannelVerificationCode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).SendNotificationChannelVerificationCode(ctx, req.(*SendNotificationChannelVerificationCodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_GetNotificationChannelVerificationCode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNotificationChannelVerificationCodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).GetNotificationChannelVerificationCode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).GetNotificationChannelVerificationCode(ctx, req.(*GetNotificationChannelVerificationCodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_VerifyNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VerifyNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).VerifyNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).VerifyNotificationChannel(ctx, req.(*VerifyNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _NotificationChannelService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.NotificationChannelService", + HandlerType: (*NotificationChannelServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListNotificationChannelDescriptors", + Handler: _NotificationChannelService_ListNotificationChannelDescriptors_Handler, + }, + { + MethodName: "GetNotificationChannelDescriptor", + Handler: _NotificationChannelService_GetNotificationChannelDescriptor_Handler, + }, + { + MethodName: "ListNotificationChannels", + Handler: _NotificationChannelService_ListNotificationChannels_Handler, + }, + { + MethodName: "GetNotificationChannel", + Handler: _NotificationChannelService_GetNotificationChannel_Handler, + }, + { + MethodName: "CreateNotificationChannel", + Handler: _NotificationChannelService_CreateNotificationChannel_Handler, + }, + { + MethodName: "UpdateNotificationChannel", + Handler: _NotificationChannelService_UpdateNotificationChannel_Handler, + }, + { + MethodName: "DeleteNotificationChannel", + Handler: _NotificationChannelService_DeleteNotificationChannel_Handler, + }, + { + MethodName: "SendNotificationChannelVerificationCode", + Handler: _NotificationChannelService_SendNotificationChannelVerificationCode_Handler, + }, + { + MethodName: "GetNotificationChannelVerificationCode", + Handler: _NotificationChannelService_GetNotificationChannelVerificationCode_Handler, + }, + { + MethodName: "VerifyNotificationChannel", + Handler: _NotificationChannelService_VerifyNotificationChannel_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/notification_service.proto", +} + +func init() { proto.RegisterFile("google/monitoring/v3/notification_service.proto", fileDescriptor9) } + +var fileDescriptor9 = []byte{ + // 1011 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0x41, 0x6f, 0xdc, 0x44, + 0x14, 0xd6, 0xec, 0x26, 0x69, 0xfa, 0x22, 0x04, 0x9a, 0x86, 0xc8, 
0xbb, 0xa5, 0xaa, 0xe5, 0x43, + 0x93, 0xae, 0x8a, 0x2d, 0xad, 0x4b, 0x84, 0x52, 0x52, 0xda, 0x64, 0xdb, 0x22, 0x48, 0x51, 0xb4, + 0x29, 0x91, 0x40, 0x11, 0x2b, 0xc7, 0x7e, 0x6b, 0x4c, 0x76, 0x67, 0x8c, 0x3d, 0x89, 0x9a, 0x56, + 0x95, 0x0a, 0x7f, 0x01, 0xfe, 0x00, 0x12, 0xa7, 0x1e, 0x10, 0x67, 0x50, 0x39, 0x23, 0xae, 0x08, + 0xae, 0x5c, 0xe0, 0x7f, 0x20, 0x8f, 0xbd, 0xd9, 0xcd, 0x66, 0xbc, 0x6b, 0x37, 0xdc, 0x3c, 0xf3, + 0xde, 0xbc, 0xf7, 0xbd, 0xef, 0x7d, 0x9e, 0x67, 0x83, 0xe5, 0x73, 0xee, 0xf7, 0xd0, 0xea, 0x73, + 0x16, 0x08, 0x1e, 0x05, 0xcc, 0xb7, 0x8e, 0x6c, 0x8b, 0x71, 0x11, 0x74, 0x03, 0xd7, 0x11, 0x01, + 0x67, 0x9d, 0x18, 0xa3, 0xa3, 0xc0, 0x45, 0x33, 0x8c, 0xb8, 0xe0, 0x74, 0x31, 0x3d, 0x60, 0x0e, + 0x0f, 0x98, 0x47, 0x76, 0xfd, 0xad, 0x2c, 0x8c, 0x13, 0x06, 0x96, 0xc3, 0x18, 0x17, 0xf2, 0x68, + 0x9c, 0x9e, 0xa9, 0x2f, 0x4f, 0x4d, 0x92, 0x39, 0x5e, 0xce, 0x1c, 0xe5, 0x6a, 0xff, 0xb0, 0x6b, + 0x61, 0x3f, 0x14, 0xc7, 0x99, 0x51, 0x1f, 0x37, 0x76, 0x03, 0xec, 0x79, 0x9d, 0xbe, 0x13, 0x1f, + 0x64, 0x1e, 0x57, 0xc7, 0x3d, 0x44, 0xd0, 0xc7, 0x58, 0x38, 0xfd, 0x30, 0x75, 0x30, 0x9e, 0xc2, + 0xf5, 0xad, 0x20, 0x16, 0x1f, 0x8f, 0x64, 0xde, 0xfc, 0xc2, 0x61, 0x0c, 0x7b, 0x2d, 0x8c, 0xdd, + 0x28, 0x08, 0x05, 0x8f, 0xe2, 0x36, 0x7e, 0x75, 0x88, 0xb1, 0xa0, 0x14, 0x66, 0x98, 0xd3, 0x47, + 0x6d, 0x46, 0x27, 0x2b, 0x17, 0xdb, 0xf2, 0x99, 0x5e, 0x86, 0x8b, 0xa1, 0xe3, 0x63, 0x27, 0x0e, + 0x9e, 0xa0, 0x56, 0xd1, 0xc9, 0xca, 0x6c, 0x7b, 0x3e, 0xd9, 0xd8, 0x09, 0x9e, 0x20, 0xbd, 0x02, + 0x20, 0x8d, 0x82, 0x1f, 0x20, 0xd3, 0xaa, 0xf2, 0x98, 0x74, 0x7f, 0x94, 0x6c, 0x18, 0x3f, 0x13, + 0x68, 0x14, 0xc9, 0x1e, 0x87, 0x9c, 0xc5, 0x48, 0x3d, 0xb8, 0xe4, 0xa6, 0xd6, 0x8e, 0x37, 0x34, + 0x6b, 0x44, 0xaf, 0xae, 0x2c, 0x34, 0x6d, 0x53, 0xd5, 0x06, 0x73, 0x62, 0xe8, 0x36, 0x75, 0xcf, + 0x64, 0xa3, 0xd7, 0xe0, 0x75, 0x86, 0x8f, 0x45, 0x67, 0x04, 0x78, 0x45, 0x02, 0x7f, 0x2d, 0xd9, + 0xde, 0x3e, 0x01, 0xbf, 0x0e, 0xcb, 0x0f, 0x70, 0x32, 0xf4, 0x71, 0xde, 0xaa, 0x43, 0xde, 0x8c, + 0xef, 0x08, 0xe8, 0x9b, 0x11, 0x3a, 0x02, 0x15, 0x21, 0x26, 0x1c, 0xa4, 0x7b, 0xb0, 0x78, 0x4a, + 0x8c, 0x59, 0x09, 0x12, 0xe4, 0x42, 0xf3, 0x7a, 0x61, 0x1a, 0xda, 0x97, 0xd8, 0xd9, 0x4d, 0xe3, + 0x07, 0x02, 0x57, 0x73, 0x5a, 0x72, 0x46, 0x06, 0xb3, 0x23, 0xa8, 0x96, 0x60, 0xae, 0x1b, 0xf4, + 0x04, 0x46, 0xda, 0x9c, 0xdc, 0xcd, 0x56, 0xb4, 0x06, 0xf3, 0x3c, 0xf2, 0x30, 0xea, 0xec, 0x1f, + 0x6b, 0x17, 0xa4, 0xe5, 0x82, 0x5c, 0x6f, 0x1c, 0x9f, 0x56, 0x4e, 0x75, 0xa2, 0x72, 0x66, 0xc6, + 0x95, 0xf3, 0x82, 0x80, 0x9e, 0x0f, 0x33, 0xd3, 0xcb, 0xe7, 0xf0, 0xa6, 0x8a, 0xa9, 0x58, 0xab, + 0x4a, 0xc5, 0x94, 0xa0, 0x6a, 0x51, 0x41, 0x55, 0x71, 0xa5, 0xd8, 0x70, 0x45, 0xad, 0x94, 0x49, + 0xfa, 0x78, 0x49, 0x40, 0xff, 0x24, 0xf4, 0x26, 0xeb, 0xe3, 0x16, 0x2c, 0x1c, 0x4a, 0x1f, 0xf9, + 0xce, 0x67, 0x12, 0xa8, 0x0f, 0xea, 0x1a, 0xbc, 0xf4, 0xe6, 0xfd, 0xe4, 0x5a, 0x78, 0xe8, 0xc4, + 0x07, 0x6d, 0x48, 0xdd, 0x93, 0xe7, 0x5c, 0x21, 0x55, 0xff, 0x17, 0x21, 0x6d, 0x81, 0xde, 0xc2, + 0x1e, 0x96, 0x96, 0xf7, 0x22, 0xcc, 0x76, 0x79, 0xe4, 0xa6, 0xea, 0x9a, 0x6f, 0xa7, 0x0b, 0xa3, + 0x05, 0xe6, 0x0e, 0x32, 0x4f, 0x11, 0x6b, 0x17, 0xa3, 0xe1, 0x16, 0xf7, 0x70, 0x3c, 0x36, 0x19, + 0xe1, 0xf4, 0x39, 0x81, 0xb7, 0xd5, 0x9d, 0x28, 0x11, 0x25, 0x21, 0x1d, 0x1f, 0x87, 0x41, 0x84, + 0x9d, 0xe4, 0x32, 0xcd, 0x25, 0xfd, 0xd1, 0xe0, 0xa6, 0x6d, 0x43, 0xea, 0x9e, 0x6c, 0x18, 0x5f, + 0x13, 0x30, 0x8b, 0x42, 0xc8, 0x64, 0x4c, 0x61, 0xc6, 0xe5, 0xde, 0x09, 0x86, 0xe4, 0xf9, 0x7c, + 0x18, 0x3e, 0x04, 0x5d, 0x26, 0x3b, 0x2e, 0xd0, 0x9a, 0xd1, 0xc2, 0x07, 0x40, 0x2a, 0x43, 
0x20, + 0xcd, 0x5f, 0xde, 0x80, 0xba, 0x22, 0xcc, 0x4e, 0x3a, 0x21, 0xe9, 0xbf, 0x04, 0x8c, 0xe9, 0x37, + 0x3c, 0x7d, 0x5f, 0x2d, 0xb6, 0xc2, 0x93, 0xa9, 0x7e, 0xe7, 0xd5, 0x03, 0xa4, 0x2c, 0x1b, 0xef, + 0x7d, 0xf3, 0xc7, 0x3f, 0xdf, 0x56, 0x56, 0xe9, 0xcd, 0x64, 0x10, 0x3f, 0x4d, 0xea, 0x5d, 0x0f, + 0x23, 0xfe, 0x25, 0xba, 0x22, 0xb6, 0x1a, 0xcf, 0x2c, 0x36, 0xb9, 0x80, 0xbf, 0x08, 0xe8, 0xd3, + 0xa6, 0x01, 0x5d, 0x57, 0x83, 0x2c, 0x38, 0x45, 0xea, 0xaf, 0x32, 0xe1, 0x8c, 0xdb, 0xb2, 0xac, + 0x77, 0xe9, 0xaa, 0xaa, 0xac, 0x29, 0x55, 0x59, 0x8d, 0x67, 0xf4, 0x25, 0x01, 0x2d, 0xef, 0xa2, + 0xa5, 0xef, 0x94, 0x62, 0xfd, 0xa4, 0x59, 0xab, 0x65, 0x8f, 0x65, 0x2d, 0x6a, 0xca, 0x5a, 0x6e, + 0xd0, 0x46, 0xe1, 0x16, 0xc5, 0xf4, 0x47, 0x02, 0x4b, 0x6a, 0x82, 0xa9, 0x5d, 0xa6, 0x1d, 0x03, + 0xec, 0xc5, 0xaf, 0x45, 0xe3, 0xa6, 0x84, 0x6b, 0xd2, 0x1b, 0x45, 0xa9, 0x97, 0x84, 0xff, 0x46, + 0xa0, 0x96, 0xfb, 0x5d, 0x40, 0x73, 0xa8, 0x9b, 0xf6, 0x21, 0x51, 0x06, 0xf6, 0x07, 0x12, 0xf6, + 0x86, 0x51, 0x82, 0xe5, 0x35, 0xe5, 0x20, 0xa1, 0x7f, 0x13, 0xa8, 0xe5, 0x8e, 0xb0, 0xbc, 0x52, + 0xa6, 0xcd, 0xbc, 0x32, 0xa5, 0x74, 0x64, 0x29, 0x9f, 0x36, 0xef, 0xa6, 0xa5, 0x28, 0x30, 0x9a, + 0x05, 0xdb, 0x92, 0x53, 0xe1, 0xf7, 0x04, 0x6a, 0xb9, 0x53, 0x2e, 0xaf, 0xc2, 0x69, 0x63, 0xb1, + 0xbe, 0x74, 0xe6, 0x1e, 0xbf, 0x97, 0x7c, 0xf4, 0x0f, 0x04, 0xd5, 0x28, 0x27, 0xa8, 0x3f, 0x09, + 0x2c, 0x17, 0x9c, 0x9d, 0xb4, 0xa5, 0x46, 0x5c, 0x6e, 0xf4, 0xe6, 0xe2, 0xdf, 0x92, 0xf8, 0xef, + 0x1b, 0x77, 0xcb, 0xe0, 0x5f, 0x8b, 0x91, 0x79, 0xe3, 0x99, 0xd6, 0x48, 0x83, 0x3e, 0xaf, 0xc0, + 0xb5, 0x62, 0x93, 0x94, 0x6e, 0x96, 0x79, 0xd3, 0xf3, 0xaa, 0x6a, 0x9d, 0x2f, 0x48, 0x76, 0x87, + 0x7d, 0x24, 0x39, 0xb8, 0x67, 0xdc, 0x29, 0xc5, 0x81, 0x8f, 0x42, 0x45, 0xc1, 0xaf, 0x04, 0x6a, + 0xb9, 0x93, 0x3c, 0x4f, 0x7e, 0xd3, 0x46, 0x7f, 0x99, 0x17, 0x2c, 0x9b, 0x2e, 0x86, 0x5d, 0xaa, + 0x9a, 0x23, 0x89, 0x60, 0x8d, 0x34, 0x36, 0x7e, 0x22, 0xa0, 0xb9, 0xbc, 0xaf, 0x4c, 0xb8, 0xa1, + 0x8d, 0x66, 0xcc, 0x3e, 0x28, 0xb6, 0x13, 0x45, 0x6d, 0x93, 0xcf, 0x6e, 0x67, 0x27, 0x7c, 0xde, + 0x73, 0x98, 0x6f, 0xf2, 0xc8, 0xb7, 0x7c, 0x64, 0x52, 0x6f, 0xd9, 0xff, 0xbb, 0x13, 0x06, 0xf1, + 0xe9, 0xdf, 0xeb, 0x5b, 0xc3, 0xd5, 0x8b, 0x4a, 0xfd, 0x41, 0x1a, 0x60, 0xb3, 0xc7, 0x0f, 0x3d, + 0xf3, 0xe1, 0x30, 0xf1, 0xae, 0xfd, 0xfb, 0xc0, 0xb8, 0x27, 0x8d, 0x7b, 0x43, 0xe3, 0xde, 0xae, + 0xbd, 0x3f, 0x27, 0x93, 0xd8, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x3b, 0xf3, 0x96, 0xf5, 0x27, + 0x10, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go index 042cddd04..9357d352a 100644 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go @@ -51,7 +51,7 @@ var UptimeCheckRegion_value = map[string]int32{ func (x UptimeCheckRegion) String() string { return proto.EnumName(UptimeCheckRegion_name, int32(x)) } -func (UptimeCheckRegion) EnumDescriptor() ([]byte, []int) { return fileDescriptor5, []int{0} } +func (UptimeCheckRegion) EnumDescriptor() ([]byte, []int) { return fileDescriptor10, []int{0} } // The supported resource types that can be used as values of // group_resource.resource_type. 
gae_app and uptime_url are not allowed @@ -81,7 +81,7 @@ var GroupResourceType_value = map[string]int32{ func (x GroupResourceType) String() string { return proto.EnumName(GroupResourceType_name, int32(x)) } -func (GroupResourceType) EnumDescriptor() ([]byte, []int) { return fileDescriptor5, []int{1} } +func (GroupResourceType) EnumDescriptor() ([]byte, []int) { return fileDescriptor10, []int{1} } // This message configures which resources and services to monitor for // availability. @@ -136,7 +136,7 @@ type UptimeCheckConfig struct { func (m *UptimeCheckConfig) Reset() { *m = UptimeCheckConfig{} } func (m *UptimeCheckConfig) String() string { return proto.CompactTextString(m) } func (*UptimeCheckConfig) ProtoMessage() {} -func (*UptimeCheckConfig) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{0} } +func (*UptimeCheckConfig) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{0} } type isUptimeCheckConfig_Resource interface { isUptimeCheckConfig_Resource() @@ -391,7 +391,7 @@ func (m *UptimeCheckConfig_ResourceGroup) Reset() { *m = UptimeCheckConf func (m *UptimeCheckConfig_ResourceGroup) String() string { return proto.CompactTextString(m) } func (*UptimeCheckConfig_ResourceGroup) ProtoMessage() {} func (*UptimeCheckConfig_ResourceGroup) Descriptor() ([]byte, []int) { - return fileDescriptor5, []int{0, 0} + return fileDescriptor10, []int{0, 0} } func (m *UptimeCheckConfig_ResourceGroup) GetGroupId() string { @@ -443,7 +443,7 @@ type UptimeCheckConfig_HttpCheck struct { func (m *UptimeCheckConfig_HttpCheck) Reset() { *m = UptimeCheckConfig_HttpCheck{} } func (m *UptimeCheckConfig_HttpCheck) String() string { return proto.CompactTextString(m) } func (*UptimeCheckConfig_HttpCheck) ProtoMessage() {} -func (*UptimeCheckConfig_HttpCheck) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{0, 1} } +func (*UptimeCheckConfig_HttpCheck) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{0, 1} } func (m *UptimeCheckConfig_HttpCheck) GetUseSsl() bool { if m != nil { @@ -505,7 +505,7 @@ func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) String() string { } func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) ProtoMessage() {} func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) Descriptor() ([]byte, []int) { - return fileDescriptor5, []int{0, 1, 0} + return fileDescriptor10, []int{0, 1, 0} } func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) GetUsername() string { @@ -533,7 +533,7 @@ type UptimeCheckConfig_TcpCheck struct { func (m *UptimeCheckConfig_TcpCheck) Reset() { *m = UptimeCheckConfig_TcpCheck{} } func (m *UptimeCheckConfig_TcpCheck) String() string { return proto.CompactTextString(m) } func (*UptimeCheckConfig_TcpCheck) ProtoMessage() {} -func (*UptimeCheckConfig_TcpCheck) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{0, 2} } +func (*UptimeCheckConfig_TcpCheck) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{0, 2} } func (m *UptimeCheckConfig_TcpCheck) GetPort() int32 { if m != nil { @@ -554,7 +554,7 @@ func (m *UptimeCheckConfig_ContentMatcher) Reset() { *m = UptimeCheckCon func (m *UptimeCheckConfig_ContentMatcher) String() string { return proto.CompactTextString(m) } func (*UptimeCheckConfig_ContentMatcher) ProtoMessage() {} func (*UptimeCheckConfig_ContentMatcher) Descriptor() ([]byte, []int) { - return fileDescriptor5, []int{0, 3} + return fileDescriptor10, []int{0, 3} } func (m *UptimeCheckConfig_ContentMatcher) GetContent() string { @@ -583,7 +583,7 @@ func (m 
*UptimeCheckConfig_InternalChecker) Reset() { *m = UptimeCheckCo func (m *UptimeCheckConfig_InternalChecker) String() string { return proto.CompactTextString(m) } func (*UptimeCheckConfig_InternalChecker) ProtoMessage() {} func (*UptimeCheckConfig_InternalChecker) Descriptor() ([]byte, []int) { - return fileDescriptor5, []int{0, 4} + return fileDescriptor10, []int{0, 4} } func (m *UptimeCheckConfig_InternalChecker) GetProjectId() string { @@ -641,7 +641,7 @@ type UptimeCheckIp struct { func (m *UptimeCheckIp) Reset() { *m = UptimeCheckIp{} } func (m *UptimeCheckIp) String() string { return proto.CompactTextString(m) } func (*UptimeCheckIp) ProtoMessage() {} -func (*UptimeCheckIp) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{1} } +func (*UptimeCheckIp) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{1} } func (m *UptimeCheckIp) GetRegion() UptimeCheckRegion { if m != nil { @@ -677,9 +677,9 @@ func init() { proto.RegisterEnum("google.monitoring.v3.GroupResourceType", GroupResourceType_name, GroupResourceType_value) } -func init() { proto.RegisterFile("google/monitoring/v3/uptime.proto", fileDescriptor5) } +func init() { proto.RegisterFile("google/monitoring/v3/uptime.proto", fileDescriptor10) } -var fileDescriptor5 = []byte{ +var fileDescriptor10 = []byte{ // 1021 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0xdd, 0x4e, 0xe3, 0x46, 0x14, 0x5e, 0x13, 0xc8, 0xcf, 0x21, 0xb0, 0x66, 0x4a, 0xdb, 0x60, 0x89, 0x15, 0xbb, 0xbd, 0x28, diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go index 5e2fb2a7c..a2ee162d5 100644 --- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go @@ -7,8 +7,8 @@ import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import _ "google.golang.org/genproto/googleapis/api/annotations" -import google_protobuf4 "github.com/golang/protobuf/ptypes/empty" -import google_protobuf5 "google.golang.org/genproto/protobuf/field_mask" +import google_protobuf5 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf6 "google.golang.org/genproto/protobuf/field_mask" import ( context "golang.org/x/net/context" @@ -40,7 +40,7 @@ type ListUptimeCheckConfigsRequest struct { func (m *ListUptimeCheckConfigsRequest) Reset() { *m = ListUptimeCheckConfigsRequest{} } func (m *ListUptimeCheckConfigsRequest) String() string { return proto.CompactTextString(m) } func (*ListUptimeCheckConfigsRequest) ProtoMessage() {} -func (*ListUptimeCheckConfigsRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{0} } +func (*ListUptimeCheckConfigsRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{0} } func (m *ListUptimeCheckConfigsRequest) GetParent() string { if m != nil { @@ -78,7 +78,7 @@ type ListUptimeCheckConfigsResponse struct { func (m *ListUptimeCheckConfigsResponse) Reset() { *m = ListUptimeCheckConfigsResponse{} } func (m *ListUptimeCheckConfigsResponse) String() string { return proto.CompactTextString(m) } func (*ListUptimeCheckConfigsResponse) ProtoMessage() {} -func (*ListUptimeCheckConfigsResponse) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{1} } +func (*ListUptimeCheckConfigsResponse) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{1} } func (m 
*ListUptimeCheckConfigsResponse) GetUptimeCheckConfigs() []*UptimeCheckConfig { if m != nil { @@ -105,7 +105,7 @@ type GetUptimeCheckConfigRequest struct { func (m *GetUptimeCheckConfigRequest) Reset() { *m = GetUptimeCheckConfigRequest{} } func (m *GetUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) } func (*GetUptimeCheckConfigRequest) ProtoMessage() {} -func (*GetUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{2} } +func (*GetUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{2} } func (m *GetUptimeCheckConfigRequest) GetName() string { if m != nil { @@ -127,7 +127,7 @@ type CreateUptimeCheckConfigRequest struct { func (m *CreateUptimeCheckConfigRequest) Reset() { *m = CreateUptimeCheckConfigRequest{} } func (m *CreateUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) } func (*CreateUptimeCheckConfigRequest) ProtoMessage() {} -func (*CreateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{3} } +func (*CreateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{3} } func (m *CreateUptimeCheckConfigRequest) GetParent() string { if m != nil { @@ -149,7 +149,7 @@ type UpdateUptimeCheckConfigRequest struct { // configuration are updated with values from the new configuration. If this // field is empty, then the current configuration is completely replaced with // the new configuration. - UpdateMask *google_protobuf5.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` + UpdateMask *google_protobuf6.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` // Required. If an `"updateMask"` has been specified, this field gives // the values for the set of fields mentioned in the `"updateMask"`. 
If an // `"updateMask"` has not been given, this uptime check configuration replaces @@ -163,9 +163,9 @@ type UpdateUptimeCheckConfigRequest struct { func (m *UpdateUptimeCheckConfigRequest) Reset() { *m = UpdateUptimeCheckConfigRequest{} } func (m *UpdateUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) } func (*UpdateUptimeCheckConfigRequest) ProtoMessage() {} -func (*UpdateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{4} } +func (*UpdateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{4} } -func (m *UpdateUptimeCheckConfigRequest) GetUpdateMask() *google_protobuf5.FieldMask { +func (m *UpdateUptimeCheckConfigRequest) GetUpdateMask() *google_protobuf6.FieldMask { if m != nil { return m.UpdateMask } @@ -190,7 +190,7 @@ type DeleteUptimeCheckConfigRequest struct { func (m *DeleteUptimeCheckConfigRequest) Reset() { *m = DeleteUptimeCheckConfigRequest{} } func (m *DeleteUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) } func (*DeleteUptimeCheckConfigRequest) ProtoMessage() {} -func (*DeleteUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{5} } +func (*DeleteUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{5} } func (m *DeleteUptimeCheckConfigRequest) GetName() string { if m != nil { @@ -217,7 +217,7 @@ type ListUptimeCheckIpsRequest struct { func (m *ListUptimeCheckIpsRequest) Reset() { *m = ListUptimeCheckIpsRequest{} } func (m *ListUptimeCheckIpsRequest) String() string { return proto.CompactTextString(m) } func (*ListUptimeCheckIpsRequest) ProtoMessage() {} -func (*ListUptimeCheckIpsRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{6} } +func (*ListUptimeCheckIpsRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{6} } func (m *ListUptimeCheckIpsRequest) GetPageSize() int32 { if m != nil { @@ -250,7 +250,7 @@ type ListUptimeCheckIpsResponse struct { func (m *ListUptimeCheckIpsResponse) Reset() { *m = ListUptimeCheckIpsResponse{} } func (m *ListUptimeCheckIpsResponse) String() string { return proto.CompactTextString(m) } func (*ListUptimeCheckIpsResponse) ProtoMessage() {} -func (*ListUptimeCheckIpsResponse) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{7} } +func (*ListUptimeCheckIpsResponse) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{7} } func (m *ListUptimeCheckIpsResponse) GetUptimeCheckIps() []*UptimeCheckIp { if m != nil { @@ -303,7 +303,7 @@ type UptimeCheckServiceClient interface { // Deletes an uptime check configuration. Note that this method will fail // if the uptime check configuration is referenced by an alert policy or // other dependent configs that would be rendered invalid by the deletion. 
- DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) + DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) // Returns the list of IPs that checkers run from ListUptimeCheckIps(ctx context.Context, in *ListUptimeCheckIpsRequest, opts ...grpc.CallOption) (*ListUptimeCheckIpsResponse, error) } @@ -352,8 +352,8 @@ func (c *uptimeCheckServiceClient) UpdateUptimeCheckConfig(ctx context.Context, return out, nil } -func (c *uptimeCheckServiceClient) DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) { - out := new(google_protobuf4.Empty) +func (c *uptimeCheckServiceClient) DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) { + out := new(google_protobuf5.Empty) err := grpc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig", in, out, c.cc, opts...) if err != nil { return nil, err @@ -388,7 +388,7 @@ type UptimeCheckServiceServer interface { // Deletes an uptime check configuration. Note that this method will fail // if the uptime check configuration is referenced by an alert policy or // other dependent configs that would be rendered invalid by the deletion. - DeleteUptimeCheckConfig(context.Context, *DeleteUptimeCheckConfigRequest) (*google_protobuf4.Empty, error) + DeleteUptimeCheckConfig(context.Context, *DeleteUptimeCheckConfigRequest) (*google_protobuf5.Empty, error) // Returns the list of IPs that checkers run from ListUptimeCheckIps(context.Context, *ListUptimeCheckIpsRequest) (*ListUptimeCheckIpsResponse, error) } @@ -538,9 +538,9 @@ var _UptimeCheckService_serviceDesc = grpc.ServiceDesc{ Metadata: "google/monitoring/v3/uptime_service.proto", } -func init() { proto.RegisterFile("google/monitoring/v3/uptime_service.proto", fileDescriptor6) } +func init() { proto.RegisterFile("google/monitoring/v3/uptime_service.proto", fileDescriptor11) } -var fileDescriptor6 = []byte{ +var fileDescriptor11 = []byte{ // 735 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdf, 0x4e, 0x13, 0x4f, 0x14, 0xce, 0xb4, 0xfc, 0x08, 0x1c, 0xf2, 0xf3, 0xcf, 0xd8, 0x40, 0x5d, 0xa4, 0xa9, 0x35, 0x51, diff --git a/vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2beta2/dlp.pb.go b/vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2beta2/dlp.pb.go index 5a89152f2..fe6e1f454 100644 --- a/vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2beta2/dlp.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2beta2/dlp.pb.go @@ -40,6 +40,8 @@ It has these top-level messages: AnalyzeDataSourceRiskDetails ValueFrequency Value + QuoteInfo + DateTime DeidentifyConfig PrimitiveTransformation TimePartConfig @@ -56,6 +58,7 @@ It has these top-level messages: TransientCryptoKey UnwrappedCryptoKey KmsWrappedCryptoKey + DateShiftConfig InfoTypeTransformations FieldTransformation RecordTransformations @@ -63,14 +66,23 @@ It has these top-level messages: RecordCondition TransformationOverview TransformationSummary + Schedule InspectTemplate DeidentifyTemplate + JobTrigger + Action CreateInspectTemplateRequest UpdateInspectTemplateRequest GetInspectTemplateRequest ListInspectTemplatesRequest ListInspectTemplatesResponse DeleteInspectTemplateRequest + 
CreateJobTriggerRequest + UpdateJobTriggerRequest + GetJobTriggerRequest + ListJobTriggersRequest + ListJobTriggersResponse + DeleteJobTriggerRequest InspectJobConfig DlpJob GetDlpJobRequest @@ -93,6 +105,7 @@ It has these top-level messages: CloudStorageOptions BigQueryOptions StorageConfig + BigQueryKey CloudStorageKey DatastoreKey Key @@ -106,13 +119,13 @@ import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import _ "google.golang.org/genproto/googleapis/api/annotations" -import _ "github.com/golang/protobuf/ptypes/duration" +import google_protobuf2 "github.com/golang/protobuf/ptypes/duration" import google_protobuf3 "github.com/golang/protobuf/ptypes/empty" import google_protobuf4 "google.golang.org/genproto/protobuf/field_mask" import google_protobuf1 "github.com/golang/protobuf/ptypes/timestamp" import google_rpc "google.golang.org/genproto/googleapis/rpc/status" import google_type "google.golang.org/genproto/googleapis/type/date" -import _ "google.golang.org/genproto/googleapis/type/dayofweek" +import google_type1 "google.golang.org/genproto/googleapis/type/dayofweek" import google_type2 "google.golang.org/genproto/googleapis/type/timeofday" import ( @@ -232,6 +245,48 @@ func (x DlpJobType) String() string { } func (DlpJobType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +// Predefined schemas for storing findings. +type OutputStorageConfig_OutputSchema int32 + +const ( + OutputStorageConfig_OUTPUT_SCHEMA_UNSPECIFIED OutputStorageConfig_OutputSchema = 0 + // Basic schema including only `info_type`, `quote`, `certainty`, and + // `timestamp`. + OutputStorageConfig_BASIC_COLUMNS OutputStorageConfig_OutputSchema = 1 + // Schema tailored to findings from scanning Google Cloud Storage. + OutputStorageConfig_GCS_COLUMNS OutputStorageConfig_OutputSchema = 2 + // Schema tailored to findings from scanning Google Datastore. + OutputStorageConfig_DATASTORE_COLUMNS OutputStorageConfig_OutputSchema = 3 + // Schema tailored to findings from scanning Google BigQuery. + OutputStorageConfig_BIG_QUERY_COLUMNS OutputStorageConfig_OutputSchema = 4 + // Schema containing all columns. 
+ OutputStorageConfig_ALL_COLUMNS OutputStorageConfig_OutputSchema = 5 +) + +var OutputStorageConfig_OutputSchema_name = map[int32]string{ + 0: "OUTPUT_SCHEMA_UNSPECIFIED", + 1: "BASIC_COLUMNS", + 2: "GCS_COLUMNS", + 3: "DATASTORE_COLUMNS", + 4: "BIG_QUERY_COLUMNS", + 5: "ALL_COLUMNS", +} +var OutputStorageConfig_OutputSchema_value = map[string]int32{ + "OUTPUT_SCHEMA_UNSPECIFIED": 0, + "BASIC_COLUMNS": 1, + "GCS_COLUMNS": 2, + "DATASTORE_COLUMNS": 3, + "BIG_QUERY_COLUMNS": 4, + "ALL_COLUMNS": 5, +} + +func (x OutputStorageConfig_OutputSchema) String() string { + return proto.EnumName(OutputStorageConfig_OutputSchema_name, int32(x)) +} +func (OutputStorageConfig_OutputSchema) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{19, 0} +} + type TimePartConfig_TimePart int32 const ( @@ -272,7 +327,7 @@ var TimePartConfig_TimePart_value = map[string]int32{ func (x TimePartConfig_TimePart) String() string { return proto.EnumName(TimePartConfig_TimePart_name, int32(x)) } -func (TimePartConfig_TimePart) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{33, 0} } +func (TimePartConfig_TimePart) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{35, 0} } type CharsToIgnore_CommonCharsToIgnore int32 @@ -311,7 +366,7 @@ func (x CharsToIgnore_CommonCharsToIgnore) String() string { return proto.EnumName(CharsToIgnore_CommonCharsToIgnore_name, int32(x)) } func (CharsToIgnore_CommonCharsToIgnore) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{38, 0} + return fileDescriptor0, []int{40, 0} } // These are commonly used subsets of the alphabet that the FFX mode @@ -350,7 +405,7 @@ func (x CryptoReplaceFfxFpeConfig_FfxCommonNativeAlphabet) String() string { return proto.EnumName(CryptoReplaceFfxFpeConfig_FfxCommonNativeAlphabet_name, int32(x)) } func (CryptoReplaceFfxFpeConfig_FfxCommonNativeAlphabet) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{42, 0} + return fileDescriptor0, []int{44, 0} } type RecordCondition_Expressions_LogicalOperator int32 @@ -373,7 +428,7 @@ func (x RecordCondition_Expressions_LogicalOperator) String() string { return proto.EnumName(RecordCondition_Expressions_LogicalOperator_name, int32(x)) } func (RecordCondition_Expressions_LogicalOperator) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{51, 2, 0} + return fileDescriptor0, []int{54, 2, 0} } // Possible outcomes of transformations. @@ -400,9 +455,43 @@ func (x TransformationSummary_TransformationResultCode) String() string { return proto.EnumName(TransformationSummary_TransformationResultCode_name, int32(x)) } func (TransformationSummary_TransformationResultCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{53, 0} + return fileDescriptor0, []int{56, 0} } +// Whether the trigger is currently active. If PAUSED or CANCELLED, no jobs +// will be created with this configuration. The service may automatically +// pause triggers experiencing frequent errors. To restart a job, set the +// status to HEALTHY after correcting user errors. +type JobTrigger_Status int32 + +const ( + JobTrigger_STATUS_UNSPECIFIED JobTrigger_Status = 0 + // Trigger is healthy. + JobTrigger_HEALTHY JobTrigger_Status = 1 + // Trigger is temporarily paused. + JobTrigger_PAUSED JobTrigger_Status = 2 + // Trigger is cancelled and can not be resumed. 
+ JobTrigger_CANCELLED JobTrigger_Status = 3 +) + +var JobTrigger_Status_name = map[int32]string{ + 0: "STATUS_UNSPECIFIED", + 1: "HEALTHY", + 2: "PAUSED", + 3: "CANCELLED", +} +var JobTrigger_Status_value = map[string]int32{ + "STATUS_UNSPECIFIED": 0, + "HEALTHY": 1, + "PAUSED": 2, + "CANCELLED": 3, +} + +func (x JobTrigger_Status) String() string { + return proto.EnumName(JobTrigger_Status_name, int32(x)) +} +func (JobTrigger_Status) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{60, 0} } + type DlpJob_JobState int32 const ( @@ -439,7 +528,7 @@ var DlpJob_JobState_value = map[string]int32{ func (x DlpJob_JobState) String() string { return proto.EnumName(DlpJob_JobState_name, int32(x)) } -func (DlpJob_JobState) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{63, 0} } +func (DlpJob_JobState) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{75, 0} } // Configuration description of the scanning process. // When used with redactContent only info_types and min_likelihood are currently @@ -814,7 +903,9 @@ func (m *InspectResult) GetFindingsTruncated() bool { type Finding struct { // The content that was found. Even if the content is not textual, it // may be converted to a textual representation here. - // Provided if requested by the `InspectConfig`. + // Provided if requested by the `InspectConfig` and the finding is + // less than or equal to 4096 bytes long. If the finding exceeds 4096 bytes + // in length, the quote may be omitted. Quote string `protobuf:"bytes,1,opt,name=quote" json:"quote,omitempty"` // The type of content that might have been found. // Provided if requested by the `InspectConfig`. @@ -825,6 +916,8 @@ type Finding struct { Location *Location `protobuf:"bytes,4,opt,name=location" json:"location,omitempty"` // Timestamp when finding was detected. CreateTime *google_protobuf1.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // InfoType-dependent details parsed from quote. + QuoteInfo *QuoteInfo `protobuf:"bytes,7,opt,name=quote_info,json=quoteInfo" json:"quote_info,omitempty"` } func (m *Finding) Reset() { *m = Finding{} } @@ -867,6 +960,13 @@ func (m *Finding) GetCreateTime() *google_protobuf1.Timestamp { return nil } +func (m *Finding) GetQuoteInfo() *QuoteInfo { + if m != nil { + return m.QuoteInfo + } + return nil +} + // Specifies the location of the finding. type Location struct { // Zero-based byte offsets delimiting the finding. @@ -1577,6 +1677,12 @@ type InspectDataSourceRequest struct { Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` // A configuration for the job. JobConfig *InspectJobConfig `protobuf:"bytes,2,opt,name=job_config,json=jobConfig" json:"job_config,omitempty"` + // Optional job ID to use for the created job. If not provided, a job ID will + // automatically be generated. Must be unique within the project. The job ID + // can contain uppercase and lowercase letters, numbers, and hyphens; that is, + // it must match the regular expression: `[a-zA-Z\\d-]+`. The maximum length + // is 100 characters. Can be empty to allow the system to generate one. 
+ JobId string `protobuf:"bytes,3,opt,name=job_id,json=jobId" json:"job_id,omitempty"` } func (m *InspectDataSourceRequest) Reset() { *m = InspectDataSourceRequest{} } @@ -1598,11 +1704,26 @@ func (m *InspectDataSourceRequest) GetJobConfig() *InspectJobConfig { return nil } +func (m *InspectDataSourceRequest) GetJobId() string { + if m != nil { + return m.JobId + } + return "" +} + // Cloud repository for storing output. type OutputStorageConfig struct { // Types that are valid to be assigned to Type: // *OutputStorageConfig_Table Type isOutputStorageConfig_Type `protobuf_oneof:"type"` + // Schema used for writing the findings. Columns are derived from the + // `Finding` object. If appending to an existing table, any columns from the + // predefined schema that are missing will be added. No columns in the + // existing table will be deleted. + // + // If unspecified, then all available columns will be used for a new table, + // and no changes will be made to an existing table. + OutputSchema OutputStorageConfig_OutputSchema `protobuf:"varint,3,opt,name=output_schema,json=outputSchema,enum=google.privacy.dlp.v2beta2.OutputStorageConfig_OutputSchema" json:"output_schema,omitempty"` } func (m *OutputStorageConfig) Reset() { *m = OutputStorageConfig{} } @@ -1634,6 +1755,13 @@ func (m *OutputStorageConfig) GetTable() *BigQueryTable { return nil } +func (m *OutputStorageConfig) GetOutputSchema() OutputStorageConfig_OutputSchema { + if m != nil { + return m.OutputSchema + } + return OutputStorageConfig_OUTPUT_SCHEMA_UNSPECIFIED +} + // XXX_OneofFuncs is for the internal use of the proto package. func (*OutputStorageConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _OutputStorageConfig_OneofMarshaler, _OutputStorageConfig_OneofUnmarshaler, _OutputStorageConfig_OneofSizer, []interface{}{ @@ -1901,6 +2029,12 @@ type AnalyzeDataSourceRiskRequest struct { Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` // Configuration for this risk analysis job. JobConfig *RiskAnalysisJobConfig `protobuf:"bytes,2,opt,name=job_config,json=jobConfig" json:"job_config,omitempty"` + // Optional job ID to use for the created job. If not provided, a job ID will + // automatically be generated. Must be unique within the project. The job ID + // can contain uppercase and lowercase letters, numbers, and hyphens; that is, + // it must match the regular expression: `[a-zA-Z\\d-]+`. The maximum length + // is 100 characters. Can be empty to allow the system to generate one. + JobId string `protobuf:"bytes,3,opt,name=job_id,json=jobId" json:"job_id,omitempty"` } func (m *AnalyzeDataSourceRiskRequest) Reset() { *m = AnalyzeDataSourceRiskRequest{} } @@ -1922,12 +2056,22 @@ func (m *AnalyzeDataSourceRiskRequest) GetJobConfig() *RiskAnalysisJobConfig { return nil } +func (m *AnalyzeDataSourceRiskRequest) GetJobId() string { + if m != nil { + return m.JobId + } + return "" +} + // Configuration for a risk analysis job. type RiskAnalysisJobConfig struct { // Privacy metric to compute. PrivacyMetric *PrivacyMetric `protobuf:"bytes,1,opt,name=privacy_metric,json=privacyMetric" json:"privacy_metric,omitempty"` // Input dataset to compute metrics over. SourceTable *BigQueryTable `protobuf:"bytes,2,opt,name=source_table,json=sourceTable" json:"source_table,omitempty"` + // Actions to execute at the completion of the job. Are executed in the order + // provided. 
+ Actions []*Action `protobuf:"bytes,3,rep,name=actions" json:"actions,omitempty"` } func (m *RiskAnalysisJobConfig) Reset() { *m = RiskAnalysisJobConfig{} } @@ -1949,6 +2093,13 @@ func (m *RiskAnalysisJobConfig) GetSourceTable() *BigQueryTable { return nil } +func (m *RiskAnalysisJobConfig) GetActions() []*Action { + if m != nil { + return m.Actions + } + return nil +} + // Privacy metric to compute for reidentification risk analysis. type PrivacyMetric struct { // Types that are valid to be assigned to Type: @@ -3346,6 +3497,7 @@ type Value struct { // *Value_TimestampValue // *Value_TimeValue // *Value_DateValue + // *Value_DayOfWeekValue Type isValue_Type `protobuf_oneof:"type"` } @@ -3379,6 +3531,9 @@ type Value_TimeValue struct { type Value_DateValue struct { DateValue *google_type.Date `protobuf:"bytes,7,opt,name=date_value,json=dateValue,oneof"` } +type Value_DayOfWeekValue struct { + DayOfWeekValue google_type1.DayOfWeek `protobuf:"varint,8,opt,name=day_of_week_value,json=dayOfWeekValue,enum=google.type.DayOfWeek,oneof"` +} func (*Value_IntegerValue) isValue_Type() {} func (*Value_FloatValue) isValue_Type() {} @@ -3387,6 +3542,7 @@ func (*Value_BooleanValue) isValue_Type() {} func (*Value_TimestampValue) isValue_Type() {} func (*Value_TimeValue) isValue_Type() {} func (*Value_DateValue) isValue_Type() {} +func (*Value_DayOfWeekValue) isValue_Type() {} func (m *Value) GetType() isValue_Type { if m != nil { @@ -3444,6 +3600,13 @@ func (m *Value) GetDateValue() *google_type.Date { return nil } +func (m *Value) GetDayOfWeekValue() google_type1.DayOfWeek { + if x, ok := m.GetType().(*Value_DayOfWeekValue); ok { + return x.DayOfWeekValue + } + return google_type1.DayOfWeek_DAY_OF_WEEK_UNSPECIFIED +} + // XXX_OneofFuncs is for the internal use of the proto package. 
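For orientation, the new `Value_DayOfWeekValue` case above is a varint oneof member rather than a message, so it is set by wrapping the enum value directly. A minimal sketch, assuming the generated package is imported as `dlp` from `google.golang.org/genproto/googleapis/privacy/dlp/v2beta2` and the enum from `google.golang.org/genproto/googleapis/type/dayofweek` (variable names are illustrative):

package main

import (
	"fmt"

	dlp "google.golang.org/genproto/googleapis/privacy/dlp/v2beta2"
	dayofweek "google.golang.org/genproto/googleapis/type/dayofweek"
)

func main() {
	// Illustrative only: wrap the enum in its oneof case to store it in a Value.
	v := &dlp.Value{
		Type: &dlp.Value_DayOfWeekValue{DayOfWeekValue: dayofweek.DayOfWeek_FRIDAY},
	}
	// The getter returns DAY_OF_WEEK_UNSPECIFIED when a different case is set,
	// mirroring the nil returns of the message-typed getters above.
	if d := v.GetDayOfWeekValue(); d != dayofweek.DayOfWeek_DAY_OF_WEEK_UNSPECIFIED {
		fmt.Println("day of week:", d)
	}
}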
func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{ @@ -3454,6 +3617,7 @@ func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, (*Value_TimestampValue)(nil), (*Value_TimeValue)(nil), (*Value_DateValue)(nil), + (*Value_DayOfWeekValue)(nil), } } @@ -3492,6 +3656,9 @@ func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { if err := b.EncodeMessage(x.DateValue); err != nil { return err } + case *Value_DayOfWeekValue: + b.EncodeVarint(8<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.DayOfWeekValue)) case nil: default: return fmt.Errorf("Value.Type has unexpected type %T", x) @@ -3554,6 +3721,13 @@ func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) err := b.DecodeMessage(msg) m.Type = &Value_DateValue{msg} return true, err + case 8: // type.day_of_week_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Type = &Value_DayOfWeekValue{google_type1.DayOfWeek(x)} + return true, err default: return false, nil } @@ -3591,6 +3765,9 @@ func _Value_OneofSizer(msg proto.Message) (n int) { n += proto.SizeVarint(7<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s + case *Value_DayOfWeekValue: + n += proto.SizeVarint(8<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.DayOfWeekValue)) case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) @@ -3598,6 +3775,160 @@ func _Value_OneofSizer(msg proto.Message) (n int) { return n } +// Message for infoType-dependent details parsed from quote. +type QuoteInfo struct { + // Object representation of the quote. + // + // Types that are valid to be assigned to ParsedQuote: + // *QuoteInfo_DateTime + ParsedQuote isQuoteInfo_ParsedQuote `protobuf_oneof:"parsed_quote"` +} + +func (m *QuoteInfo) Reset() { *m = QuoteInfo{} } +func (m *QuoteInfo) String() string { return proto.CompactTextString(m) } +func (*QuoteInfo) ProtoMessage() {} +func (*QuoteInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } + +type isQuoteInfo_ParsedQuote interface { + isQuoteInfo_ParsedQuote() +} + +type QuoteInfo_DateTime struct { + DateTime *DateTime `protobuf:"bytes,2,opt,name=date_time,json=dateTime,oneof"` +} + +func (*QuoteInfo_DateTime) isQuoteInfo_ParsedQuote() {} + +func (m *QuoteInfo) GetParsedQuote() isQuoteInfo_ParsedQuote { + if m != nil { + return m.ParsedQuote + } + return nil +} + +func (m *QuoteInfo) GetDateTime() *DateTime { + if x, ok := m.GetParsedQuote().(*QuoteInfo_DateTime); ok { + return x.DateTime + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
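Tying the new plumbing together: when the service parses a quote, the structured result surfaces through `Finding.GetQuoteInfo()` and the `parsed_quote` oneof, whose only case so far is `QuoteInfo_DateTime` (the `DateTime` message is defined just below). A hedged sketch of the consuming side, assuming `fmt` and the `dlp` import alias from the previous sketch:

// printParsedDate walks the getter chain. Each generated getter is nil-safe,
// returning a zero value on a nil receiver or an unset oneof case, so no
// intermediate nil checks are needed.
func printParsedDate(f *dlp.Finding) {
	if dt := f.GetQuoteInfo().GetDateTime(); dt != nil {
		fmt.Printf("quote %q parsed as %v (day of week %v)\n",
			f.GetQuote(), dt.GetDate(), dt.GetDayOfWeek())
	}
}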
+func (*QuoteInfo) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _QuoteInfo_OneofMarshaler, _QuoteInfo_OneofUnmarshaler, _QuoteInfo_OneofSizer, []interface{}{ + (*QuoteInfo_DateTime)(nil), + } +} + +func _QuoteInfo_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*QuoteInfo) + // parsed_quote + switch x := m.ParsedQuote.(type) { + case *QuoteInfo_DateTime: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DateTime); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("QuoteInfo.ParsedQuote has unexpected type %T", x) + } + return nil +} + +func _QuoteInfo_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*QuoteInfo) + switch tag { + case 2: // parsed_quote.date_time + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DateTime) + err := b.DecodeMessage(msg) + m.ParsedQuote = &QuoteInfo_DateTime{msg} + return true, err + default: + return false, nil + } +} + +func _QuoteInfo_OneofSizer(msg proto.Message) (n int) { + m := msg.(*QuoteInfo) + // parsed_quote + switch x := m.ParsedQuote.(type) { + case *QuoteInfo_DateTime: + s := proto.Size(x.DateTime) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Message for a date time object. +type DateTime struct { + // One or more of the following must be set. All fields are optional, but + // when set must be valid date or time values. + Date *google_type.Date `protobuf:"bytes,1,opt,name=date" json:"date,omitempty"` + DayOfWeek google_type1.DayOfWeek `protobuf:"varint,2,opt,name=day_of_week,json=dayOfWeek,enum=google.type.DayOfWeek" json:"day_of_week,omitempty"` + Time *google_type2.TimeOfDay `protobuf:"bytes,3,opt,name=time" json:"time,omitempty"` + TimeZone *DateTime_TimeZone `protobuf:"bytes,4,opt,name=time_zone,json=timeZone" json:"time_zone,omitempty"` +} + +func (m *DateTime) Reset() { *m = DateTime{} } +func (m *DateTime) String() string { return proto.CompactTextString(m) } +func (*DateTime) ProtoMessage() {} +func (*DateTime) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } + +func (m *DateTime) GetDate() *google_type.Date { + if m != nil { + return m.Date + } + return nil +} + +func (m *DateTime) GetDayOfWeek() google_type1.DayOfWeek { + if m != nil { + return m.DayOfWeek + } + return google_type1.DayOfWeek_DAY_OF_WEEK_UNSPECIFIED +} + +func (m *DateTime) GetTime() *google_type2.TimeOfDay { + if m != nil { + return m.Time + } + return nil +} + +func (m *DateTime) GetTimeZone() *DateTime_TimeZone { + if m != nil { + return m.TimeZone + } + return nil +} + +type DateTime_TimeZone struct { + // Set only if the offset can be determined. Positive for time ahead of UTC. + // E.g. For "UTC-9", this value is -540. 
+ OffsetMinutes int32 `protobuf:"varint,1,opt,name=offset_minutes,json=offsetMinutes" json:"offset_minutes,omitempty"` +} + +func (m *DateTime_TimeZone) Reset() { *m = DateTime_TimeZone{} } +func (m *DateTime_TimeZone) String() string { return proto.CompactTextString(m) } +func (*DateTime_TimeZone) ProtoMessage() {} +func (*DateTime_TimeZone) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32, 0} } + +func (m *DateTime_TimeZone) GetOffsetMinutes() int32 { + if m != nil { + return m.OffsetMinutes + } + return 0 +} + // The configuration that controls how the data will change. type DeidentifyConfig struct { // Types that are valid to be assigned to Transformation: @@ -3609,7 +3940,7 @@ type DeidentifyConfig struct { func (m *DeidentifyConfig) Reset() { *m = DeidentifyConfig{} } func (m *DeidentifyConfig) String() string { return proto.CompactTextString(m) } func (*DeidentifyConfig) ProtoMessage() {} -func (*DeidentifyConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } +func (*DeidentifyConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } type isDeidentifyConfig_Transformation interface { isDeidentifyConfig_Transformation() @@ -3732,13 +4063,14 @@ type PrimitiveTransformation struct { // *PrimitiveTransformation_ReplaceWithInfoTypeConfig // *PrimitiveTransformation_TimePartConfig // *PrimitiveTransformation_CryptoHashConfig + // *PrimitiveTransformation_DateShiftConfig Transformation isPrimitiveTransformation_Transformation `protobuf_oneof:"transformation"` } func (m *PrimitiveTransformation) Reset() { *m = PrimitiveTransformation{} } func (m *PrimitiveTransformation) String() string { return proto.CompactTextString(m) } func (*PrimitiveTransformation) ProtoMessage() {} -func (*PrimitiveTransformation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } +func (*PrimitiveTransformation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } type isPrimitiveTransformation_Transformation interface { isPrimitiveTransformation_Transformation() @@ -3771,6 +4103,9 @@ type PrimitiveTransformation_TimePartConfig struct { type PrimitiveTransformation_CryptoHashConfig struct { CryptoHashConfig *CryptoHashConfig `protobuf:"bytes,9,opt,name=crypto_hash_config,json=cryptoHashConfig,oneof"` } +type PrimitiveTransformation_DateShiftConfig struct { + DateShiftConfig *DateShiftConfig `protobuf:"bytes,11,opt,name=date_shift_config,json=dateShiftConfig,oneof"` +} func (*PrimitiveTransformation_ReplaceConfig) isPrimitiveTransformation_Transformation() {} func (*PrimitiveTransformation_RedactConfig) isPrimitiveTransformation_Transformation() {} @@ -3781,6 +4116,7 @@ func (*PrimitiveTransformation_BucketingConfig) isPrimitiveTransformation_Transf func (*PrimitiveTransformation_ReplaceWithInfoTypeConfig) isPrimitiveTransformation_Transformation() {} func (*PrimitiveTransformation_TimePartConfig) isPrimitiveTransformation_Transformation() {} func (*PrimitiveTransformation_CryptoHashConfig) isPrimitiveTransformation_Transformation() {} +func (*PrimitiveTransformation_DateShiftConfig) isPrimitiveTransformation_Transformation() {} func (m *PrimitiveTransformation) GetTransformation() isPrimitiveTransformation_Transformation { if m != nil { @@ -3852,6 +4188,13 @@ func (m *PrimitiveTransformation) GetCryptoHashConfig() *CryptoHashConfig { return nil } +func (m *PrimitiveTransformation) GetDateShiftConfig() *DateShiftConfig { + if x, ok := m.GetTransformation().(*PrimitiveTransformation_DateShiftConfig); ok { + return x.DateShiftConfig + } 
+ return nil +} + // XXX_OneofFuncs is for the internal use of the proto package. func (*PrimitiveTransformation) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _PrimitiveTransformation_OneofMarshaler, _PrimitiveTransformation_OneofUnmarshaler, _PrimitiveTransformation_OneofSizer, []interface{}{ @@ -3864,6 +4207,7 @@ func (*PrimitiveTransformation) XXX_OneofFuncs() (func(msg proto.Message, b *pro (*PrimitiveTransformation_ReplaceWithInfoTypeConfig)(nil), (*PrimitiveTransformation_TimePartConfig)(nil), (*PrimitiveTransformation_CryptoHashConfig)(nil), + (*PrimitiveTransformation_DateShiftConfig)(nil), } } @@ -3916,6 +4260,11 @@ func _PrimitiveTransformation_OneofMarshaler(msg proto.Message, b *proto.Buffer) if err := b.EncodeMessage(x.CryptoHashConfig); err != nil { return err } + case *PrimitiveTransformation_DateShiftConfig: + b.EncodeVarint(11<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DateShiftConfig); err != nil { + return err + } case nil: default: return fmt.Errorf("PrimitiveTransformation.Transformation has unexpected type %T", x) @@ -3998,6 +4347,14 @@ func _PrimitiveTransformation_OneofUnmarshaler(msg proto.Message, tag, wire int, err := b.DecodeMessage(msg) m.Transformation = &PrimitiveTransformation_CryptoHashConfig{msg} return true, err + case 11: // transformation.date_shift_config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DateShiftConfig) + err := b.DecodeMessage(msg) + m.Transformation = &PrimitiveTransformation_DateShiftConfig{msg} + return true, err default: return false, nil } @@ -4052,6 +4409,11 @@ func _PrimitiveTransformation_OneofSizer(msg proto.Message) (n int) { n += proto.SizeVarint(9<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s + case *PrimitiveTransformation_DateShiftConfig: + s := proto.Size(x.DateShiftConfig) + n += proto.SizeVarint(11<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) @@ -4068,7 +4430,7 @@ type TimePartConfig struct { func (m *TimePartConfig) Reset() { *m = TimePartConfig{} } func (m *TimePartConfig) String() string { return proto.CompactTextString(m) } func (*TimePartConfig) ProtoMessage() {} -func (*TimePartConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } +func (*TimePartConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } func (m *TimePartConfig) GetPartToExtract() TimePartConfig_TimePart { if m != nil { @@ -4091,7 +4453,7 @@ type CryptoHashConfig struct { func (m *CryptoHashConfig) Reset() { *m = CryptoHashConfig{} } func (m *CryptoHashConfig) String() string { return proto.CompactTextString(m) } func (*CryptoHashConfig) ProtoMessage() {} -func (*CryptoHashConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } +func (*CryptoHashConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } func (m *CryptoHashConfig) GetCryptoKey() *CryptoKey { if m != nil { @@ -4109,7 +4471,7 @@ type ReplaceValueConfig struct { func (m *ReplaceValueConfig) Reset() { *m = ReplaceValueConfig{} } func (m *ReplaceValueConfig) String() string { return proto.CompactTextString(m) } func (*ReplaceValueConfig) ProtoMessage() {} -func (*ReplaceValueConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } +func (*ReplaceValueConfig) Descriptor() ([]byte, 
[]int) { return fileDescriptor0, []int{37} } func (m *ReplaceValueConfig) GetNewValue() *Value { if m != nil { @@ -4125,7 +4487,7 @@ type ReplaceWithInfoTypeConfig struct { func (m *ReplaceWithInfoTypeConfig) Reset() { *m = ReplaceWithInfoTypeConfig{} } func (m *ReplaceWithInfoTypeConfig) String() string { return proto.CompactTextString(m) } func (*ReplaceWithInfoTypeConfig) ProtoMessage() {} -func (*ReplaceWithInfoTypeConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } +func (*ReplaceWithInfoTypeConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } // Redact a given value. For example, if used with an `InfoTypeTransformation` // transforming PHONE_NUMBER, and input 'My phone number is 206-555-0123', the @@ -4136,7 +4498,7 @@ type RedactConfig struct { func (m *RedactConfig) Reset() { *m = RedactConfig{} } func (m *RedactConfig) String() string { return proto.CompactTextString(m) } func (*RedactConfig) ProtoMessage() {} -func (*RedactConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } +func (*RedactConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } // Characters to skip when doing deidentification of a value. These will be left // alone and skipped. @@ -4150,7 +4512,7 @@ type CharsToIgnore struct { func (m *CharsToIgnore) Reset() { *m = CharsToIgnore{} } func (m *CharsToIgnore) String() string { return proto.CompactTextString(m) } func (*CharsToIgnore) ProtoMessage() {} -func (*CharsToIgnore) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } +func (*CharsToIgnore) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } type isCharsToIgnore_Characters interface { isCharsToIgnore_Characters() @@ -4282,7 +4644,7 @@ type CharacterMaskConfig struct { func (m *CharacterMaskConfig) Reset() { *m = CharacterMaskConfig{} } func (m *CharacterMaskConfig) String() string { return proto.CompactTextString(m) } func (*CharacterMaskConfig) ProtoMessage() {} -func (*CharacterMaskConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } +func (*CharacterMaskConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } func (m *CharacterMaskConfig) GetMaskingCharacter() string { if m != nil { @@ -4346,7 +4708,7 @@ type FixedSizeBucketingConfig struct { func (m *FixedSizeBucketingConfig) Reset() { *m = FixedSizeBucketingConfig{} } func (m *FixedSizeBucketingConfig) String() string { return proto.CompactTextString(m) } func (*FixedSizeBucketingConfig) ProtoMessage() {} -func (*FixedSizeBucketingConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } +func (*FixedSizeBucketingConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } func (m *FixedSizeBucketingConfig) GetLowerBound() *Value { if m != nil { @@ -4385,7 +4747,7 @@ type BucketingConfig struct { func (m *BucketingConfig) Reset() { *m = BucketingConfig{} } func (m *BucketingConfig) String() string { return proto.CompactTextString(m) } func (*BucketingConfig) ProtoMessage() {} -func (*BucketingConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } +func (*BucketingConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } func (m *BucketingConfig) GetBuckets() []*BucketingConfig_Bucket { if m != nil { @@ -4409,7 +4771,7 @@ type BucketingConfig_Bucket struct { func (m *BucketingConfig_Bucket) Reset() { *m = BucketingConfig_Bucket{} } func (m *BucketingConfig_Bucket) String() string { return proto.CompactTextString(m) } func 
(*BucketingConfig_Bucket) ProtoMessage() {} -func (*BucketingConfig_Bucket) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41, 0} } +func (*BucketingConfig_Bucket) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43, 0} } func (m *BucketingConfig_Bucket) GetMin() *Value { if m != nil { @@ -4502,7 +4864,7 @@ type CryptoReplaceFfxFpeConfig struct { func (m *CryptoReplaceFfxFpeConfig) Reset() { *m = CryptoReplaceFfxFpeConfig{} } func (m *CryptoReplaceFfxFpeConfig) String() string { return proto.CompactTextString(m) } func (*CryptoReplaceFfxFpeConfig) ProtoMessage() {} -func (*CryptoReplaceFfxFpeConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } +func (*CryptoReplaceFfxFpeConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } type isCryptoReplaceFfxFpeConfig_Alphabet interface { isCryptoReplaceFfxFpeConfig_Alphabet() @@ -4666,7 +5028,7 @@ type CryptoKey struct { func (m *CryptoKey) Reset() { *m = CryptoKey{} } func (m *CryptoKey) String() string { return proto.CompactTextString(m) } func (*CryptoKey) ProtoMessage() {} -func (*CryptoKey) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } +func (*CryptoKey) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } type isCryptoKey_Source interface { isCryptoKey_Source() @@ -4822,7 +5184,7 @@ type TransientCryptoKey struct { func (m *TransientCryptoKey) Reset() { *m = TransientCryptoKey{} } func (m *TransientCryptoKey) String() string { return proto.CompactTextString(m) } func (*TransientCryptoKey) ProtoMessage() {} -func (*TransientCryptoKey) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } +func (*TransientCryptoKey) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} } func (m *TransientCryptoKey) GetName() string { if m != nil { @@ -4841,7 +5203,7 @@ type UnwrappedCryptoKey struct { func (m *UnwrappedCryptoKey) Reset() { *m = UnwrappedCryptoKey{} } func (m *UnwrappedCryptoKey) String() string { return proto.CompactTextString(m) } func (*UnwrappedCryptoKey) ProtoMessage() {} -func (*UnwrappedCryptoKey) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } +func (*UnwrappedCryptoKey) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } func (m *UnwrappedCryptoKey) GetKey() []byte { if m != nil { @@ -4864,7 +5226,7 @@ type KmsWrappedCryptoKey struct { func (m *KmsWrappedCryptoKey) Reset() { *m = KmsWrappedCryptoKey{} } func (m *KmsWrappedCryptoKey) String() string { return proto.CompactTextString(m) } func (*KmsWrappedCryptoKey) ProtoMessage() {} -func (*KmsWrappedCryptoKey) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} } +func (*KmsWrappedCryptoKey) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } func (m *KmsWrappedCryptoKey) GetWrappedKey() []byte { if m != nil { @@ -4880,6 +5242,136 @@ func (m *KmsWrappedCryptoKey) GetCryptoKeyName() string { return "" } +// Shifts dates by random number of days, with option to be consistent for the +// same context. +type DateShiftConfig struct { + // Range of shift in days. Actual shift will be selected at random within this + // range (inclusive ends). Negative means shift to earlier in time. Must not + // be more than 365250 days (1000 years) each direction. + // + // For example, 3 means shift date to at most 3 days into the future. 
+ // [Required] + UpperBoundDays int32 `protobuf:"varint,1,opt,name=upper_bound_days,json=upperBoundDays" json:"upper_bound_days,omitempty"` + // For example, -5 means shift date to at most 5 days back in the past. + // [Required] + LowerBoundDays int32 `protobuf:"varint,2,opt,name=lower_bound_days,json=lowerBoundDays" json:"lower_bound_days,omitempty"` + // Points to the field that contains the context, for example, an entity id. + // If set, must also set method. If set, shift will be consistent for the + // given context. + Context *FieldId `protobuf:"bytes,3,opt,name=context" json:"context,omitempty"` + // Method for calculating shift that takes context into consideration. If + // set, must also set context. Can only be applied to table items. + // + // Types that are valid to be assigned to Method: + // *DateShiftConfig_CryptoKey + Method isDateShiftConfig_Method `protobuf_oneof:"method"` +} + +func (m *DateShiftConfig) Reset() { *m = DateShiftConfig{} } +func (m *DateShiftConfig) String() string { return proto.CompactTextString(m) } +func (*DateShiftConfig) ProtoMessage() {} +func (*DateShiftConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} } + +type isDateShiftConfig_Method interface { + isDateShiftConfig_Method() +} + +type DateShiftConfig_CryptoKey struct { + CryptoKey *CryptoKey `protobuf:"bytes,4,opt,name=crypto_key,json=cryptoKey,oneof"` +} + +func (*DateShiftConfig_CryptoKey) isDateShiftConfig_Method() {} + +func (m *DateShiftConfig) GetMethod() isDateShiftConfig_Method { + if m != nil { + return m.Method + } + return nil +} + +func (m *DateShiftConfig) GetUpperBoundDays() int32 { + if m != nil { + return m.UpperBoundDays + } + return 0 +} + +func (m *DateShiftConfig) GetLowerBoundDays() int32 { + if m != nil { + return m.LowerBoundDays + } + return 0 +} + +func (m *DateShiftConfig) GetContext() *FieldId { + if m != nil { + return m.Context + } + return nil +} + +func (m *DateShiftConfig) GetCryptoKey() *CryptoKey { + if x, ok := m.GetMethod().(*DateShiftConfig_CryptoKey); ok { + return x.CryptoKey + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
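For context, `DateShiftConfig` plugs into `PrimitiveTransformation` through the `PrimitiveTransformation_DateShiftConfig` case added earlier in this diff. A minimal sketch of a random backward shift (field values are illustrative; for a shift that is consistent per entity, `Context` and the `crypto_key` method must be set together, as the comments above require):

// Illustrative: shift every matched date between 1 and 30 days into the past.
// Both bounds are negative because negative values mean "earlier in time".
var shift = &dlp.PrimitiveTransformation{
	Transformation: &dlp.PrimitiveTransformation_DateShiftConfig{
		DateShiftConfig: &dlp.DateShiftConfig{
			UpperBoundDays: -1,  // latest allowed shift: 1 day back
			LowerBoundDays: -30, // earliest allowed shift: 30 days back
		},
	},
}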
+func (*DateShiftConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _DateShiftConfig_OneofMarshaler, _DateShiftConfig_OneofUnmarshaler, _DateShiftConfig_OneofSizer, []interface{}{ + (*DateShiftConfig_CryptoKey)(nil), + } +} + +func _DateShiftConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*DateShiftConfig) + // method + switch x := m.Method.(type) { + case *DateShiftConfig_CryptoKey: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CryptoKey); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("DateShiftConfig.Method has unexpected type %T", x) + } + return nil +} + +func _DateShiftConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*DateShiftConfig) + switch tag { + case 4: // method.crypto_key + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CryptoKey) + err := b.DecodeMessage(msg) + m.Method = &DateShiftConfig_CryptoKey{msg} + return true, err + default: + return false, nil + } +} + +func _DateShiftConfig_OneofSizer(msg proto.Message) (n int) { + m := msg.(*DateShiftConfig) + // method + switch x := m.Method.(type) { + case *DateShiftConfig_CryptoKey: + s := proto.Size(x.CryptoKey) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + // A type of transformation that will scan unstructured text and // apply various `PrimitiveTransformation`s to each finding, where the // transformation is applied to only values that were identified as a specific @@ -4893,7 +5385,7 @@ type InfoTypeTransformations struct { func (m *InfoTypeTransformations) Reset() { *m = InfoTypeTransformations{} } func (m *InfoTypeTransformations) String() string { return proto.CompactTextString(m) } func (*InfoTypeTransformations) ProtoMessage() {} -func (*InfoTypeTransformations) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } +func (*InfoTypeTransformations) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} } func (m *InfoTypeTransformations) GetTransformations() []*InfoTypeTransformations_InfoTypeTransformation { if m != nil { @@ -4920,7 +5412,7 @@ func (m *InfoTypeTransformations_InfoTypeTransformation) String() string { } func (*InfoTypeTransformations_InfoTypeTransformation) ProtoMessage() {} func (*InfoTypeTransformations_InfoTypeTransformation) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{47, 0} + return fileDescriptor0, []int{50, 0} } func (m *InfoTypeTransformations_InfoTypeTransformation) GetInfoTypes() []*InfoType { @@ -4962,7 +5454,7 @@ type FieldTransformation struct { func (m *FieldTransformation) Reset() { *m = FieldTransformation{} } func (m *FieldTransformation) String() string { return proto.CompactTextString(m) } func (*FieldTransformation) ProtoMessage() {} -func (*FieldTransformation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } +func (*FieldTransformation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } type isFieldTransformation_Transformation interface { isFieldTransformation_Transformation() @@ -5100,7 +5592,7 @@ type RecordTransformations struct { func (m *RecordTransformations) Reset() { *m = RecordTransformations{} } func (m 
*RecordTransformations) String() string { return proto.CompactTextString(m) } func (*RecordTransformations) ProtoMessage() {} -func (*RecordTransformations) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} } +func (*RecordTransformations) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} } func (m *RecordTransformations) GetFieldTransformations() []*FieldTransformation { if m != nil { @@ -5127,7 +5619,7 @@ type RecordSuppression struct { func (m *RecordSuppression) Reset() { *m = RecordSuppression{} } func (m *RecordSuppression) String() string { return proto.CompactTextString(m) } func (*RecordSuppression) ProtoMessage() {} -func (*RecordSuppression) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} } +func (*RecordSuppression) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} } func (m *RecordSuppression) GetCondition() *RecordCondition { if m != nil { @@ -5146,7 +5638,7 @@ type RecordCondition struct { func (m *RecordCondition) Reset() { *m = RecordCondition{} } func (m *RecordCondition) String() string { return proto.CompactTextString(m) } func (*RecordCondition) ProtoMessage() {} -func (*RecordCondition) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } +func (*RecordCondition) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} } func (m *RecordCondition) GetExpressions() *RecordCondition_Expressions { if m != nil { @@ -5185,7 +5677,7 @@ type RecordCondition_Condition struct { func (m *RecordCondition_Condition) Reset() { *m = RecordCondition_Condition{} } func (m *RecordCondition_Condition) String() string { return proto.CompactTextString(m) } func (*RecordCondition_Condition) ProtoMessage() {} -func (*RecordCondition_Condition) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51, 0} } +func (*RecordCondition_Condition) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54, 0} } func (m *RecordCondition_Condition) GetField() *FieldId { if m != nil { @@ -5216,7 +5708,7 @@ type RecordCondition_Conditions struct { func (m *RecordCondition_Conditions) Reset() { *m = RecordCondition_Conditions{} } func (m *RecordCondition_Conditions) String() string { return proto.CompactTextString(m) } func (*RecordCondition_Conditions) ProtoMessage() {} -func (*RecordCondition_Conditions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51, 1} } +func (*RecordCondition_Conditions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54, 1} } func (m *RecordCondition_Conditions) GetConditions() []*RecordCondition_Condition { if m != nil { @@ -5238,7 +5730,7 @@ type RecordCondition_Expressions struct { func (m *RecordCondition_Expressions) Reset() { *m = RecordCondition_Expressions{} } func (m *RecordCondition_Expressions) String() string { return proto.CompactTextString(m) } func (*RecordCondition_Expressions) ProtoMessage() {} -func (*RecordCondition_Expressions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51, 2} } +func (*RecordCondition_Expressions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54, 2} } type isRecordCondition_Expressions_Type interface { isRecordCondition_Expressions_Type() @@ -5337,7 +5829,7 @@ type TransformationOverview struct { func (m *TransformationOverview) Reset() { *m = TransformationOverview{} } func (m *TransformationOverview) String() string { return proto.CompactTextString(m) } func (*TransformationOverview) ProtoMessage() {} -func (*TransformationOverview) Descriptor() ([]byte, []int) { return 
fileDescriptor0, []int{52} } +func (*TransformationOverview) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} } func (m *TransformationOverview) GetTransformedBytes() int64 { if m != nil { @@ -5377,7 +5869,7 @@ type TransformationSummary struct { func (m *TransformationSummary) Reset() { *m = TransformationSummary{} } func (m *TransformationSummary) String() string { return proto.CompactTextString(m) } func (*TransformationSummary) ProtoMessage() {} -func (*TransformationSummary) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} } +func (*TransformationSummary) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} } func (m *TransformationSummary) GetInfoType() *InfoType { if m != nil { @@ -5442,7 +5934,7 @@ func (m *TransformationSummary_SummaryResult) Reset() { *m = Transformat func (m *TransformationSummary_SummaryResult) String() string { return proto.CompactTextString(m) } func (*TransformationSummary_SummaryResult) ProtoMessage() {} func (*TransformationSummary_SummaryResult) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{53, 0} + return fileDescriptor0, []int{56, 0} } func (m *TransformationSummary_SummaryResult) GetCount() int64 { @@ -5466,6 +5958,97 @@ func (m *TransformationSummary_SummaryResult) GetDetails() string { return "" } +// Schedule for triggeredJobs. +type Schedule struct { + // Types that are valid to be assigned to Option: + // *Schedule_ReccurrencePeriodDuration + Option isSchedule_Option `protobuf_oneof:"option"` +} + +func (m *Schedule) Reset() { *m = Schedule{} } +func (m *Schedule) String() string { return proto.CompactTextString(m) } +func (*Schedule) ProtoMessage() {} +func (*Schedule) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } + +type isSchedule_Option interface { + isSchedule_Option() +} + +type Schedule_ReccurrencePeriodDuration struct { + ReccurrencePeriodDuration *google_protobuf2.Duration `protobuf:"bytes,1,opt,name=reccurrence_period_duration,json=reccurrencePeriodDuration,oneof"` +} + +func (*Schedule_ReccurrencePeriodDuration) isSchedule_Option() {} + +func (m *Schedule) GetOption() isSchedule_Option { + if m != nil { + return m.Option + } + return nil +} + +func (m *Schedule) GetReccurrencePeriodDuration() *google_protobuf2.Duration { + if x, ok := m.GetOption().(*Schedule_ReccurrencePeriodDuration); ok { + return x.ReccurrencePeriodDuration + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
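The `Schedule` oneof above wraps a plain protobuf `Duration`; note that the `Reccurrence` spelling comes from the upstream proto field `reccurrence_period_duration` and must be used verbatim. A sketch of a daily recurrence, assuming the duration package is imported as `durpb`:

import durpb "github.com/golang/protobuf/ptypes/duration"

// Illustrative: fire the trigger once every 24 hours.
var daily = &dlp.Schedule{
	Option: &dlp.Schedule_ReccurrencePeriodDuration{
		ReccurrencePeriodDuration: &durpb.Duration{Seconds: 24 * 60 * 60},
	},
}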
+func (*Schedule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Schedule_OneofMarshaler, _Schedule_OneofUnmarshaler, _Schedule_OneofSizer, []interface{}{ + (*Schedule_ReccurrencePeriodDuration)(nil), + } +} + +func _Schedule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Schedule) + // option + switch x := m.Option.(type) { + case *Schedule_ReccurrencePeriodDuration: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ReccurrencePeriodDuration); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Schedule.Option has unexpected type %T", x) + } + return nil +} + +func _Schedule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Schedule) + switch tag { + case 1: // option.reccurrence_period_duration + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf2.Duration) + err := b.DecodeMessage(msg) + m.Option = &Schedule_ReccurrencePeriodDuration{msg} + return true, err + default: + return false, nil + } +} + +func _Schedule_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Schedule) + // option + switch x := m.Option.(type) { + case *Schedule_ReccurrencePeriodDuration: + s := proto.Size(x.ReccurrencePeriodDuration) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + // The inspectTemplate contains a configuration (set of types of sensitive data // to be detected) to be used anywhere you otherwise would normally specify // InspectConfig. @@ -5491,7 +6074,7 @@ type InspectTemplate struct { func (m *InspectTemplate) Reset() { *m = InspectTemplate{} } func (m *InspectTemplate) String() string { return proto.CompactTextString(m) } func (*InspectTemplate) ProtoMessage() {} -func (*InspectTemplate) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} } +func (*InspectTemplate) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} } func (m *InspectTemplate) GetName() string { if m != nil { @@ -5558,7 +6141,7 @@ type DeidentifyTemplate struct { func (m *DeidentifyTemplate) Reset() { *m = DeidentifyTemplate{} } func (m *DeidentifyTemplate) String() string { return proto.CompactTextString(m) } func (*DeidentifyTemplate) ProtoMessage() {} -func (*DeidentifyTemplate) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} } +func (*DeidentifyTemplate) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} } func (m *DeidentifyTemplate) GetName() string { if m != nil { @@ -5602,6 +6185,466 @@ func (m *DeidentifyTemplate) GetDeidentifyConfig() *DeidentifyConfig { return nil } +// Contains a configuration to make dlp api calls on a repeating basis. +type JobTrigger struct { + // Unique resource name for the triggeredJob, assigned by the service when the + // triggeredJob is created, for example + // `projects/dlp-test-project/triggeredJobs/53234423`. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Display name (max 100 chars) + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName" json:"display_name,omitempty"` + // User provided description (max 256 chars) + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The configuration details for the specific type of job to run. + // + // Types that are valid to be assigned to Job: + // *JobTrigger_InspectJob + Job isJobTrigger_Job `protobuf_oneof:"job"` + // A list of triggers which will be OR'ed together. Only one in the list + // needs to trigger for a job to be started. The list may contain only + // a single Schedule trigger and must have at least one object. + Triggers []*JobTrigger_Trigger `protobuf:"bytes,5,rep,name=triggers" json:"triggers,omitempty"` + // A stream of errors encountered when the trigger was activated. Repeated + // errors may result in the JobTrigger automatically being paused. + // Will return the last 100 errors. Whenever the JobTrigger is modified + // this list will be cleared. Output only field. + Errors []*JobTrigger_Error `protobuf:"bytes,6,rep,name=errors" json:"errors,omitempty"` + // The creation timestamp of a triggeredJob, output only field. + CreateTime *google_protobuf1.Timestamp `protobuf:"bytes,7,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // The last update timestamp of a triggeredJob, output only field. + UpdateTime *google_protobuf1.Timestamp `protobuf:"bytes,8,opt,name=update_time,json=updateTime" json:"update_time,omitempty"` + // The timestamp of the last time this trigger executed. + LastRunTime *google_protobuf1.Timestamp `protobuf:"bytes,9,opt,name=last_run_time,json=lastRunTime" json:"last_run_time,omitempty"` + // A status for this trigger.
[required] + Status JobTrigger_Status `protobuf:"varint,10,opt,name=status,enum=google.privacy.dlp.v2beta2.JobTrigger_Status" json:"status,omitempty"` +} + +func (m *JobTrigger) Reset() { *m = JobTrigger{} } +func (m *JobTrigger) String() string { return proto.CompactTextString(m) } +func (*JobTrigger) ProtoMessage() {} +func (*JobTrigger) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{60} } + +type isJobTrigger_Job interface { + isJobTrigger_Job() +} + +type JobTrigger_InspectJob struct { + InspectJob *InspectJobConfig `protobuf:"bytes,4,opt,name=inspect_job,json=inspectJob,oneof"` +} + +func (*JobTrigger_InspectJob) isJobTrigger_Job() {} + +func (m *JobTrigger) GetJob() isJobTrigger_Job { + if m != nil { + return m.Job + } + return nil +} + +func (m *JobTrigger) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *JobTrigger) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *JobTrigger) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *JobTrigger) GetInspectJob() *InspectJobConfig { + if x, ok := m.GetJob().(*JobTrigger_InspectJob); ok { + return x.InspectJob + } + return nil +} + +func (m *JobTrigger) GetTriggers() []*JobTrigger_Trigger { + if m != nil { + return m.Triggers + } + return nil +} + +func (m *JobTrigger) GetErrors() []*JobTrigger_Error { + if m != nil { + return m.Errors + } + return nil +} + +func (m *JobTrigger) GetCreateTime() *google_protobuf1.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *JobTrigger) GetUpdateTime() *google_protobuf1.Timestamp { + if m != nil { + return m.UpdateTime + } + return nil +} + +func (m *JobTrigger) GetLastRunTime() *google_protobuf1.Timestamp { + if m != nil { + return m.LastRunTime + } + return nil +} + +func (m *JobTrigger) GetStatus() JobTrigger_Status { + if m != nil { + return m.Status + } + return JobTrigger_STATUS_UNSPECIFIED +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
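Putting `JobTrigger` together: the struct above combines a job payload (the `Job` oneof), one or more `Triggers`, and a `Status`. A hedged construction sketch; `inspectJob` stands for any `*dlp.InspectJobConfig`, `daily` for a `*dlp.Schedule` like the one sketched earlier, and `JobTrigger_Trigger_Schedule` is the oneof case generated just below:

// Illustrative: a trigger that runs an inspect job on the daily schedule.
func newNightlyTrigger(inspectJob *dlp.InspectJobConfig, daily *dlp.Schedule) *dlp.JobTrigger {
	return &dlp.JobTrigger{
		DisplayName: "nightly-scan", // illustrative; capped at 100 chars per the comment above
		Description: "Scans the staging dataset once a day",
		// Name is left empty: the service assigns the resource name on creation.
		Job:    &dlp.JobTrigger_InspectJob{InspectJob: inspectJob},
		Status: dlp.JobTrigger_HEALTHY,
		// Triggers are OR'ed together; a single schedule entry satisfies the
		// at-least-one requirement documented above.
		Triggers: []*dlp.JobTrigger_Trigger{
			{Trigger: &dlp.JobTrigger_Trigger_Schedule{Schedule: daily}},
		},
	}
}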
+func (*JobTrigger) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _JobTrigger_OneofMarshaler, _JobTrigger_OneofUnmarshaler, _JobTrigger_OneofSizer, []interface{}{ + (*JobTrigger_InspectJob)(nil), + } +} + +func _JobTrigger_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*JobTrigger) + // job + switch x := m.Job.(type) { + case *JobTrigger_InspectJob: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.InspectJob); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("JobTrigger.Job has unexpected type %T", x) + } + return nil +} + +func _JobTrigger_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*JobTrigger) + switch tag { + case 4: // job.inspect_job + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(InspectJobConfig) + err := b.DecodeMessage(msg) + m.Job = &JobTrigger_InspectJob{msg} + return true, err + default: + return false, nil + } +} + +func _JobTrigger_OneofSizer(msg proto.Message) (n int) { + m := msg.(*JobTrigger) + // job + switch x := m.Job.(type) { + case *JobTrigger_InspectJob: + s := proto.Size(x.InspectJob) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// What event needs to occur for a new job to be started. +type JobTrigger_Trigger struct { + // Types that are valid to be assigned to Trigger: + // *JobTrigger_Trigger_Schedule + Trigger isJobTrigger_Trigger_Trigger `protobuf_oneof:"trigger"` +} + +func (m *JobTrigger_Trigger) Reset() { *m = JobTrigger_Trigger{} } +func (m *JobTrigger_Trigger) String() string { return proto.CompactTextString(m) } +func (*JobTrigger_Trigger) ProtoMessage() {} +func (*JobTrigger_Trigger) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{60, 0} } + +type isJobTrigger_Trigger_Trigger interface { + isJobTrigger_Trigger_Trigger() +} + +type JobTrigger_Trigger_Schedule struct { + Schedule *Schedule `protobuf:"bytes,1,opt,name=schedule,oneof"` +} + +func (*JobTrigger_Trigger_Schedule) isJobTrigger_Trigger_Trigger() {} + +func (m *JobTrigger_Trigger) GetTrigger() isJobTrigger_Trigger_Trigger { + if m != nil { + return m.Trigger + } + return nil +} + +func (m *JobTrigger_Trigger) GetSchedule() *Schedule { + if x, ok := m.GetTrigger().(*JobTrigger_Trigger_Schedule); ok { + return x.Schedule + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
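Given the pause-on-repeated-errors behavior documented on `Status`, callers will typically read `Errors` (the `JobTrigger_Error` message defined next) and, after fixing the cause, update a paused trigger back to `HEALTHY`. A sketch of the read side only, assuming the standard `log` package:

// triggerNeedsResume logs recent activation errors and reports whether the
// trigger was auto-paused and should have its Status set back to HEALTHY.
func triggerNeedsResume(t *dlp.JobTrigger) bool {
	for _, e := range t.GetErrors() {
		// Details carries a google.rpc.Status; Timestamps lists when the
		// error occurred (the service keeps at most the last 100 errors).
		log.Printf("trigger %s: %s (occurred %d times)",
			t.GetName(), e.GetDetails().GetMessage(), len(e.GetTimestamps()))
	}
	return t.GetStatus() == dlp.JobTrigger_PAUSED
}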
+func (*JobTrigger_Trigger) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _JobTrigger_Trigger_OneofMarshaler, _JobTrigger_Trigger_OneofUnmarshaler, _JobTrigger_Trigger_OneofSizer, []interface{}{ + (*JobTrigger_Trigger_Schedule)(nil), + } +} + +func _JobTrigger_Trigger_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*JobTrigger_Trigger) + // trigger + switch x := m.Trigger.(type) { + case *JobTrigger_Trigger_Schedule: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Schedule); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("JobTrigger_Trigger.Trigger has unexpected type %T", x) + } + return nil +} + +func _JobTrigger_Trigger_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*JobTrigger_Trigger) + switch tag { + case 1: // trigger.schedule + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Schedule) + err := b.DecodeMessage(msg) + m.Trigger = &JobTrigger_Trigger_Schedule{msg} + return true, err + default: + return false, nil + } +} + +func _JobTrigger_Trigger_OneofSizer(msg proto.Message) (n int) { + m := msg.(*JobTrigger_Trigger) + // trigger + switch x := m.Trigger.(type) { + case *JobTrigger_Trigger_Schedule: + s := proto.Size(x.Schedule) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The results of an unsuccessful activation of the JobTrigger. +type JobTrigger_Error struct { + Details *google_rpc.Status `protobuf:"bytes,1,opt,name=details" json:"details,omitempty"` + // The times the error occurred. + Timestamps []*google_protobuf1.Timestamp `protobuf:"bytes,2,rep,name=timestamps" json:"timestamps,omitempty"` +} + +func (m *JobTrigger_Error) Reset() { *m = JobTrigger_Error{} } +func (m *JobTrigger_Error) String() string { return proto.CompactTextString(m) } +func (*JobTrigger_Error) ProtoMessage() {} +func (*JobTrigger_Error) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{60, 1} } + +func (m *JobTrigger_Error) GetDetails() *google_rpc.Status { + if m != nil { + return m.Details + } + return nil +} + +func (m *JobTrigger_Error) GetTimestamps() []*google_protobuf1.Timestamp { + if m != nil { + return m.Timestamps + } + return nil +} + +// A task to execute on the completion of a job. 
+type Action struct { + // Types that are valid to be assigned to Action: + // *Action_SaveFindings_ + // *Action_PubSub + Action isAction_Action `protobuf_oneof:"action"` +} + +func (m *Action) Reset() { *m = Action{} } +func (m *Action) String() string { return proto.CompactTextString(m) } +func (*Action) ProtoMessage() {} +func (*Action) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{61} } + +type isAction_Action interface { + isAction_Action() +} + +type Action_SaveFindings_ struct { + SaveFindings *Action_SaveFindings `protobuf:"bytes,1,opt,name=save_findings,json=saveFindings,oneof"` +} +type Action_PubSub struct { + PubSub *Action_PublishToPubSub `protobuf:"bytes,2,opt,name=pub_sub,json=pubSub,oneof"` +} + +func (*Action_SaveFindings_) isAction_Action() {} +func (*Action_PubSub) isAction_Action() {} + +func (m *Action) GetAction() isAction_Action { + if m != nil { + return m.Action + } + return nil +} + +func (m *Action) GetSaveFindings() *Action_SaveFindings { + if x, ok := m.GetAction().(*Action_SaveFindings_); ok { + return x.SaveFindings + } + return nil +} + +func (m *Action) GetPubSub() *Action_PublishToPubSub { + if x, ok := m.GetAction().(*Action_PubSub); ok { + return x.PubSub + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Action) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Action_OneofMarshaler, _Action_OneofUnmarshaler, _Action_OneofSizer, []interface{}{ + (*Action_SaveFindings_)(nil), + (*Action_PubSub)(nil), + } +} + +func _Action_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Action) + // action + switch x := m.Action.(type) { + case *Action_SaveFindings_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SaveFindings); err != nil { + return err + } + case *Action_PubSub: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PubSub); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Action.Action has unexpected type %T", x) + } + return nil +} + +func _Action_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Action) + switch tag { + case 1: // action.save_findings + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Action_SaveFindings) + err := b.DecodeMessage(msg) + m.Action = &Action_SaveFindings_{msg} + return true, err + case 2: // action.pub_sub + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Action_PublishToPubSub) + err := b.DecodeMessage(msg) + m.Action = &Action_PubSub{msg} + return true, err + default: + return false, nil + } +} + +func _Action_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Action) + // action + switch x := m.Action.(type) { + case *Action_SaveFindings_: + s := proto.Size(x.SaveFindings) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Action_PubSub: + s := proto.Size(x.PubSub) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// If set, the detailed findings will be persisted to the specified +// OutputStorageConfig. 
Compatible with: Inspect +type Action_SaveFindings struct { + OutputConfig *OutputStorageConfig `protobuf:"bytes,1,opt,name=output_config,json=outputConfig" json:"output_config,omitempty"` +} + +func (m *Action_SaveFindings) Reset() { *m = Action_SaveFindings{} } +func (m *Action_SaveFindings) String() string { return proto.CompactTextString(m) } +func (*Action_SaveFindings) ProtoMessage() {} +func (*Action_SaveFindings) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{61, 0} } + +func (m *Action_SaveFindings) GetOutputConfig() *OutputStorageConfig { + if m != nil { + return m.OutputConfig + } + return nil +} + +// Publish the results of a DlpJob to a pub sub channel. +// Compatible with: Inspect, Risk +type Action_PublishToPubSub struct { + // Cloud Pub/Sub topic to send notifications to. The topic must have given + // publishing access rights to the DLP API service account executing + // the long running DlpJob sending the notifications. + // Format is projects/{project}/topics/{topic}. + Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` +} + +func (m *Action_PublishToPubSub) Reset() { *m = Action_PublishToPubSub{} } +func (m *Action_PublishToPubSub) String() string { return proto.CompactTextString(m) } +func (*Action_PublishToPubSub) ProtoMessage() {} +func (*Action_PublishToPubSub) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{61, 1} } + +func (m *Action_PublishToPubSub) GetTopic() string { + if m != nil { + return m.Topic + } + return "" +} + // Request message for CreateInspectTemplate. type CreateInspectTemplateRequest struct { // The parent resource name, for example projects/my-project-id or @@ -5619,7 +6662,7 @@ type CreateInspectTemplateRequest struct { func (m *CreateInspectTemplateRequest) Reset() { *m = CreateInspectTemplateRequest{} } func (m *CreateInspectTemplateRequest) String() string { return proto.CompactTextString(m) } func (*CreateInspectTemplateRequest) ProtoMessage() {} -func (*CreateInspectTemplateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} } +func (*CreateInspectTemplateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{62} } func (m *CreateInspectTemplateRequest) GetParent() string { if m != nil { @@ -5645,7 +6688,8 @@ func (m *CreateInspectTemplateRequest) GetTemplateId() string { // Request message for UpdateInspectTemplate. type UpdateInspectTemplateRequest struct { // Resource name of organization and inspectTemplate to be updated, for - // example `organizations/433245324/inspectTemplates/432452342`. + // example `organizations/433245324/inspectTemplates/432452342` or + // projects/project-id/inspectTemplates/432452342. Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // New InspectTemplate value.
InspectTemplate *InspectTemplate `protobuf:"bytes,2,opt,name=inspect_template,json=inspectTemplate" json:"inspect_template,omitempty"` @@ -5656,7 +6700,7 @@ type UpdateInspectTemplateRequest struct { func (m *UpdateInspectTemplateRequest) Reset() { *m = UpdateInspectTemplateRequest{} } func (m *UpdateInspectTemplateRequest) String() string { return proto.CompactTextString(m) } func (*UpdateInspectTemplateRequest) ProtoMessage() {} -func (*UpdateInspectTemplateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } +func (*UpdateInspectTemplateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{63} } func (m *UpdateInspectTemplateRequest) GetName() string { if m != nil { @@ -5682,14 +6726,15 @@ func (m *UpdateInspectTemplateRequest) GetUpdateMask() *google_protobuf4.FieldMa // Request message for GetInspectTemplate. type GetInspectTemplateRequest struct { // Resource name of the organization and inspectTemplate to be read, for - // example `organizations/433245324/inspectTemplates/432452342`. + // example `organizations/433245324/inspectTemplates/432452342` or + // projects/project-id/inspectTemplates/432452342. Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` } func (m *GetInspectTemplateRequest) Reset() { *m = GetInspectTemplateRequest{} } func (m *GetInspectTemplateRequest) String() string { return proto.CompactTextString(m) } func (*GetInspectTemplateRequest) ProtoMessage() {} -func (*GetInspectTemplateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} } +func (*GetInspectTemplateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{64} } func (m *GetInspectTemplateRequest) GetName() string { if m != nil { @@ -5714,7 +6759,7 @@ type ListInspectTemplatesRequest struct { func (m *ListInspectTemplatesRequest) Reset() { *m = ListInspectTemplatesRequest{} } func (m *ListInspectTemplatesRequest) String() string { return proto.CompactTextString(m) } func (*ListInspectTemplatesRequest) ProtoMessage() {} -func (*ListInspectTemplatesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} } +func (*ListInspectTemplatesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{65} } func (m *ListInspectTemplatesRequest) GetParent() string { if m != nil { @@ -5749,7 +6794,7 @@ type ListInspectTemplatesResponse struct { func (m *ListInspectTemplatesResponse) Reset() { *m = ListInspectTemplatesResponse{} } func (m *ListInspectTemplatesResponse) String() string { return proto.CompactTextString(m) } func (*ListInspectTemplatesResponse) ProtoMessage() {} -func (*ListInspectTemplatesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{60} } +func (*ListInspectTemplatesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{66} } func (m *ListInspectTemplatesResponse) GetInspectTemplates() []*InspectTemplate { if m != nil { @@ -5768,14 +6813,15 @@ func (m *ListInspectTemplatesResponse) GetNextPageToken() string { // Request message for DeleteInspectTemplate. type DeleteInspectTemplateRequest struct { // Resource name of the organization and inspectTemplate to be deleted, for - // example `organizations/433245324/inspectTemplates/432452342`. + // example `organizations/433245324/inspectTemplates/432452342` or + // projects/project-id/inspectTemplates/432452342. 
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` } func (m *DeleteInspectTemplateRequest) Reset() { *m = DeleteInspectTemplateRequest{} } func (m *DeleteInspectTemplateRequest) String() string { return proto.CompactTextString(m) } func (*DeleteInspectTemplateRequest) ProtoMessage() {} -func (*DeleteInspectTemplateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{61} } +func (*DeleteInspectTemplateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{67} } func (m *DeleteInspectTemplateRequest) GetName() string { if m != nil { @@ -5784,6 +6830,207 @@ func (m *DeleteInspectTemplateRequest) GetName() string { return "" } +// Request message for CreateJobTrigger. +type CreateJobTriggerRequest struct { + // The parent resource name, for example projects/my-project-id. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // The JobTrigger to create. + JobTrigger *JobTrigger `protobuf:"bytes,2,opt,name=job_trigger,json=jobTrigger" json:"job_trigger,omitempty"` + // The trigger id can contain uppercase and lowercase letters, + // numbers, and hyphens; that is, it must match the regular + // expression: `[a-zA-Z\\d-]+`. The maximum length is 100 + // characters. Can be empty to allow the system to generate one. + TriggerId string `protobuf:"bytes,3,opt,name=trigger_id,json=triggerId" json:"trigger_id,omitempty"` +} + +func (m *CreateJobTriggerRequest) Reset() { *m = CreateJobTriggerRequest{} } +func (m *CreateJobTriggerRequest) String() string { return proto.CompactTextString(m) } +func (*CreateJobTriggerRequest) ProtoMessage() {} +func (*CreateJobTriggerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{68} } + +func (m *CreateJobTriggerRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateJobTriggerRequest) GetJobTrigger() *JobTrigger { + if m != nil { + return m.JobTrigger + } + return nil +} + +func (m *CreateJobTriggerRequest) GetTriggerId() string { + if m != nil { + return m.TriggerId + } + return "" +} + +// Request message for UpdateJobTrigger. +type UpdateJobTriggerRequest struct { + // Resource name of the project and the triggeredJob, for example + // `projects/dlp-test-project/jobTriggers/53234423`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // New JobTrigger value. + JobTrigger *JobTrigger `protobuf:"bytes,2,opt,name=job_trigger,json=jobTrigger" json:"job_trigger,omitempty"` + // Mask to control which fields get updated. + UpdateMask *google_protobuf4.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` +} + +func (m *UpdateJobTriggerRequest) Reset() { *m = UpdateJobTriggerRequest{} } +func (m *UpdateJobTriggerRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateJobTriggerRequest) ProtoMessage() {} +func (*UpdateJobTriggerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{69} } + +func (m *UpdateJobTriggerRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UpdateJobTriggerRequest) GetJobTrigger() *JobTrigger { + if m != nil { + return m.JobTrigger + } + return nil +} + +func (m *UpdateJobTriggerRequest) GetUpdateMask() *google_protobuf4.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +// Request message for GetJobTrigger. 
+type GetJobTriggerRequest struct {
+	// Resource name of the project and the triggeredJob, for example
+	// `projects/dlp-test-project/jobTriggers/53234423`.
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+}
+
+func (m *GetJobTriggerRequest) Reset()         { *m = GetJobTriggerRequest{} }
+func (m *GetJobTriggerRequest) String() string { return proto.CompactTextString(m) }
+func (*GetJobTriggerRequest) ProtoMessage()    {}
+func (*GetJobTriggerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{70} }
+
+func (m *GetJobTriggerRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+// Request message for ListJobTriggers.
+type ListJobTriggersRequest struct {
+	// The parent resource name, for example projects/my-project-id.
+	Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"`
+	// Optional page token to continue retrieval. Comes from previous call
+	// to ListJobTriggers. `order_by` and `filter` should not change for
+	// subsequent calls, but can be omitted if token is specified.
+	PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken" json:"page_token,omitempty"`
+	// Optional size of the page, can be limited by a server.
+	PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize" json:"page_size,omitempty"`
+	// Optional comma separated list of triggeredJob fields to order by,
+	// followed by 'asc/desc' postfix, i.e.
+	// `"create_time asc,name desc,schedule_mode asc"`. This list is
+	// case-insensitive.
+	//
+	// Example: `"name asc,schedule_mode desc, status desc"`
+	//
+	// Supported filter keys and values are:
+	//
+	// - `create_time`: corresponds to time the triggeredJob was created.
+	// - `update_time`: corresponds to time the triggeredJob was last updated.
+	// - `name`: corresponds to JobTrigger's display name.
+	// - `status`: corresponds to the triggeredJob status.
+	OrderBy string `protobuf:"bytes,4,opt,name=order_by,json=orderBy" json:"order_by,omitempty"`
+}
+
+func (m *ListJobTriggersRequest) Reset()         { *m = ListJobTriggersRequest{} }
+func (m *ListJobTriggersRequest) String() string { return proto.CompactTextString(m) }
+func (*ListJobTriggersRequest) ProtoMessage()    {}
+func (*ListJobTriggersRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{71} }
+
+func (m *ListJobTriggersRequest) GetParent() string {
+	if m != nil {
+		return m.Parent
+	}
+	return ""
+}
+
+func (m *ListJobTriggersRequest) GetPageToken() string {
+	if m != nil {
+		return m.PageToken
+	}
+	return ""
+}
+
+func (m *ListJobTriggersRequest) GetPageSize() int32 {
+	if m != nil {
+		return m.PageSize
+	}
+	return 0
+}
+
+func (m *ListJobTriggersRequest) GetOrderBy() string {
+	if m != nil {
+		return m.OrderBy
+	}
+	return ""
+}
+
+// Response message for ListJobTriggers.
+type ListJobTriggersResponse struct {
+	// List of triggeredJobs, up to page_size in ListJobTriggersRequest.
+	JobTriggers []*JobTrigger `protobuf:"bytes,1,rep,name=job_triggers,json=jobTriggers" json:"job_triggers,omitempty"`
+	// If the next page is available then the next page token to be used
+	// in following ListJobTriggers request.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListJobTriggersResponse) Reset() { *m = ListJobTriggersResponse{} } +func (m *ListJobTriggersResponse) String() string { return proto.CompactTextString(m) } +func (*ListJobTriggersResponse) ProtoMessage() {} +func (*ListJobTriggersResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{72} } + +func (m *ListJobTriggersResponse) GetJobTriggers() []*JobTrigger { + if m != nil { + return m.JobTriggers + } + return nil +} + +func (m *ListJobTriggersResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request message for DeleteJobTrigger. +type DeleteJobTriggerRequest struct { + // Resource name of the project and the triggeredJob, for example + // `projects/dlp-test-project/jobTriggers/53234423`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteJobTriggerRequest) Reset() { *m = DeleteJobTriggerRequest{} } +func (m *DeleteJobTriggerRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteJobTriggerRequest) ProtoMessage() {} +func (*DeleteJobTriggerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{73} } + +func (m *DeleteJobTriggerRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + type InspectJobConfig struct { // The data to scan. StorageConfig *StorageConfig `protobuf:"bytes,1,opt,name=storage_config,json=storageConfig" json:"storage_config,omitempty"` @@ -5795,12 +7042,15 @@ type InspectJobConfig struct { // `inspect_config` will be merged into the values persisted as part of the // template. InspectTemplateName string `protobuf:"bytes,4,opt,name=inspect_template_name,json=inspectTemplateName" json:"inspect_template_name,omitempty"` + // Actions to execute at the completion of the job. Are executed in the order + // provided. + Actions []*Action `protobuf:"bytes,5,rep,name=actions" json:"actions,omitempty"` } func (m *InspectJobConfig) Reset() { *m = InspectJobConfig{} } func (m *InspectJobConfig) String() string { return proto.CompactTextString(m) } func (*InspectJobConfig) ProtoMessage() {} -func (*InspectJobConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{62} } +func (*InspectJobConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{74} } func (m *InspectJobConfig) GetStorageConfig() *StorageConfig { if m != nil { @@ -5830,6 +7080,13 @@ func (m *InspectJobConfig) GetInspectTemplateName() string { return "" } +func (m *InspectJobConfig) GetActions() []*Action { + if m != nil { + return m.Actions + } + return nil +} + // Combines all of the information about a DLP job. type DlpJob struct { // The server-assigned name. @@ -5850,12 +7107,15 @@ type DlpJob struct { EndTime *google_protobuf1.Timestamp `protobuf:"bytes,8,opt,name=end_time,json=endTime" json:"end_time,omitempty"` // A stream of errors encountered running the job. ErrorResults []*google_rpc.Status `protobuf:"bytes,9,rep,name=error_results,json=errorResults" json:"error_results,omitempty"` + // If created by a job trigger, the resource name of the trigger that + // instantiated the job. 
+	JobTriggerName string `protobuf:"bytes,10,opt,name=job_trigger_name,json=jobTriggerName" json:"job_trigger_name,omitempty"`
 }
 
 func (m *DlpJob) Reset()         { *m = DlpJob{} }
 func (m *DlpJob) String() string { return proto.CompactTextString(m) }
 func (*DlpJob) ProtoMessage()    {}
-func (*DlpJob) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{63} }
+func (*DlpJob) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{75} }
 
 type isDlpJob_Details interface {
 	isDlpJob_Details()
@@ -5941,6 +7201,13 @@ func (m *DlpJob) GetErrorResults() []*google_rpc.Status {
 	return nil
 }
 
+func (m *DlpJob) GetJobTriggerName() string {
+	if m != nil {
+		return m.JobTriggerName
+	}
+	return ""
+}
+
 // XXX_OneofFuncs is for the internal use of the proto package.
 func (*DlpJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
 	return _DlpJob_OneofMarshaler, _DlpJob_OneofUnmarshaler, _DlpJob_OneofSizer, []interface{}{
@@ -6024,7 +7291,7 @@ type GetDlpJobRequest struct {
 func (m *GetDlpJobRequest) Reset()         { *m = GetDlpJobRequest{} }
 func (m *GetDlpJobRequest) String() string { return proto.CompactTextString(m) }
 func (*GetDlpJobRequest) ProtoMessage()    {}
-func (*GetDlpJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{64} }
+func (*GetDlpJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{76} }
 
 func (m *GetDlpJobRequest) GetName() string {
 	if m != nil {
@@ -6048,6 +7315,7 @@ type ListDlpJobsRequest struct {
 	// * Supported fields/values for inspect jobs:
 	//     - `state` - PENDING|RUNNING|CANCELED|FINISHED|FAILED
 	//     - `inspected_storage` - DATASTORE|CLOUD_STORAGE|BIGQUERY
+	//     - `trigger_name` - The resource name of the trigger that created the job.
 	// * Supported fields for risk analysis jobs:
 	//     - `state` - RUNNING|CANCELED|FINISHED|FAILED
 	// * The operator must be `=` or `!=`.
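
The hunks above add the JobTrigger CRUD messages, the Action oneof, and the new DlpJob.JobTriggerName field, all following the standard Go protobuf codegen pattern: a oneof is set by assigning one of the generated wrapper structs, and the typed Get accessors unwrap it again. Below is a minimal sketch of wiring an Action into the new InspectJobConfig.Actions field; the genproto import path and the topic value are illustrative assumptions, not part of this diff.

package main

import (
	dlp "google.golang.org/genproto/googleapis/privacy/dlp/v2beta2" // assumed vendored import path
)

func newJobConfigWithActions(storage *dlp.StorageConfig) *dlp.InspectJobConfig {
	// Set the oneof by assigning the Action_PubSub wrapper; GetPubSub
	// unwraps it on the read side and returns nil for any other variant.
	notify := &dlp.Action{
		Action: &dlp.Action_PubSub{
			PubSub: &dlp.Action_PublishToPubSub{
				Topic: "projects/my-project/topics/dlp-jobs", // hypothetical topic
			},
		},
	}
	return &dlp.InspectJobConfig{
		StorageConfig: storage,
		Actions:       []*dlp.Action{notify}, // executed in order at job completion
	}
}
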
@@ -6071,7 +7339,7 @@ type ListDlpJobsRequest struct { func (m *ListDlpJobsRequest) Reset() { *m = ListDlpJobsRequest{} } func (m *ListDlpJobsRequest) String() string { return proto.CompactTextString(m) } func (*ListDlpJobsRequest) ProtoMessage() {} -func (*ListDlpJobsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{65} } +func (*ListDlpJobsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{77} } func (m *ListDlpJobsRequest) GetParent() string { if m != nil { @@ -6119,7 +7387,7 @@ type ListDlpJobsResponse struct { func (m *ListDlpJobsResponse) Reset() { *m = ListDlpJobsResponse{} } func (m *ListDlpJobsResponse) String() string { return proto.CompactTextString(m) } func (*ListDlpJobsResponse) ProtoMessage() {} -func (*ListDlpJobsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{66} } +func (*ListDlpJobsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{78} } func (m *ListDlpJobsResponse) GetJobs() []*DlpJob { if m != nil { @@ -6144,7 +7412,7 @@ type CancelDlpJobRequest struct { func (m *CancelDlpJobRequest) Reset() { *m = CancelDlpJobRequest{} } func (m *CancelDlpJobRequest) String() string { return proto.CompactTextString(m) } func (*CancelDlpJobRequest) ProtoMessage() {} -func (*CancelDlpJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{67} } +func (*CancelDlpJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{79} } func (m *CancelDlpJobRequest) GetName() string { if m != nil { @@ -6162,7 +7430,7 @@ type DeleteDlpJobRequest struct { func (m *DeleteDlpJobRequest) Reset() { *m = DeleteDlpJobRequest{} } func (m *DeleteDlpJobRequest) String() string { return proto.CompactTextString(m) } func (*DeleteDlpJobRequest) ProtoMessage() {} -func (*DeleteDlpJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{68} } +func (*DeleteDlpJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{80} } func (m *DeleteDlpJobRequest) GetName() string { if m != nil { @@ -6189,7 +7457,7 @@ func (m *CreateDeidentifyTemplateRequest) Reset() { *m = CreateDeidentif func (m *CreateDeidentifyTemplateRequest) String() string { return proto.CompactTextString(m) } func (*CreateDeidentifyTemplateRequest) ProtoMessage() {} func (*CreateDeidentifyTemplateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{69} + return fileDescriptor0, []int{81} } func (m *CreateDeidentifyTemplateRequest) GetParent() string { @@ -6216,7 +7484,8 @@ func (m *CreateDeidentifyTemplateRequest) GetTemplateId() string { // Request message for UpdateDeidentifyTemplate. type UpdateDeidentifyTemplateRequest struct { // Resource name of organization and deidentify template to be updated, for - // example `organizations/433245324/deidentifyTemplates/432452342`. + // example `organizations/433245324/deidentifyTemplates/432452342` or + // projects/project-id/deidentifyTemplates/432452342. Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // New DeidentifyTemplate value. 
DeidentifyTemplate *DeidentifyTemplate `protobuf:"bytes,2,opt,name=deidentify_template,json=deidentifyTemplate" json:"deidentify_template,omitempty"` @@ -6228,7 +7497,7 @@ func (m *UpdateDeidentifyTemplateRequest) Reset() { *m = UpdateDeidentif func (m *UpdateDeidentifyTemplateRequest) String() string { return proto.CompactTextString(m) } func (*UpdateDeidentifyTemplateRequest) ProtoMessage() {} func (*UpdateDeidentifyTemplateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{70} + return fileDescriptor0, []int{82} } func (m *UpdateDeidentifyTemplateRequest) GetName() string { @@ -6255,14 +7524,15 @@ func (m *UpdateDeidentifyTemplateRequest) GetUpdateMask() *google_protobuf4.Fiel // Request message for GetDeidentifyTemplate. type GetDeidentifyTemplateRequest struct { // Resource name of the organization and deidentify template to be read, for - // example `organizations/433245324/deidentifyTemplates/432452342`. + // example `organizations/433245324/deidentifyTemplates/432452342` or + // projects/project-id/deidentifyTemplates/432452342. Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` } func (m *GetDeidentifyTemplateRequest) Reset() { *m = GetDeidentifyTemplateRequest{} } func (m *GetDeidentifyTemplateRequest) String() string { return proto.CompactTextString(m) } func (*GetDeidentifyTemplateRequest) ProtoMessage() {} -func (*GetDeidentifyTemplateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{71} } +func (*GetDeidentifyTemplateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{83} } func (m *GetDeidentifyTemplateRequest) GetName() string { if m != nil { @@ -6287,7 +7557,7 @@ type ListDeidentifyTemplatesRequest struct { func (m *ListDeidentifyTemplatesRequest) Reset() { *m = ListDeidentifyTemplatesRequest{} } func (m *ListDeidentifyTemplatesRequest) String() string { return proto.CompactTextString(m) } func (*ListDeidentifyTemplatesRequest) ProtoMessage() {} -func (*ListDeidentifyTemplatesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{72} } +func (*ListDeidentifyTemplatesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{84} } func (m *ListDeidentifyTemplatesRequest) GetParent() string { if m != nil { @@ -6324,7 +7594,7 @@ func (m *ListDeidentifyTemplatesResponse) Reset() { *m = ListDeidentifyT func (m *ListDeidentifyTemplatesResponse) String() string { return proto.CompactTextString(m) } func (*ListDeidentifyTemplatesResponse) ProtoMessage() {} func (*ListDeidentifyTemplatesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{73} + return fileDescriptor0, []int{85} } func (m *ListDeidentifyTemplatesResponse) GetDeidentifyTemplates() []*DeidentifyTemplate { @@ -6344,7 +7614,8 @@ func (m *ListDeidentifyTemplatesResponse) GetNextPageToken() string { // Request message for DeleteDeidentifyTemplate. type DeleteDeidentifyTemplateRequest struct { // Resource name of the organization and deidentify template to be deleted, - // for example `organizations/433245324/deidentifyTemplates/432452342`. + // for example `organizations/433245324/deidentifyTemplates/432452342` or + // projects/project-id/deidentifyTemplates/432452342. 
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` } @@ -6352,7 +7623,7 @@ func (m *DeleteDeidentifyTemplateRequest) Reset() { *m = DeleteDeidentif func (m *DeleteDeidentifyTemplateRequest) String() string { return proto.CompactTextString(m) } func (*DeleteDeidentifyTemplateRequest) ProtoMessage() {} func (*DeleteDeidentifyTemplateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{74} + return fileDescriptor0, []int{86} } func (m *DeleteDeidentifyTemplateRequest) GetName() string { @@ -6420,6 +7691,9 @@ func init() { proto.RegisterType((*AnalyzeDataSourceRiskDetails_KMapEstimationResult_KMapEstimationHistogramBucket)(nil), "google.privacy.dlp.v2beta2.AnalyzeDataSourceRiskDetails.KMapEstimationResult.KMapEstimationHistogramBucket") proto.RegisterType((*ValueFrequency)(nil), "google.privacy.dlp.v2beta2.ValueFrequency") proto.RegisterType((*Value)(nil), "google.privacy.dlp.v2beta2.Value") + proto.RegisterType((*QuoteInfo)(nil), "google.privacy.dlp.v2beta2.QuoteInfo") + proto.RegisterType((*DateTime)(nil), "google.privacy.dlp.v2beta2.DateTime") + proto.RegisterType((*DateTime_TimeZone)(nil), "google.privacy.dlp.v2beta2.DateTime.TimeZone") proto.RegisterType((*DeidentifyConfig)(nil), "google.privacy.dlp.v2beta2.DeidentifyConfig") proto.RegisterType((*PrimitiveTransformation)(nil), "google.privacy.dlp.v2beta2.PrimitiveTransformation") proto.RegisterType((*TimePartConfig)(nil), "google.privacy.dlp.v2beta2.TimePartConfig") @@ -6437,6 +7711,7 @@ func init() { proto.RegisterType((*TransientCryptoKey)(nil), "google.privacy.dlp.v2beta2.TransientCryptoKey") proto.RegisterType((*UnwrappedCryptoKey)(nil), "google.privacy.dlp.v2beta2.UnwrappedCryptoKey") proto.RegisterType((*KmsWrappedCryptoKey)(nil), "google.privacy.dlp.v2beta2.KmsWrappedCryptoKey") + proto.RegisterType((*DateShiftConfig)(nil), "google.privacy.dlp.v2beta2.DateShiftConfig") proto.RegisterType((*InfoTypeTransformations)(nil), "google.privacy.dlp.v2beta2.InfoTypeTransformations") proto.RegisterType((*InfoTypeTransformations_InfoTypeTransformation)(nil), "google.privacy.dlp.v2beta2.InfoTypeTransformations.InfoTypeTransformation") proto.RegisterType((*FieldTransformation)(nil), "google.privacy.dlp.v2beta2.FieldTransformation") @@ -6449,14 +7724,27 @@ func init() { proto.RegisterType((*TransformationOverview)(nil), "google.privacy.dlp.v2beta2.TransformationOverview") proto.RegisterType((*TransformationSummary)(nil), "google.privacy.dlp.v2beta2.TransformationSummary") proto.RegisterType((*TransformationSummary_SummaryResult)(nil), "google.privacy.dlp.v2beta2.TransformationSummary.SummaryResult") + proto.RegisterType((*Schedule)(nil), "google.privacy.dlp.v2beta2.Schedule") proto.RegisterType((*InspectTemplate)(nil), "google.privacy.dlp.v2beta2.InspectTemplate") proto.RegisterType((*DeidentifyTemplate)(nil), "google.privacy.dlp.v2beta2.DeidentifyTemplate") + proto.RegisterType((*JobTrigger)(nil), "google.privacy.dlp.v2beta2.JobTrigger") + proto.RegisterType((*JobTrigger_Trigger)(nil), "google.privacy.dlp.v2beta2.JobTrigger.Trigger") + proto.RegisterType((*JobTrigger_Error)(nil), "google.privacy.dlp.v2beta2.JobTrigger.Error") + proto.RegisterType((*Action)(nil), "google.privacy.dlp.v2beta2.Action") + proto.RegisterType((*Action_SaveFindings)(nil), "google.privacy.dlp.v2beta2.Action.SaveFindings") + proto.RegisterType((*Action_PublishToPubSub)(nil), "google.privacy.dlp.v2beta2.Action.PublishToPubSub") proto.RegisterType((*CreateInspectTemplateRequest)(nil), 
"google.privacy.dlp.v2beta2.CreateInspectTemplateRequest") proto.RegisterType((*UpdateInspectTemplateRequest)(nil), "google.privacy.dlp.v2beta2.UpdateInspectTemplateRequest") proto.RegisterType((*GetInspectTemplateRequest)(nil), "google.privacy.dlp.v2beta2.GetInspectTemplateRequest") proto.RegisterType((*ListInspectTemplatesRequest)(nil), "google.privacy.dlp.v2beta2.ListInspectTemplatesRequest") proto.RegisterType((*ListInspectTemplatesResponse)(nil), "google.privacy.dlp.v2beta2.ListInspectTemplatesResponse") proto.RegisterType((*DeleteInspectTemplateRequest)(nil), "google.privacy.dlp.v2beta2.DeleteInspectTemplateRequest") + proto.RegisterType((*CreateJobTriggerRequest)(nil), "google.privacy.dlp.v2beta2.CreateJobTriggerRequest") + proto.RegisterType((*UpdateJobTriggerRequest)(nil), "google.privacy.dlp.v2beta2.UpdateJobTriggerRequest") + proto.RegisterType((*GetJobTriggerRequest)(nil), "google.privacy.dlp.v2beta2.GetJobTriggerRequest") + proto.RegisterType((*ListJobTriggersRequest)(nil), "google.privacy.dlp.v2beta2.ListJobTriggersRequest") + proto.RegisterType((*ListJobTriggersResponse)(nil), "google.privacy.dlp.v2beta2.ListJobTriggersResponse") + proto.RegisterType((*DeleteJobTriggerRequest)(nil), "google.privacy.dlp.v2beta2.DeleteJobTriggerRequest") proto.RegisterType((*InspectJobConfig)(nil), "google.privacy.dlp.v2beta2.InspectJobConfig") proto.RegisterType((*DlpJob)(nil), "google.privacy.dlp.v2beta2.DlpJob") proto.RegisterType((*GetDlpJobRequest)(nil), "google.privacy.dlp.v2beta2.GetDlpJobRequest") @@ -6473,11 +7761,13 @@ func init() { proto.RegisterEnum("google.privacy.dlp.v2beta2.InfoTypeSupportedBy", InfoTypeSupportedBy_name, InfoTypeSupportedBy_value) proto.RegisterEnum("google.privacy.dlp.v2beta2.RelationalOperator", RelationalOperator_name, RelationalOperator_value) proto.RegisterEnum("google.privacy.dlp.v2beta2.DlpJobType", DlpJobType_name, DlpJobType_value) + proto.RegisterEnum("google.privacy.dlp.v2beta2.OutputStorageConfig_OutputSchema", OutputStorageConfig_OutputSchema_name, OutputStorageConfig_OutputSchema_value) proto.RegisterEnum("google.privacy.dlp.v2beta2.TimePartConfig_TimePart", TimePartConfig_TimePart_name, TimePartConfig_TimePart_value) proto.RegisterEnum("google.privacy.dlp.v2beta2.CharsToIgnore_CommonCharsToIgnore", CharsToIgnore_CommonCharsToIgnore_name, CharsToIgnore_CommonCharsToIgnore_value) proto.RegisterEnum("google.privacy.dlp.v2beta2.CryptoReplaceFfxFpeConfig_FfxCommonNativeAlphabet", CryptoReplaceFfxFpeConfig_FfxCommonNativeAlphabet_name, CryptoReplaceFfxFpeConfig_FfxCommonNativeAlphabet_value) proto.RegisterEnum("google.privacy.dlp.v2beta2.RecordCondition_Expressions_LogicalOperator", RecordCondition_Expressions_LogicalOperator_name, RecordCondition_Expressions_LogicalOperator_value) proto.RegisterEnum("google.privacy.dlp.v2beta2.TransformationSummary_TransformationResultCode", TransformationSummary_TransformationResultCode_name, TransformationSummary_TransformationResultCode_value) + proto.RegisterEnum("google.privacy.dlp.v2beta2.JobTrigger_Status", JobTrigger_Status_name, JobTrigger_Status_value) proto.RegisterEnum("google.privacy.dlp.v2beta2.DlpJob_JobState", DlpJob_JobState_name, DlpJob_JobState_value) } @@ -6511,7 +7801,7 @@ type DlpServiceClient interface { // repository. [How-to guide](/dlp/docs/inspecting-storage) InspectDataSource(ctx context.Context, in *InspectDataSourceRequest, opts ...grpc.CallOption) (*DlpJob, error) // Schedules a job to compute risk analysis metrics over content in a Google - // Cloud Platform repository. 
[How-to guide}(/dlp/docs/compute-risk-analysis)
+	// Cloud Platform repository. [How-to guide](/dlp/docs/compute-risk-analysis)
 	AnalyzeDataSourceRisk(ctx context.Context, in *AnalyzeDataSourceRiskRequest, opts ...grpc.CallOption) (*DlpJob, error)
 	// Returns sensitive information types DLP supports.
 	ListInfoTypes(ctx context.Context, in *ListInfoTypesRequest, opts ...grpc.CallOption) (*ListInfoTypesResponse, error)
@@ -6537,6 +7827,17 @@ type DlpServiceClient interface {
 	ListDeidentifyTemplates(ctx context.Context, in *ListDeidentifyTemplatesRequest, opts ...grpc.CallOption) (*ListDeidentifyTemplatesResponse, error)
 	// Deletes deidentify templates.
 	DeleteDeidentifyTemplate(ctx context.Context, in *DeleteDeidentifyTemplateRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error)
+	// Creates a job to run DLP actions such as scanning storage for sensitive
+	// information on a set schedule.
+	CreateJobTrigger(ctx context.Context, in *CreateJobTriggerRequest, opts ...grpc.CallOption) (*JobTrigger, error)
+	// Updates a job trigger.
+	UpdateJobTrigger(ctx context.Context, in *UpdateJobTriggerRequest, opts ...grpc.CallOption) (*JobTrigger, error)
+	// Gets a job trigger.
+	GetJobTrigger(ctx context.Context, in *GetJobTriggerRequest, opts ...grpc.CallOption) (*JobTrigger, error)
+	// Lists job triggers.
+	ListJobTriggers(ctx context.Context, in *ListJobTriggersRequest, opts ...grpc.CallOption) (*ListJobTriggersResponse, error)
+	// Deletes a job trigger.
+	DeleteJobTrigger(ctx context.Context, in *DeleteJobTriggerRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error)
 	// Lists DlpJobs that match the specified filter in the request.
 	ListDlpJobs(ctx context.Context, in *ListDlpJobsRequest, opts ...grpc.CallOption) (*ListDlpJobsResponse, error)
 	// Gets the latest state of a long-running DlpJob.
@@ -6712,6 +8013,51 @@ func (c *dlpServiceClient) DeleteDeidentifyTemplate(ctx context.Context, in *Del
 	return out, nil
 }
 
+func (c *dlpServiceClient) CreateJobTrigger(ctx context.Context, in *CreateJobTriggerRequest, opts ...grpc.CallOption) (*JobTrigger, error) {
+	out := new(JobTrigger)
+	err := grpc.Invoke(ctx, "/google.privacy.dlp.v2beta2.DlpService/CreateJobTrigger", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *dlpServiceClient) UpdateJobTrigger(ctx context.Context, in *UpdateJobTriggerRequest, opts ...grpc.CallOption) (*JobTrigger, error) {
+	out := new(JobTrigger)
+	err := grpc.Invoke(ctx, "/google.privacy.dlp.v2beta2.DlpService/UpdateJobTrigger", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *dlpServiceClient) GetJobTrigger(ctx context.Context, in *GetJobTriggerRequest, opts ...grpc.CallOption) (*JobTrigger, error) {
+	out := new(JobTrigger)
+	err := grpc.Invoke(ctx, "/google.privacy.dlp.v2beta2.DlpService/GetJobTrigger", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *dlpServiceClient) ListJobTriggers(ctx context.Context, in *ListJobTriggersRequest, opts ...grpc.CallOption) (*ListJobTriggersResponse, error) {
+	out := new(ListJobTriggersResponse)
+	err := grpc.Invoke(ctx, "/google.privacy.dlp.v2beta2.DlpService/ListJobTriggers", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *dlpServiceClient) DeleteJobTrigger(ctx context.Context, in *DeleteJobTriggerRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) {
+	out := new(google_protobuf3.Empty)
+	err := grpc.Invoke(ctx, "/google.privacy.dlp.v2beta2.DlpService/DeleteJobTrigger", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
 func (c *dlpServiceClient) ListDlpJobs(ctx context.Context, in *ListDlpJobsRequest, opts ...grpc.CallOption) (*ListDlpJobsResponse, error) {
 	out := new(ListDlpJobsResponse)
 	err := grpc.Invoke(ctx, "/google.privacy.dlp.v2beta2.DlpService/ListDlpJobs", in, out, c.cc, opts...)
@@ -6770,7 +8116,7 @@ type DlpServiceServer interface {
 	// repository. [How-to guide](/dlp/docs/inspecting-storage)
 	InspectDataSource(context.Context, *InspectDataSourceRequest) (*DlpJob, error)
 	// Schedules a job to compute risk analysis metrics over content in a Google
-	// Cloud Platform repository. [How-to guide}(/dlp/docs/compute-risk-analysis)
+	// Cloud Platform repository. [How-to guide](/dlp/docs/compute-risk-analysis)
 	AnalyzeDataSourceRisk(context.Context, *AnalyzeDataSourceRiskRequest) (*DlpJob, error)
 	// Returns sensitive information types DLP supports.
 	ListInfoTypes(context.Context, *ListInfoTypesRequest) (*ListInfoTypesResponse, error)
@@ -6796,6 +8142,17 @@ type DlpServiceServer interface {
 	ListDeidentifyTemplates(context.Context, *ListDeidentifyTemplatesRequest) (*ListDeidentifyTemplatesResponse, error)
 	// Deletes deidentify templates.
 	DeleteDeidentifyTemplate(context.Context, *DeleteDeidentifyTemplateRequest) (*google_protobuf3.Empty, error)
+	// Creates a job to run DLP actions such as scanning storage for sensitive
+	// information on a set schedule.
+	CreateJobTrigger(context.Context, *CreateJobTriggerRequest) (*JobTrigger, error)
+	// Updates a job trigger.
+	UpdateJobTrigger(context.Context, *UpdateJobTriggerRequest) (*JobTrigger, error)
+	// Gets a job trigger.
+	GetJobTrigger(context.Context, *GetJobTriggerRequest) (*JobTrigger, error)
+	// Lists job triggers.
+	ListJobTriggers(context.Context, *ListJobTriggersRequest) (*ListJobTriggersResponse, error)
+	// Deletes a job trigger.
+	DeleteJobTrigger(context.Context, *DeleteJobTriggerRequest) (*google_protobuf3.Empty, error)
 	// Lists DlpJobs that match the specified filter in the request.
 	ListDlpJobs(context.Context, *ListDlpJobsRequest) (*ListDlpJobsResponse, error)
 	// Gets the latest state of a long-running DlpJob.
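
Taken together, the hunks above extend both DlpServiceClient and DlpServiceServer with the five JobTrigger RPCs. Below is a minimal client-side sketch; the dial target, credential setup, and resource names are illustrative assumptions, and NewDlpServiceClient is the constructor this grpc codegen conventionally emits (it sits outside the excerpted hunks).

package main

import (
	"log"

	"golang.org/x/net/context"
	dlp "google.golang.org/genproto/googleapis/privacy/dlp/v2beta2" // assumed vendored import path
	"google.golang.org/grpc"
)

func main() {
	// Real use requires TLS and OAuth credentials; WithInsecure keeps the sketch short.
	conn, err := grpc.Dial("dlp.googleapis.com:443", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := dlp.NewDlpServiceClient(conn)
	// Parent format follows the CreateJobTriggerRequest comment above;
	// TriggerId may be left empty to let the server generate one.
	trig, err := client.CreateJobTrigger(context.Background(), &dlp.CreateJobTriggerRequest{
		Parent:    "projects/my-project-id",
		TriggerId: "nightly-scan",
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created job trigger: %v", trig)
}
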
@@ -7120,6 +8477,96 @@ func _DlpService_DeleteDeidentifyTemplate_Handler(srv interface{}, ctx context.C return interceptor(ctx, in, info, handler) } +func _DlpService_CreateJobTrigger_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateJobTriggerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).CreateJobTrigger(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2beta2.DlpService/CreateJobTrigger", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).CreateJobTrigger(ctx, req.(*CreateJobTriggerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_UpdateJobTrigger_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateJobTriggerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).UpdateJobTrigger(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2beta2.DlpService/UpdateJobTrigger", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).UpdateJobTrigger(ctx, req.(*UpdateJobTriggerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_GetJobTrigger_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetJobTriggerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).GetJobTrigger(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2beta2.DlpService/GetJobTrigger", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).GetJobTrigger(ctx, req.(*GetJobTriggerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_ListJobTriggers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListJobTriggersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).ListJobTriggers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2beta2.DlpService/ListJobTriggers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).ListJobTriggers(ctx, req.(*ListJobTriggersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_DeleteJobTrigger_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteJobTriggerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).DeleteJobTrigger(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2beta2.DlpService/DeleteJobTrigger", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).DeleteJobTrigger(ctx, 
req.(*DeleteJobTriggerRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _DlpService_ListDlpJobs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ListDlpJobsRequest) if err := dec(in); err != nil { @@ -7264,6 +8711,26 @@ var _DlpService_serviceDesc = grpc.ServiceDesc{ MethodName: "DeleteDeidentifyTemplate", Handler: _DlpService_DeleteDeidentifyTemplate_Handler, }, + { + MethodName: "CreateJobTrigger", + Handler: _DlpService_CreateJobTrigger_Handler, + }, + { + MethodName: "UpdateJobTrigger", + Handler: _DlpService_UpdateJobTrigger_Handler, + }, + { + MethodName: "GetJobTrigger", + Handler: _DlpService_GetJobTrigger_Handler, + }, + { + MethodName: "ListJobTriggers", + Handler: _DlpService_ListJobTriggers_Handler, + }, + { + MethodName: "DeleteJobTrigger", + Handler: _DlpService_DeleteJobTrigger_Handler, + }, { MethodName: "ListDlpJobs", Handler: _DlpService_ListDlpJobs_Handler, @@ -7288,417 +8755,488 @@ var _DlpService_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("google/privacy/dlp/v2beta2/dlp.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 6587 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x7d, 0x6b, 0x6c, 0x1b, 0xd9, - 0x75, 0xb0, 0x86, 0x14, 0x25, 0xf2, 0x90, 0x94, 0xa8, 0xab, 0x87, 0x65, 0xda, 0x5e, 0x7b, 0x67, - 0x77, 0x1d, 0xaf, 0x77, 0x23, 0x65, 0xb5, 0xeb, 0x38, 0x6b, 0x7f, 0xde, 0x2c, 0x45, 0xd1, 0x96, - 0x6c, 0x49, 0xd4, 0x0e, 0x29, 0xaf, 0x9d, 0xdd, 0x6f, 0xe7, 0x1b, 0x91, 0x57, 0xd4, 0xac, 0xc8, - 0x19, 0x7a, 0x66, 0x68, 0x49, 0x9b, 0x2f, 0x5f, 0xbe, 0xb6, 0x48, 0x91, 0x34, 0x40, 0x1f, 0x48, - 0x80, 0x2d, 0x90, 0x02, 0x49, 0x80, 0x04, 0x48, 0x82, 0x22, 0x7d, 0xa4, 0x68, 0xd1, 0x3c, 0x80, - 0x34, 0x41, 0xd1, 0xf7, 0x8f, 0x22, 0x40, 0x81, 0xa2, 0x28, 0x5a, 0xb4, 0x05, 0x8a, 0x3e, 0x50, - 0x14, 0xfd, 0xd1, 0x7f, 0x05, 0x8a, 0xfb, 0x98, 0x27, 0x87, 0x43, 0x52, 0x92, 0xd1, 0x36, 0xff, - 0x38, 0xe7, 0x9e, 0x73, 0xee, 0xb9, 0xe7, 0x9e, 0x7b, 0xee, 0xb9, 0xe7, 0x3e, 0x08, 0xcf, 0x36, - 0x74, 0xbd, 0xd1, 0xc4, 0x8b, 0x6d, 0x43, 0x7d, 0xac, 0xd4, 0x8e, 0x16, 0xeb, 0xcd, 0xf6, 0xe2, - 0xe3, 0xa5, 0x1d, 0x6c, 0x29, 0x4b, 0xe4, 0xf7, 0x42, 0xdb, 0xd0, 0x2d, 0x1d, 0xe5, 0x19, 0xd6, - 0x02, 0xc7, 0x5a, 0x20, 0x25, 0x1c, 0x2b, 0x7f, 0x9e, 0x73, 0x50, 0xda, 0xea, 0xa2, 0xa2, 0x69, - 0xba, 0xa5, 0x58, 0xaa, 0xae, 0x99, 0x8c, 0x32, 0x7f, 0x25, 0x82, 0xbf, 0x69, 0xe9, 0x86, 0xd2, - 0xc0, 0x1c, 0xf3, 0x29, 0x07, 0x53, 0xb7, 0xf4, 0x9d, 0xce, 0xee, 0x62, 0xbd, 0x63, 0x50, 0x56, - 0xbc, 0xfc, 0x5c, 0xb0, 0x1c, 0xb7, 0xda, 0xd6, 0x11, 0x2f, 0xbc, 0x14, 0x2c, 0xdc, 0x55, 0x71, - 0xb3, 0x2e, 0xb7, 0x14, 0x73, 0x9f, 0x63, 0x5c, 0x0c, 0x62, 0x58, 0x6a, 0x0b, 0x9b, 0x96, 0xd2, - 0xe2, 0x6d, 0xcc, 0x9f, 0xe1, 0x08, 0x46, 0xbb, 0xb6, 0x68, 0x5a, 0x8a, 0xd5, 0xb1, 0x9b, 0x30, - 0xc7, 0x0b, 0xac, 0xa3, 0x36, 0x5e, 0xac, 0x2b, 0x16, 0x0e, 0x08, 0xc4, 0xe1, 0x47, 0xfa, 0xee, - 0x01, 0xc6, 0xfb, 0x61, 0x85, 0xa4, 0x2a, 0x7d, 0xb7, 0xae, 0x70, 0x69, 0xc5, 0x5f, 0x1c, 0x83, - 0xec, 0x9a, 0x66, 0xb6, 0x71, 0xcd, 0x2a, 0xea, 0xda, 0xae, 0xda, 0x40, 0x45, 0x00, 0x55, 0xdb, - 0xd5, 0x65, 0x82, 0x6e, 0xce, 0x0b, 0x97, 0xe2, 0x57, 0xd2, 0x4b, 0xcf, 0x2e, 0xf4, 0xd6, 0xfa, - 0xc2, 0x9a, 0xb6, 0xab, 0x57, 0x8f, 0xda, 0x58, 0x4a, 0xa9, 0xfc, 0x97, 0x89, 0x36, 0x60, 0xa2, - 0xa5, 0x6a, 0x72, 0x53, 0xdd, 0xc7, 0x4d, 0x75, 0x4f, 0xd7, 0xeb, 0xf3, 0xb1, 0x4b, 0xc2, 0x95, - 0x89, 0xa5, 0xcb, 0x51, 0x8c, 0xd6, 0x1d, 
0x6c, 0x29, 0xdb, 0x52, 0x35, 0xf7, 0x13, 0x95, 0x61, - 0xac, 0xa9, 0xb6, 0x54, 0xcb, 0x9c, 0x8f, 0x5f, 0x12, 0xae, 0xa4, 0x97, 0xae, 0x47, 0xcb, 0xe3, - 0x69, 0xce, 0xc2, 0x6d, 0x55, 0xab, 0xab, 0x5a, 0x63, 0x9d, 0x92, 0x4b, 0x9c, 0x0d, 0x7a, 0x06, - 0xb2, 0xaa, 0x56, 0x6b, 0x76, 0xea, 0x58, 0x7e, 0xd4, 0xd1, 0x2d, 0x3c, 0x3f, 0x7a, 0x49, 0xb8, - 0x92, 0x94, 0x32, 0x1c, 0xf8, 0x06, 0x81, 0xa1, 0x17, 0x01, 0xe1, 0x43, 0x86, 0xe4, 0xd1, 0x48, - 0x82, 0x62, 0xe6, 0x78, 0xc9, 0x9a, 0xd3, 0xe4, 0xfb, 0x30, 0x55, 0xeb, 0x98, 0x96, 0xde, 0xf2, - 0x22, 0x8f, 0x51, 0xf5, 0x5d, 0x8d, 0x12, 0xb7, 0x48, 0x89, 0x1c, 0x25, 0x4e, 0xd6, 0x7c, 0xdf, - 0x66, 0xfe, 0xdf, 0x63, 0x90, 0xf5, 0x35, 0x02, 0xbd, 0x04, 0xb3, 0x2d, 0xe5, 0x50, 0xde, 0x65, - 0x40, 0x53, 0x6e, 0x63, 0x43, 0x56, 0x2d, 0xdc, 0x9a, 0x17, 0x2e, 0x09, 0x57, 0x12, 0x12, 0x6a, - 0x29, 0x87, 0x9c, 0xc0, 0xdc, 0xc2, 0xc6, 0x9a, 0x85, 0x5b, 0xe8, 0x3a, 0xcc, 0x77, 0x91, 0x18, - 0xf8, 0x51, 0x07, 0x9b, 0x16, 0xed, 0x99, 0x84, 0x34, 0xeb, 0xa7, 0x92, 0x58, 0x21, 0xfa, 0x09, - 0x01, 0xf2, 0xdd, 0x95, 0xd9, 0x0d, 0x9c, 0x8f, 0xd3, 0xf6, 0x95, 0x8e, 0xd9, 0x1d, 0x8e, 0xe9, - 0xd0, 0x4f, 0x69, 0x2e, 0x20, 0x38, 0x2f, 0xcc, 0x77, 0x88, 0x89, 0x7a, 0x10, 0x51, 0x01, 0x52, - 0xae, 0x08, 0x02, 0xb5, 0x88, 0xc1, 0x2c, 0x34, 0x69, 0x5b, 0x28, 0x7a, 0x1a, 0x32, 0xde, 0x66, - 0x71, 0x25, 0xa4, 0x3d, 0x12, 0x88, 0xef, 0x0b, 0x90, 0x2e, 0xea, 0x9a, 0x85, 0x35, 0x8b, 0xea, - 0x10, 0xc1, 0xa8, 0x53, 0x61, 0x4a, 0xa2, 0xbf, 0xd1, 0x0c, 0x8c, 0xd6, 0x15, 0x4b, 0xa1, 0xe4, - 0x99, 0xd5, 0x11, 0x89, 0x7e, 0xa1, 0x39, 0x48, 0x3c, 0x56, 0x9a, 0x1d, 0x4c, 0xad, 0x35, 0xb5, - 0x3a, 0x22, 0xb1, 0x4f, 0xf4, 0x2a, 0x24, 0x2c, 0x65, 0xa7, 0xc9, 0xac, 0x2d, 0xbd, 0xf4, 0x74, - 0x94, 0xcc, 0x55, 0x82, 0x48, 0x48, 0x29, 0xc5, 0x72, 0x1a, 0x52, 0x84, 0x35, 0xed, 0x67, 0xf1, - 0x87, 0x02, 0x24, 0x68, 0x39, 0xba, 0x05, 0xe3, 0x7b, 0x58, 0xa9, 0x63, 0xc3, 0x1e, 0xa9, 0xcf, - 0x44, 0xf1, 0xbc, 0x4d, 0x3c, 0xd1, 0x5a, 0x5d, 0xb2, 0x69, 0xd0, 0xab, 0x30, 0x6a, 0xe8, 0x07, - 0xa4, 0xf5, 0x84, 0xf6, 0xb9, 0xbe, 0xf2, 0x2c, 0x48, 0xfa, 0x81, 0x44, 0x49, 0xf2, 0xaf, 0x43, - 0x5c, 0xd2, 0x0f, 0xd0, 0xab, 0x30, 0x46, 0xdb, 0x66, 0xd7, 0x1f, 0xd9, 0xa6, 0xfb, 0x04, 0x53, - 0xe2, 0x04, 0xe2, 0x27, 0x1d, 0xcf, 0x23, 0x61, 0xb3, 0xd3, 0xb4, 0xd0, 0x47, 0x21, 0xe9, 0xf4, - 0xc7, 0x40, 0xad, 0xa1, 0xb8, 0x92, 0x43, 0x84, 0x3e, 0x08, 0xc8, 0xb1, 0x53, 0xcb, 0xe8, 0x68, - 0x35, 0xc5, 0xc2, 0xcc, 0xf3, 0x24, 0xa5, 0x29, 0xbb, 0xa4, 0x6a, 0x17, 0x88, 0x5f, 0x8f, 0xc1, - 0x38, 0x67, 0x82, 0x66, 0x20, 0xc1, 0x1c, 0x01, 0xeb, 0x5d, 0xf6, 0xe1, 0x37, 0xb4, 0xd8, 0xb1, - 0x0c, 0xed, 0x36, 0x80, 0xc7, 0x0b, 0xc6, 0x87, 0xf2, 0x82, 0x1e, 0x4a, 0xf4, 0x3a, 0x24, 0x9b, - 0x7a, 0x8d, 0xce, 0x42, 0xdc, 0x7c, 0x22, 0x25, 0x59, 0xe7, 0xb8, 0x92, 0x43, 0x85, 0x6e, 0x42, - 0xba, 0x66, 0x60, 0xc5, 0xc2, 0x32, 0x99, 0x04, 0xe6, 0xc7, 0x28, 0x93, 0xbc, 0xcb, 0x84, 0x4d, - 0x46, 0x0b, 0x55, 0x7b, 0x32, 0x92, 0x80, 0xa1, 0x13, 0x80, 0xf8, 0xdd, 0x38, 0x24, 0x6d, 0x9e, - 0xe8, 0x75, 0x80, 0x9d, 0x23, 0x0b, 0xcb, 0x86, 0xa2, 0x35, 0xec, 0x01, 0x18, 0xd9, 0xf1, 0x12, - 0x41, 0x94, 0x52, 0x84, 0x88, 0xfe, 0x44, 0x77, 0x61, 0xb2, 0xa6, 0xd7, 0x71, 0x5b, 0x57, 0x35, - 0x8b, 0xb3, 0x89, 0x0d, 0xca, 0x66, 0xc2, 0xa1, 0xb4, 0x79, 0xa5, 0xd5, 0x96, 0xd2, 0xc0, 0xf2, - 0x8e, 0x7e, 0x88, 0x4d, 0xee, 0x92, 0x9e, 0x8f, 0xec, 0x26, 0x82, 0xee, 0x68, 0x08, 0x28, 0xf5, - 0x32, 0x21, 0x46, 0x2b, 0x00, 0x06, 0xae, 0xe9, 0x46, 0x5d, 0xde, 0xc7, 0x47, 0x5c, 0xcf, 0x91, - 0xc3, 0x42, 0xa2, 0xd8, 0xf7, 0xf0, 0x91, 0x94, 0x32, 0xec, 0x9f, 
0xe8, 0x35, 0x62, 0xc8, 0x64, - 0xd2, 0x57, 0xeb, 0x74, 0xba, 0x18, 0x74, 0x58, 0xee, 0xb2, 0x1f, 0x68, 0x0b, 0x26, 0xe8, 0xa8, - 0x97, 0x9d, 0x1e, 0x67, 0x9d, 0xf5, 0x7c, 0xdf, 0x01, 0xea, 0x34, 0x2a, 0x6b, 0x79, 0x3f, 0xc5, - 0x17, 0x21, 0xeb, 0x2b, 0x47, 0xe7, 0x20, 0x65, 0xe8, 0x07, 0xb2, 0xaa, 0xd5, 0xf1, 0x21, 0xed, - 0xc1, 0xb8, 0x94, 0x34, 0xf4, 0x83, 0x35, 0xf2, 0x2d, 0x2e, 0x42, 0x82, 0xa9, 0x76, 0x06, 0x12, - 0xa6, 0xa5, 0x18, 0x16, 0xc7, 0x60, 0x1f, 0x28, 0x07, 0x71, 0xac, 0xb1, 0x71, 0x15, 0x97, 0xc8, - 0x4f, 0xb1, 0x06, 0x59, 0x9f, 0x4e, 0x09, 0x8a, 0xa5, 0xb7, 0xf9, 0x84, 0x44, 0x7e, 0x12, 0xef, - 0xd9, 0xc4, 0xbb, 0xf6, 0x6c, 0x43, 0x7f, 0x13, 0xf6, 0x07, 0x6a, 0xdd, 0xda, 0xa3, 0xc3, 0x22, - 0x21, 0xb1, 0x0f, 0x34, 0x07, 0x63, 0x7b, 0x58, 0x6d, 0xec, 0x59, 0x54, 0xff, 0x09, 0x89, 0x7f, - 0x89, 0xef, 0x8f, 0x02, 0x92, 0x70, 0x5d, 0xa9, 0x59, 0xb4, 0x2e, 0x7b, 0x86, 0x9a, 0x83, 0xb1, - 0xb6, 0x62, 0x60, 0xcd, 0xe2, 0x43, 0x97, 0x7f, 0x11, 0x25, 0xaa, 0xcc, 0xbd, 0xc8, 0x35, 0x3a, - 0xf7, 0x70, 0x0b, 0x7b, 0x7e, 0xe0, 0xc9, 0x4a, 0xca, 0xaa, 0xbe, 0xc8, 0xe8, 0x02, 0x30, 0x53, - 0xb1, 0xa7, 0x3e, 0x52, 0x5b, 0x8a, 0x42, 0xe8, 0x48, 0x77, 0x8a, 0xe9, 0x8c, 0x40, 0x64, 0xcf, - 0xf0, 0xe2, 0x15, 0x32, 0x29, 0x7c, 0x1c, 0xce, 0xb0, 0x62, 0x83, 0xb6, 0x41, 0xd5, 0x35, 0x2e, - 0x17, 0x09, 0x29, 0x88, 0xc9, 0x16, 0xa3, 0xed, 0x2c, 0xd8, 0xf0, 0x05, 0xfe, 0xc1, 0x99, 0x71, - 0x91, 0x67, 0xd5, 0x10, 0xa8, 0x99, 0xff, 0x0b, 0x01, 0x66, 0xc2, 0xf0, 0x51, 0xf1, 0x98, 0x53, - 0xe9, 0xea, 0x88, 0xc7, 0xc7, 0x5d, 0x81, 0x49, 0xd6, 0x28, 0x59, 0x69, 0x36, 0x65, 0x0b, 0x1f, - 0xb2, 0x6e, 0x4e, 0xae, 0x8e, 0x48, 0x59, 0x56, 0x50, 0x68, 0x36, 0xab, 0xf8, 0xd0, 0x22, 0xe3, - 0xde, 0xdb, 0xfc, 0xa6, 0x6e, 0xf0, 0x88, 0x2e, 0x72, 0xdc, 0x17, 0x09, 0xa2, 0x34, 0x61, 0xb8, - 0xb2, 0x37, 0x75, 0x63, 0x39, 0x09, 0x63, 0x96, 0x62, 0x34, 0xb0, 0x25, 0x16, 0x21, 0x41, 0x41, - 0xc4, 0xec, 0x0c, 0x5c, 0xa7, 0xed, 0x88, 0x49, 0xe4, 0x27, 0x31, 0xb1, 0x86, 0x81, 0xb1, 0x46, - 0x05, 0x8a, 0x49, 0xec, 0x83, 0x18, 0xe3, 0x8e, 0x3d, 0x3f, 0xc7, 0x24, 0xfa, 0x5b, 0xac, 0xc1, - 0xb4, 0x4f, 0xc9, 0x66, 0x5b, 0xd7, 0x4c, 0x8c, 0x9e, 0x03, 0x5e, 0x2f, 0xae, 0xcb, 0x54, 0xb7, - 0x94, 0x7b, 0xc6, 0x6e, 0x18, 0xae, 0x53, 0x74, 0x82, 0x86, 0x0f, 0x2d, 0x83, 0xe1, 0x39, 0x1a, - 0x48, 0x49, 0x59, 0x07, 0x4a, 0xda, 0x2f, 0xfe, 0x4c, 0x1c, 0xe6, 0x57, 0xb0, 0x5a, 0xc7, 0x9a, - 0xa5, 0xee, 0x1e, 0xf1, 0xe8, 0xa2, 0x9f, 0x25, 0x3f, 0x84, 0xa9, 0xba, 0x43, 0xe3, 0x37, 0xe6, - 0x17, 0xa3, 0xd4, 0xe6, 0xab, 0x88, 0x18, 0x47, 0xae, 0x1e, 0x80, 0x84, 0x0c, 0x92, 0xf8, 0x09, - 0x07, 0xc9, 0x4d, 0x18, 0xa5, 0xb1, 0x28, 0xf3, 0x9d, 0x1f, 0x88, 0xee, 0x56, 0x27, 0xb8, 0x92, - 0x28, 0x11, 0x5a, 0x82, 0x59, 0x5b, 0x1c, 0x0b, 0xb7, 0xda, 0x4d, 0x32, 0x59, 0x69, 0x4a, 0x0b, - 0x53, 0x2f, 0x9a, 0x92, 0xa6, 0x79, 0x61, 0x95, 0x97, 0x6d, 0x2a, 0x2d, 0x8c, 0x3e, 0x02, 0xf3, - 0x1e, 0xed, 0xf8, 0xc9, 0xc6, 0x28, 0xd9, 0x9c, 0x5b, 0xee, 0xa5, 0x14, 0xbf, 0x26, 0xc0, 0xd9, - 0x90, 0xce, 0xe0, 0x1d, 0x6f, 0x37, 0x44, 0x38, 0x4e, 0x43, 0x36, 0x21, 0xa9, 0x3f, 0xc6, 0xc6, - 0x63, 0x15, 0x1f, 0xf0, 0x9e, 0x5a, 0x8a, 0xf4, 0xdd, 0x86, 0xa2, 0x99, 0xbb, 0xba, 0xd1, 0xa2, - 0xde, 0xb3, 0xcc, 0x29, 0x25, 0x87, 0x07, 0xb5, 0x1b, 0xe9, 0x18, 0x76, 0x63, 0x9c, 0x8e, 0xdd, - 0x18, 0x3f, 0x9e, 0x76, 0x63, 0xf4, 0xb1, 0x1b, 0xa3, 0xb7, 0xdd, 0x48, 0xff, 0x33, 0xec, 0xe6, - 0xdf, 0x04, 0x98, 0x75, 0xd5, 0x3e, 0x88, 0xd1, 0x9c, 0xfe, 0xb4, 0x69, 0x2b, 0x24, 0x7e, 0xaa, - 0x3d, 0x3b, 0xda, 0xb3, 0x67, 0xc5, 0xb7, 0x60, 0x2e, 0xd8, 0x66, 0xde, 0x37, 0x05, 0x18, 
0x33, - 0xe8, 0x5a, 0x83, 0xf7, 0xce, 0x20, 0x8d, 0x62, 0x8b, 0x13, 0x89, 0x13, 0x8a, 0x9f, 0x84, 0x79, - 0x5e, 0x40, 0x66, 0xf5, 0x8a, 0xde, 0x31, 0x6a, 0x7d, 0x43, 0x91, 0x7b, 0x00, 0xef, 0xea, 0x3b, - 0x43, 0x8c, 0x40, 0x5e, 0xc3, 0x5d, 0x7d, 0x87, 0xab, 0x34, 0xf5, 0xae, 0xfd, 0x53, 0xfc, 0x3f, - 0x30, 0x5d, 0xee, 0x58, 0xed, 0x8e, 0x55, 0x61, 0x39, 0x2b, 0xae, 0xe5, 0x82, 0xbd, 0xb6, 0x1c, - 0xa0, 0x65, 0xcb, 0x6a, 0xe3, 0x8d, 0x0e, 0x36, 0x8e, 0x02, 0x6b, 0xcc, 0x31, 0xb6, 0xc0, 0x15, - 0x5b, 0x80, 0xec, 0x69, 0xbe, 0x62, 0x29, 0x96, 0x6a, 0x5a, 0x6a, 0xcd, 0x3c, 0x8d, 0x45, 0xf7, - 0x0c, 0x24, 0x6a, 0x7a, 0x47, 0xb3, 0x78, 0xe8, 0xc8, 0x3e, 0xc4, 0x7f, 0x19, 0x0d, 0x51, 0xe9, - 0x0a, 0xb6, 0x14, 0xb5, 0x69, 0x22, 0x83, 0xf8, 0x30, 0xaa, 0x5d, 0x5c, 0x97, 0xf5, 0x36, 0xcd, - 0xe7, 0x71, 0x0d, 0x0e, 0x92, 0x75, 0xe8, 0x62, 0xb8, 0x20, 0xd9, 0xdc, 0xca, 0x8c, 0x19, 0x71, - 0x6e, 0x7e, 0x08, 0xaa, 0x38, 0x56, 0xc2, 0x4c, 0xf6, 0xe6, 0x31, 0x2b, 0xf2, 0xda, 0x4d, 0xfe, - 0xf7, 0x05, 0xc8, 0x05, 0xeb, 0x46, 0x0d, 0x38, 0x6b, 0x6a, 0x4a, 0xdb, 0xdc, 0xd3, 0x2d, 0x39, - 0x68, 0xe6, 0x5c, 0xc7, 0x2f, 0x0c, 0x50, 0xb9, 0x6d, 0xfd, 0xd2, 0x19, 0x9b, 0x5b, 0xa0, 0x20, - 0x60, 0x81, 0xf1, 0x13, 0x59, 0x60, 0xfe, 0x7b, 0x02, 0x8c, 0xf1, 0x25, 0xfb, 0x07, 0x60, 0xb2, - 0x6d, 0xe8, 0x35, 0x6c, 0x9a, 0xb8, 0x2e, 0x93, 0xe5, 0x9d, 0xc9, 0x97, 0x0a, 0x13, 0x0e, 0x78, - 0x99, 0x40, 0xc9, 0x38, 0xb6, 0x74, 0x4b, 0x69, 0xca, 0xd8, 0xb4, 0xd4, 0x16, 0x59, 0x7e, 0x73, - 0x74, 0x66, 0x0a, 0xd3, 0xb4, 0xb0, 0x64, 0x97, 0x31, 0x9a, 0xfb, 0x30, 0xe9, 0x58, 0x9c, 0x6c, - 0x5a, 0x8a, 0x65, 0x2f, 0xee, 0x16, 0x06, 0xb1, 0x3b, 0xd7, 0x74, 0x89, 0x43, 0x72, 0x61, 0xa6, - 0xf8, 0x25, 0x01, 0xa6, 0x6d, 0xac, 0x15, 0x6c, 0xd6, 0x0c, 0x95, 0x76, 0x07, 0x89, 0x0a, 0xa9, - 0x6b, 0xe1, 0x09, 0x1e, 0xf2, 0x1b, 0x3d, 0x0d, 0x99, 0xba, 0x6a, 0xb6, 0x9b, 0xca, 0x11, 0x73, - 0x3b, 0x2c, 0xaa, 0x4b, 0x73, 0x18, 0x9d, 0x48, 0x24, 0xc8, 0x98, 0x9d, 0x76, 0x5b, 0x37, 0x58, - 0xa3, 0xa8, 0x8c, 0x13, 0x4b, 0x8b, 0x03, 0xc9, 0x68, 0xd3, 0x2d, 0x1f, 0x49, 0x69, 0xd3, 0xfd, - 0x10, 0x2b, 0x30, 0xb3, 0xae, 0x9a, 0x96, 0x93, 0x05, 0xb4, 0x3d, 0xcc, 0x33, 0x90, 0x6d, 0x2a, - 0x5a, 0xa3, 0x43, 0xd6, 0x11, 0x64, 0x19, 0xcc, 0x65, 0xcd, 0xd8, 0xc0, 0xa2, 0x5e, 0xc7, 0xc4, - 0x0d, 0xed, 0xaa, 0x4d, 0x0b, 0x1b, 0x5c, 0x5a, 0xfe, 0x25, 0x36, 0x60, 0x36, 0xc0, 0x94, 0xbb, - 0xc5, 0xcd, 0x90, 0x94, 0xef, 0x40, 0xf2, 0x7b, 0xb4, 0xe7, 0xc9, 0xfe, 0x8a, 0x9f, 0x16, 0xe0, - 0x7c, 0x41, 0x53, 0x9a, 0x47, 0xef, 0x61, 0x8f, 0x93, 0x54, 0xcd, 0xfd, 0xfe, 0x93, 0x4f, 0xb7, - 0xa3, 0x7c, 0x29, 0x72, 0x59, 0xa4, 0x9a, 0xfb, 0xb4, 0x26, 0x53, 0x35, 0x43, 0xbd, 0xe5, 0x6f, - 0x09, 0x30, 0x1b, 0x8a, 0x44, 0x26, 0x3a, 0xce, 0x51, 0x6e, 0x61, 0xcb, 0x50, 0x6b, 0x83, 0x78, - 0xce, 0x2d, 0x06, 0xdb, 0xa0, 0x04, 0x52, 0xb6, 0xed, 0xfd, 0x44, 0xeb, 0x90, 0x31, 0x69, 0x53, - 0x65, 0xe6, 0x89, 0x63, 0x43, 0x7a, 0x62, 0x29, 0xcd, 0xc8, 0xe9, 0x87, 0xf8, 0xb7, 0x93, 0x90, - 0xf5, 0x55, 0x87, 0x1e, 0xc3, 0x9c, 0xd6, 0x69, 0x61, 0x43, 0xad, 0x29, 0x4d, 0x36, 0x1e, 0x6c, - 0x4d, 0x31, 0xc9, 0x5f, 0x1b, 0x58, 0xf2, 0x85, 0x4d, 0x9b, 0x0f, 0x1d, 0x11, 0x4c, 0x23, 0xab, - 0x23, 0xd2, 0x8c, 0x16, 0x02, 0x47, 0xff, 0x17, 0xe6, 0x6b, 0x8a, 0x85, 0x1b, 0x7a, 0x48, 0xcd, - 0xac, 0x8d, 0xaf, 0x0f, 0x5e, 0x73, 0xd1, 0xe5, 0xe4, 0xaf, 0x7b, 0xae, 0x16, 0x5a, 0x82, 0xde, - 0x05, 0xb4, 0x2f, 0x2b, 0x9a, 0xae, 0x1d, 0xb5, 0x54, 0xeb, 0xc8, 0xef, 0xc2, 0x6e, 0x0c, 0x5e, - 0xef, 0xbd, 0x82, 0xcd, 0xc2, 0xa9, 0x31, 0xb7, 0x1f, 0x80, 0x91, 0xba, 0x9a, 0x72, 0x5d, 0x7d, - 0x8c, 0x0d, 0xd3, 
-	[old fileDescriptor bytes elided: several hundred lines of hex literals
-	 for the previous gzipped FileDescriptorProto, ending in the gzip trailer
-	 ..., 0x78, 0xae, 0x10, 0x67, 0xc0, 0x6a, 0x00, 0x00]
+	// 7722 bytes of a gzipped FileDescriptorProto
+	[new fileDescriptor bytes elided: several hundred lines of hex literals
+	 beginning 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, ...]
0x9b, 0xa3, 0x12, 0xb5, 0xe7, 0x6a, 0xf3, 0xba, 0xa7, 0xcd, 0x64, 0x2a, 0x0f, 0xb6, 0x78, 0x40, + 0xf8, 0xb5, 0x95, 0x9f, 0xa6, 0x6d, 0x5e, 0x7f, 0x3d, 0x40, 0xe8, 0xb6, 0x78, 0x08, 0x0d, 0xfa, + 0xf7, 0x02, 0xa0, 0xc1, 0x04, 0x2d, 0x8a, 0x74, 0x32, 0x43, 0x13, 0xba, 0xe3, 0xb5, 0xf6, 0x84, + 0xb6, 0x16, 0xa3, 0x73, 0xb4, 0xf6, 0xb3, 0x35, 0x34, 0x5e, 0x53, 0xd1, 0x4f, 0x04, 0xfb, 0x1d, + 0xdf, 0x40, 0x06, 0xf6, 0x9d, 0xd1, 0xe3, 0x26, 0x34, 0x13, 0x5d, 0xfc, 0xe6, 0xf8, 0x84, 0x7c, + 0xdc, 0x85, 0x34, 0x3c, 0xae, 0x69, 0xfb, 0x1b, 0x1e, 0xc3, 0xae, 0xd1, 0x8f, 0x05, 0x58, 0x0c, + 0x4d, 0x1d, 0x47, 0x1b, 0x75, 0x54, 0xb6, 0xb9, 0x38, 0xe4, 0x4d, 0x42, 0xbb, 0x61, 0x2b, 0xe7, + 0xea, 0xd1, 0x95, 0x31, 0x7b, 0xf4, 0x67, 0x12, 0xb0, 0x3c, 0x6c, 0x0d, 0x88, 0xde, 0x1b, 0xed, + 0xa4, 0x86, 0x46, 0xd3, 0xc5, 0x31, 0xa3, 0x7b, 0xf1, 0x94, 0x36, 0xfb, 0xa9, 0x78, 0x77, 0x74, + 0x7f, 0x86, 0xac, 0x03, 0xc8, 0xc8, 0xbd, 0x2b, 0xbe, 0x11, 0x3d, 0x6d, 0x84, 0x92, 0x51, 0x15, + 0x0c, 0x5b, 0x48, 0x46, 0xab, 0x60, 0xc4, 0xf2, 0xf3, 0xbc, 0x2a, 0x58, 0xbf, 0x3b, 0xa2, 0xe7, + 0x43, 0x1a, 0xc2, 0x9d, 0xd7, 0xdd, 0xf5, 0x37, 0x86, 0xf7, 0xff, 0x50, 0x32, 0xf4, 0x3f, 0x05, + 0x58, 0x0c, 0x5d, 0x76, 0x46, 0x9b, 0x77, 0xd4, 0x4a, 0x75, 0xec, 0xc6, 0x73, 0xb7, 0x8d, 0xce, + 0xd7, 0xf8, 0xcf, 0xd6, 0xd1, 0xd8, 0x2d, 0x47, 0x3f, 0x9b, 0x60, 0x39, 0xfd, 0x90, 0x25, 0x2c, + 0xba, 0x37, 0xca, 0x31, 0x0d, 0x5f, 0x6e, 0x17, 0xdf, 0x3b, 0x17, 0x2d, 0xf7, 0x6b, 0x21, 0x7a, + 0x18, 0x63, 0x1c, 0xf8, 0xf5, 0x10, 0x6f, 0x10, 0xa0, 0xdf, 0x17, 0x60, 0x79, 0xd8, 0xe2, 0x38, + 0x7a, 0x04, 0x8c, 0x58, 0x52, 0x0f, 0xf5, 0x71, 0xbc, 0x91, 0x2b, 0xe7, 0xed, 0xec, 0x95, 0xf1, + 0x3b, 0xfb, 0x07, 0x02, 0x14, 0x82, 0x1b, 0x79, 0xe8, 0xad, 0xd1, 0x1e, 0x6e, 0x60, 0x83, 0xa5, + 0x18, 0x73, 0x63, 0x47, 0x7c, 0x8b, 0x36, 0xf2, 0x75, 0xf1, 0x4e, 0x64, 0x87, 0x78, 0x76, 0x7f, + 0xf8, 0x52, 0xb2, 0x10, 0xdc, 0xd3, 0x8b, 0x16, 0x73, 0xc8, 0x0e, 0xe0, 0xb8, 0x62, 0xae, 0xdf, + 0x19, 0xae, 0x52, 0x8f, 0x8c, 0xdc, 0x63, 0x7c, 0x4f, 0x80, 0xbc, 0x6f, 0x17, 0x2f, 0x3a, 0xd8, + 0x0e, 0xdb, 0xf0, 0x8b, 0x2d, 0xe0, 0x1b, 0x54, 0xc0, 0x15, 0x14, 0x5b, 0x40, 0xf4, 0x1b, 0x02, + 0xcc, 0x06, 0x36, 0xeb, 0xd0, 0xfa, 0xa8, 0x41, 0x39, 0xb8, 0xc7, 0x58, 0x7c, 0x6b, 0x2c, 0x1a, + 0x3e, 0x80, 0x43, 0xc4, 0x8d, 0xee, 0x76, 0xf4, 0x4b, 0x02, 0x14, 0x82, 0xbb, 0x79, 0xd1, 0x7d, + 0x3e, 0x64, 0xef, 0x6f, 0xe8, 0x78, 0xe3, 0x32, 0xad, 0xc4, 0x57, 0xe1, 0x5f, 0x17, 0x20, 0xeb, + 0x49, 0x75, 0x47, 0xa7, 0x34, 0x06, 0x33, 0xfa, 0xd1, 0x29, 0x8d, 0x90, 0x1c, 0xba, 0xf8, 0x1a, + 0x15, 0xf1, 0x36, 0xfa, 0x5a, 0xb4, 0xfb, 0xe2, 0xe2, 0xfc, 0x82, 0x00, 0x19, 0x67, 0xc3, 0x01, + 0xbd, 0x36, 0x6a, 0x96, 0xf2, 0xe6, 0xc2, 0x63, 0x2d, 0x44, 0x43, 0xa4, 0x19, 0xf0, 0x33, 0x4c, + 0x14, 0x3a, 0x91, 0x08, 0x90, 0xf3, 0x66, 0xdd, 0xd1, 0x5a, 0x0c, 0xa7, 0xe9, 0x93, 0x69, 0x58, + 0xc7, 0x71, 0x39, 0x56, 0xe2, 0xc9, 0xf1, 0x97, 0x05, 0xc8, 0x79, 0x37, 0x0a, 0xa2, 0xe5, 0x08, + 0xd9, 0x52, 0x18, 0x2a, 0xc7, 0x37, 0xa8, 0x1c, 0x6f, 0x88, 0xaf, 0xc6, 0x91, 0xe3, 0x5e, 0x83, + 0x72, 0xbe, 0x27, 0xac, 0x6c, 0xfc, 0xaa, 0x00, 0x2f, 0x34, 0xf4, 0x4e, 0x84, 0x18, 0x1b, 0xe9, + 0xcd, 0x76, 0x77, 0x9f, 0xd4, 0xb6, 0x2f, 0x7c, 0xf6, 0x3e, 0xc7, 0x6b, 0xe9, 0x6d, 0x45, 0x6b, + 0xad, 0xea, 0x46, 0x6b, 0xad, 0x85, 0x35, 0x2a, 0xcb, 0x1a, 0x2b, 0x52, 0xba, 0xaa, 0x19, 0xf6, + 0x7f, 0xc4, 0xdf, 0x6b, 0xb6, 0xbb, 0xbf, 0x9e, 0x58, 0x7e, 0xc0, 0xe8, 0xcb, 0x6d, 0xbd, 0xd7, + 0x24, 0x7d, 0xb9, 0xfa, 0x68, 0x7d, 0x83, 0x14, 0xff, 0x53, 0xbb, 0xe8, 0x0b, 0x5a, 0xf4, 0xc5, + 0x66, 0xbb, 0xfb, 0xc5, 
0x23, 0x46, 0x79, 0x38, 0x45, 0xf9, 0xbf, 0xf5, 0x7f, 0x03, 0x00, 0x00, + 0xff, 0xff, 0x25, 0x3f, 0xe2, 0xc3, 0x06, 0x7d, 0x00, 0x00, } diff --git a/vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2beta2/storage.pb.go b/vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2beta2/storage.pb.go index ab4fdec57..02bf32791 100644 --- a/vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2beta2/storage.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2beta2/storage.pb.go @@ -7,7 +7,7 @@ import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import _ "google.golang.org/genproto/googleapis/api/annotations" -import _ "github.com/golang/protobuf/ptypes/timestamp" +import google_protobuf1 "github.com/golang/protobuf/ptypes/timestamp" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -77,10 +77,19 @@ type CustomInfoType struct { // Info type configuration. All custom info types must have configurations // that do not conflict with built-in info types or other custom info types. InfoType *InfoType `protobuf:"bytes,1,opt,name=info_type,json=infoType" json:"info_type,omitempty"` + // Likelihood to return for this custom info type. This base value can be + // altered by a detection rule if the finding meets the criteria specified by + // the rule. Defaults to `VERY_LIKELY` if not specified. + Likelihood Likelihood `protobuf:"varint,6,opt,name=likelihood,enum=google.privacy.dlp.v2beta2.Likelihood" json:"likelihood,omitempty"` // Types that are valid to be assigned to Type: // *CustomInfoType_Dictionary_ + // *CustomInfoType_Regex_ // *CustomInfoType_SurrogateType_ Type isCustomInfoType_Type `protobuf_oneof:"type"` + // Set of detection rules to apply to all findings of this custom info type. + // Rules are applied in order that they are specified. Not supported for the + // `surrogate_type` custom info type. 
+ DetectionRules []*CustomInfoType_DetectionRule `protobuf:"bytes,7,rep,name=detection_rules,json=detectionRules" json:"detection_rules,omitempty"` } func (m *CustomInfoType) Reset() { *m = CustomInfoType{} } @@ -95,11 +104,15 @@ type isCustomInfoType_Type interface { type CustomInfoType_Dictionary_ struct { Dictionary *CustomInfoType_Dictionary `protobuf:"bytes,2,opt,name=dictionary,oneof"` } +type CustomInfoType_Regex_ struct { + Regex *CustomInfoType_Regex `protobuf:"bytes,3,opt,name=regex,oneof"` +} type CustomInfoType_SurrogateType_ struct { SurrogateType *CustomInfoType_SurrogateType `protobuf:"bytes,4,opt,name=surrogate_type,json=surrogateType,oneof"` } func (*CustomInfoType_Dictionary_) isCustomInfoType_Type() {} +func (*CustomInfoType_Regex_) isCustomInfoType_Type() {} func (*CustomInfoType_SurrogateType_) isCustomInfoType_Type() {} func (m *CustomInfoType) GetType() isCustomInfoType_Type { @@ -116,6 +129,13 @@ func (m *CustomInfoType) GetInfoType() *InfoType { return nil } +func (m *CustomInfoType) GetLikelihood() Likelihood { + if m != nil { + return m.Likelihood + } + return Likelihood_LIKELIHOOD_UNSPECIFIED +} + func (m *CustomInfoType) GetDictionary() *CustomInfoType_Dictionary { if x, ok := m.GetType().(*CustomInfoType_Dictionary_); ok { return x.Dictionary @@ -123,6 +143,13 @@ func (m *CustomInfoType) GetDictionary() *CustomInfoType_Dictionary { return nil } +func (m *CustomInfoType) GetRegex() *CustomInfoType_Regex { + if x, ok := m.GetType().(*CustomInfoType_Regex_); ok { + return x.Regex + } + return nil +} + func (m *CustomInfoType) GetSurrogateType() *CustomInfoType_SurrogateType { if x, ok := m.GetType().(*CustomInfoType_SurrogateType_); ok { return x.SurrogateType @@ -130,10 +157,18 @@ func (m *CustomInfoType) GetSurrogateType() *CustomInfoType_SurrogateType { return nil } +func (m *CustomInfoType) GetDetectionRules() []*CustomInfoType_DetectionRule { + if m != nil { + return m.DetectionRules + } + return nil +} + // XXX_OneofFuncs is for the internal use of the proto package. 
func (*CustomInfoType) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _CustomInfoType_OneofMarshaler, _CustomInfoType_OneofUnmarshaler, _CustomInfoType_OneofSizer, []interface{}{ (*CustomInfoType_Dictionary_)(nil), + (*CustomInfoType_Regex_)(nil), (*CustomInfoType_SurrogateType_)(nil), } } @@ -147,6 +182,11 @@ func _CustomInfoType_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { if err := b.EncodeMessage(x.Dictionary); err != nil { return err } + case *CustomInfoType_Regex_: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Regex); err != nil { + return err + } case *CustomInfoType_SurrogateType_: b.EncodeVarint(4<<3 | proto.WireBytes) if err := b.EncodeMessage(x.SurrogateType); err != nil { @@ -170,6 +210,14 @@ func _CustomInfoType_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto err := b.DecodeMessage(msg) m.Type = &CustomInfoType_Dictionary_{msg} return true, err + case 3: // type.regex + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CustomInfoType_Regex) + err := b.DecodeMessage(msg) + m.Type = &CustomInfoType_Regex_{msg} + return true, err case 4: // type.surrogate_type if wire != proto.WireBytes { return true, proto.ErrInternalBadWireType @@ -192,6 +240,11 @@ func _CustomInfoType_OneofSizer(msg proto.Message) (n int) { n += proto.SizeVarint(2<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s + case *CustomInfoType_Regex_: + s := proto.Size(x.Regex) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s case *CustomInfoType_SurrogateType_: s := proto.Size(x.SurrogateType) n += proto.SizeVarint(4<<3 | proto.WireBytes) @@ -335,6 +388,24 @@ func (m *CustomInfoType_Dictionary_WordList) GetWords() []string { return nil } +// Message defining a custom regular expression. +type CustomInfoType_Regex struct { + // Pattern defining the regular expression. + Pattern string `protobuf:"bytes,1,opt,name=pattern" json:"pattern,omitempty"` +} + +func (m *CustomInfoType_Regex) Reset() { *m = CustomInfoType_Regex{} } +func (m *CustomInfoType_Regex) String() string { return proto.CompactTextString(m) } +func (*CustomInfoType_Regex) ProtoMessage() {} +func (*CustomInfoType_Regex) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1, 1} } + +func (m *CustomInfoType_Regex) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + // Message for detecting output from deidentification transformations // such as // [`CryptoReplaceFfxFpeConfig`](/dlp/docs/reference/rest/v2beta1/content/deidentify#CryptoReplaceFfxFpeConfig). @@ -349,7 +420,301 @@ type CustomInfoType_SurrogateType struct { func (m *CustomInfoType_SurrogateType) Reset() { *m = CustomInfoType_SurrogateType{} } func (m *CustomInfoType_SurrogateType) String() string { return proto.CompactTextString(m) } func (*CustomInfoType_SurrogateType) ProtoMessage() {} -func (*CustomInfoType_SurrogateType) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1, 1} } +func (*CustomInfoType_SurrogateType) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1, 2} } + +// Rule for modifying a custom info type to alter behavior under certain +// circumstances, depending on the specific details of the rule. Not supported +// for the `surrogate_type` custom info type. 
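(Editor's sketch, not part of the vendored diff.) A minimal example of how a caller might populate the fields added above: the base Likelihood and the new Regex oneof arm. It assumes the generated package is imported as dlp (google.golang.org/genproto/googleapis/privacy/dlp/v2beta2), that InfoType has a Name field, and that the Likelihood enum exposes Likelihood_VERY_LIKELY, as the field comment suggests; the info type name and pattern are illustrative only.

    custom := &dlp.CustomInfoType{
        // Info type this custom detector reports findings as (hypothetical name).
        InfoType: &dlp.InfoType{Name: "EXAMPLE_ID"},
        // Base likelihood; detection rules (see the sketch further below) may
        // adjust it per finding.
        Likelihood: dlp.Likelihood_VERY_LIKELY,
        // New oneof arm in this revision: match via a custom regular expression.
        Type: &dlp.CustomInfoType_Regex_{
            Regex: &dlp.CustomInfoType_Regex{Pattern: `\d{3}-\d{2}-\d{4}`},
        },
    }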
+type CustomInfoType_DetectionRule struct { + // Types that are valid to be assigned to Type: + // *CustomInfoType_DetectionRule_HotwordRule_ + Type isCustomInfoType_DetectionRule_Type `protobuf_oneof:"type"` +} + +func (m *CustomInfoType_DetectionRule) Reset() { *m = CustomInfoType_DetectionRule{} } +func (m *CustomInfoType_DetectionRule) String() string { return proto.CompactTextString(m) } +func (*CustomInfoType_DetectionRule) ProtoMessage() {} +func (*CustomInfoType_DetectionRule) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1, 3} } + +type isCustomInfoType_DetectionRule_Type interface { + isCustomInfoType_DetectionRule_Type() +} + +type CustomInfoType_DetectionRule_HotwordRule_ struct { + HotwordRule *CustomInfoType_DetectionRule_HotwordRule `protobuf:"bytes,1,opt,name=hotword_rule,json=hotwordRule,oneof"` +} + +func (*CustomInfoType_DetectionRule_HotwordRule_) isCustomInfoType_DetectionRule_Type() {} + +func (m *CustomInfoType_DetectionRule) GetType() isCustomInfoType_DetectionRule_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *CustomInfoType_DetectionRule) GetHotwordRule() *CustomInfoType_DetectionRule_HotwordRule { + if x, ok := m.GetType().(*CustomInfoType_DetectionRule_HotwordRule_); ok { + return x.HotwordRule + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*CustomInfoType_DetectionRule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _CustomInfoType_DetectionRule_OneofMarshaler, _CustomInfoType_DetectionRule_OneofUnmarshaler, _CustomInfoType_DetectionRule_OneofSizer, []interface{}{ + (*CustomInfoType_DetectionRule_HotwordRule_)(nil), + } +} + +func _CustomInfoType_DetectionRule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*CustomInfoType_DetectionRule) + // type + switch x := m.Type.(type) { + case *CustomInfoType_DetectionRule_HotwordRule_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.HotwordRule); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("CustomInfoType_DetectionRule.Type has unexpected type %T", x) + } + return nil +} + +func _CustomInfoType_DetectionRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*CustomInfoType_DetectionRule) + switch tag { + case 1: // type.hotword_rule + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CustomInfoType_DetectionRule_HotwordRule) + err := b.DecodeMessage(msg) + m.Type = &CustomInfoType_DetectionRule_HotwordRule_{msg} + return true, err + default: + return false, nil + } +} + +func _CustomInfoType_DetectionRule_OneofSizer(msg proto.Message) (n int) { + m := msg.(*CustomInfoType_DetectionRule) + // type + switch x := m.Type.(type) { + case *CustomInfoType_DetectionRule_HotwordRule_: + s := proto.Size(x.HotwordRule) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Message for specifying a window around a finding to apply a detection +// rule. +type CustomInfoType_DetectionRule_Proximity struct { + // Number of characters before the finding to consider. 
+ WindowBefore int32 `protobuf:"varint,1,opt,name=window_before,json=windowBefore" json:"window_before,omitempty"` + // Number of characters after the finding to consider. + WindowAfter int32 `protobuf:"varint,2,opt,name=window_after,json=windowAfter" json:"window_after,omitempty"` +} + +func (m *CustomInfoType_DetectionRule_Proximity) Reset() { + *m = CustomInfoType_DetectionRule_Proximity{} +} +func (m *CustomInfoType_DetectionRule_Proximity) String() string { return proto.CompactTextString(m) } +func (*CustomInfoType_DetectionRule_Proximity) ProtoMessage() {} +func (*CustomInfoType_DetectionRule_Proximity) Descriptor() ([]byte, []int) { + return fileDescriptor1, []int{1, 3, 0} +} + +func (m *CustomInfoType_DetectionRule_Proximity) GetWindowBefore() int32 { + if m != nil { + return m.WindowBefore + } + return 0 +} + +func (m *CustomInfoType_DetectionRule_Proximity) GetWindowAfter() int32 { + if m != nil { + return m.WindowAfter + } + return 0 +} + +// Message for specifying an adjustment to the likelihood of a finding as +// part of a detection rule. +type CustomInfoType_DetectionRule_LikelihoodAdjustment struct { + // Types that are valid to be assigned to Adjustment: + // *CustomInfoType_DetectionRule_LikelihoodAdjustment_FixedLikelihood + // *CustomInfoType_DetectionRule_LikelihoodAdjustment_RelativeLikelihood + Adjustment isCustomInfoType_DetectionRule_LikelihoodAdjustment_Adjustment `protobuf_oneof:"adjustment"` +} + +func (m *CustomInfoType_DetectionRule_LikelihoodAdjustment) Reset() { + *m = CustomInfoType_DetectionRule_LikelihoodAdjustment{} +} +func (m *CustomInfoType_DetectionRule_LikelihoodAdjustment) String() string { + return proto.CompactTextString(m) +} +func (*CustomInfoType_DetectionRule_LikelihoodAdjustment) ProtoMessage() {} +func (*CustomInfoType_DetectionRule_LikelihoodAdjustment) Descriptor() ([]byte, []int) { + return fileDescriptor1, []int{1, 3, 1} +} + +type isCustomInfoType_DetectionRule_LikelihoodAdjustment_Adjustment interface { + isCustomInfoType_DetectionRule_LikelihoodAdjustment_Adjustment() +} + +type CustomInfoType_DetectionRule_LikelihoodAdjustment_FixedLikelihood struct { + FixedLikelihood Likelihood `protobuf:"varint,1,opt,name=fixed_likelihood,json=fixedLikelihood,enum=google.privacy.dlp.v2beta2.Likelihood,oneof"` +} +type CustomInfoType_DetectionRule_LikelihoodAdjustment_RelativeLikelihood struct { + RelativeLikelihood int32 `protobuf:"varint,2,opt,name=relative_likelihood,json=relativeLikelihood,oneof"` +} + +func (*CustomInfoType_DetectionRule_LikelihoodAdjustment_FixedLikelihood) isCustomInfoType_DetectionRule_LikelihoodAdjustment_Adjustment() { +} +func (*CustomInfoType_DetectionRule_LikelihoodAdjustment_RelativeLikelihood) isCustomInfoType_DetectionRule_LikelihoodAdjustment_Adjustment() { +} + +func (m *CustomInfoType_DetectionRule_LikelihoodAdjustment) GetAdjustment() isCustomInfoType_DetectionRule_LikelihoodAdjustment_Adjustment { + if m != nil { + return m.Adjustment + } + return nil +} + +func (m *CustomInfoType_DetectionRule_LikelihoodAdjustment) GetFixedLikelihood() Likelihood { + if x, ok := m.GetAdjustment().(*CustomInfoType_DetectionRule_LikelihoodAdjustment_FixedLikelihood); ok { + return x.FixedLikelihood + } + return Likelihood_LIKELIHOOD_UNSPECIFIED +} + +func (m *CustomInfoType_DetectionRule_LikelihoodAdjustment) GetRelativeLikelihood() int32 { + if x, ok := m.GetAdjustment().(*CustomInfoType_DetectionRule_LikelihoodAdjustment_RelativeLikelihood); ok { + return x.RelativeLikelihood + } + return 0 +} + +// XXX_OneofFuncs is for the 
internal use of the proto package. +func (*CustomInfoType_DetectionRule_LikelihoodAdjustment) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _CustomInfoType_DetectionRule_LikelihoodAdjustment_OneofMarshaler, _CustomInfoType_DetectionRule_LikelihoodAdjustment_OneofUnmarshaler, _CustomInfoType_DetectionRule_LikelihoodAdjustment_OneofSizer, []interface{}{ + (*CustomInfoType_DetectionRule_LikelihoodAdjustment_FixedLikelihood)(nil), + (*CustomInfoType_DetectionRule_LikelihoodAdjustment_RelativeLikelihood)(nil), + } +} + +func _CustomInfoType_DetectionRule_LikelihoodAdjustment_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*CustomInfoType_DetectionRule_LikelihoodAdjustment) + // adjustment + switch x := m.Adjustment.(type) { + case *CustomInfoType_DetectionRule_LikelihoodAdjustment_FixedLikelihood: + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.FixedLikelihood)) + case *CustomInfoType_DetectionRule_LikelihoodAdjustment_RelativeLikelihood: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.RelativeLikelihood)) + case nil: + default: + return fmt.Errorf("CustomInfoType_DetectionRule_LikelihoodAdjustment.Adjustment has unexpected type %T", x) + } + return nil +} + +func _CustomInfoType_DetectionRule_LikelihoodAdjustment_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*CustomInfoType_DetectionRule_LikelihoodAdjustment) + switch tag { + case 1: // adjustment.fixed_likelihood + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Adjustment = &CustomInfoType_DetectionRule_LikelihoodAdjustment_FixedLikelihood{Likelihood(x)} + return true, err + case 2: // adjustment.relative_likelihood + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Adjustment = &CustomInfoType_DetectionRule_LikelihoodAdjustment_RelativeLikelihood{int32(x)} + return true, err + default: + return false, nil + } +} + +func _CustomInfoType_DetectionRule_LikelihoodAdjustment_OneofSizer(msg proto.Message) (n int) { + m := msg.(*CustomInfoType_DetectionRule_LikelihoodAdjustment) + // adjustment + switch x := m.Adjustment.(type) { + case *CustomInfoType_DetectionRule_LikelihoodAdjustment_FixedLikelihood: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.FixedLikelihood)) + case *CustomInfoType_DetectionRule_LikelihoodAdjustment_RelativeLikelihood: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.RelativeLikelihood)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Detection rule that adjusts the likelihood of findings within a certain +// proximity of hotwords. +type CustomInfoType_DetectionRule_HotwordRule struct { + // Regex pattern defining what qualifies as a hotword. + HotwordRegex *CustomInfoType_Regex `protobuf:"bytes,1,opt,name=hotword_regex,json=hotwordRegex" json:"hotword_regex,omitempty"` + // Proximity of the finding within which the entire hotword must reside. + // The total length of the window cannot exceed 1000 characters. Note that + // the finding itself will be included in the window, so that hotwords may + // be used to match substrings of the finding itself. 
For example, the + // certainty of a phone number regex "\(\d{3}\) \d{3}-\d{4}" could be + // adjusted upwards if the area code is known to be the local area code of + // a company office using the hotword regex "\(xxx\)", where "xxx" + // is the area code in question. + Proximity *CustomInfoType_DetectionRule_Proximity `protobuf:"bytes,2,opt,name=proximity" json:"proximity,omitempty"` + // Likelihood adjustment to apply to all matching findings. + LikelihoodAdjustment *CustomInfoType_DetectionRule_LikelihoodAdjustment `protobuf:"bytes,3,opt,name=likelihood_adjustment,json=likelihoodAdjustment" json:"likelihood_adjustment,omitempty"` +} + +func (m *CustomInfoType_DetectionRule_HotwordRule) Reset() { + *m = CustomInfoType_DetectionRule_HotwordRule{} +} +func (m *CustomInfoType_DetectionRule_HotwordRule) String() string { return proto.CompactTextString(m) } +func (*CustomInfoType_DetectionRule_HotwordRule) ProtoMessage() {} +func (*CustomInfoType_DetectionRule_HotwordRule) Descriptor() ([]byte, []int) { + return fileDescriptor1, []int{1, 3, 2} +} + +func (m *CustomInfoType_DetectionRule_HotwordRule) GetHotwordRegex() *CustomInfoType_Regex { + if m != nil { + return m.HotwordRegex + } + return nil +} + +func (m *CustomInfoType_DetectionRule_HotwordRule) GetProximity() *CustomInfoType_DetectionRule_Proximity { + if m != nil { + return m.Proximity + } + return nil +} + +func (m *CustomInfoType_DetectionRule_HotwordRule) GetLikelihoodAdjustment() *CustomInfoType_DetectionRule_LikelihoodAdjustment { + if m != nil { + return m.LikelihoodAdjustment + } + return nil +} // General identifier of a data field in a storage service. type FieldId struct { @@ -451,6 +816,9 @@ func (m *DatastoreOptions) GetKind() *KindExpression { // a Google Cloud Storage bucket. type CloudStorageOptions struct { FileSet *CloudStorageOptions_FileSet `protobuf:"bytes,1,opt,name=file_set,json=fileSet" json:"file_set,omitempty"` + // Max number of bytes to scan from a file. If a scanned file's size is bigger + // than this value then the rest of the bytes are omitted. + BytesLimitPerFile int64 `protobuf:"varint,4,opt,name=bytes_limit_per_file,json=bytesLimitPerFile" json:"bytes_limit_per_file,omitempty"` } func (m *CloudStorageOptions) Reset() { *m = CloudStorageOptions{} } @@ -465,6 +833,13 @@ func (m *CloudStorageOptions) GetFileSet() *CloudStorageOptions_FileSet { return nil } +func (m *CloudStorageOptions) GetBytesLimitPerFile() int64 { + if m != nil { + return m.BytesLimitPerFile + } + return 0 +} + // Set of files to scan. type CloudStorageOptions_FileSet struct { // The url, in the format `gs:///`. Trailing wildcard in the @@ -518,7 +893,8 @@ type StorageConfig struct { // *StorageConfig_DatastoreOptions // *StorageConfig_CloudStorageOptions // *StorageConfig_BigQueryOptions - Type isStorageConfig_Type `protobuf_oneof:"type"` + Type isStorageConfig_Type `protobuf_oneof:"type"` + TimespanConfig *StorageConfig_TimespanConfig `protobuf:"bytes,6,opt,name=timespan_config,json=timespanConfig" json:"timespan_config,omitempty"` } func (m *StorageConfig) Reset() { *m = StorageConfig{} } @@ -572,6 +948,13 @@ func (m *StorageConfig) GetBigQueryOptions() *BigQueryOptions { return nil } +func (m *StorageConfig) GetTimespanConfig() *StorageConfig_TimespanConfig { + if m != nil { + return m.TimespanConfig + } + return nil +} + // XXX_OneofFuncs is for the internal use of the proto package. 
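(Editor's sketch, same caveats as above.) Continuing the example, a hotword rule matching the phone-number scenario in the HotwordRule comment might be wired up as follows; the area code, window size, and adjustment amount are illustrative only:

    hotword := &dlp.CustomInfoType_DetectionRule_HotwordRule{
        // Hotword that must appear near the finding, e.g. a known local area code.
        HotwordRegex: &dlp.CustomInfoType_Regex{Pattern: `\(415\)`},
        // Search up to 50 characters before the finding for the hotword.
        Proximity: &dlp.CustomInfoType_DetectionRule_Proximity{WindowBefore: 50},
        // Bump matching findings up one likelihood level (relative adjustment).
        LikelihoodAdjustment: &dlp.CustomInfoType_DetectionRule_LikelihoodAdjustment{
            Adjustment: &dlp.CustomInfoType_DetectionRule_LikelihoodAdjustment_RelativeLikelihood{
                RelativeLikelihood: 1,
            },
        },
    }
    // Rules run in the order given; not supported for surrogate_type detectors.
    custom.DetectionRules = []*dlp.CustomInfoType_DetectionRule{
        {Type: &dlp.CustomInfoType_DetectionRule_HotwordRule_{HotwordRule: hotword}},
    }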
func (*StorageConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _StorageConfig_OneofMarshaler, _StorageConfig_OneofUnmarshaler, _StorageConfig_OneofSizer, []interface{}{ @@ -665,6 +1048,75 @@ func _StorageConfig_OneofSizer(msg proto.Message) (n int) { return n } +// Configuration of the timespan of the items to include in scanning. +// Currently only supported when inspecting Google Cloud Storage and BigQuery. +type StorageConfig_TimespanConfig struct { + // Exclude files older than this value. + StartTime *google_protobuf1.Timestamp `protobuf:"bytes,1,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // Exclude files newer than this value. + // If set to zero, no upper time limit is applied. + EndTime *google_protobuf1.Timestamp `protobuf:"bytes,2,opt,name=end_time,json=endTime" json:"end_time,omitempty"` + // When the job is started by a JobTrigger we will automatically figure out + // a valid start_time to avoid scanning files that have not been modified + // since the last time the JobTrigger executed. This will be based on the + // time of the execution of the last run of the JobTrigger. + EnableAutoPopulationOfTimespanConfig bool `protobuf:"varint,4,opt,name=enable_auto_population_of_timespan_config,json=enableAutoPopulationOfTimespanConfig" json:"enable_auto_population_of_timespan_config,omitempty"` +} + +func (m *StorageConfig_TimespanConfig) Reset() { *m = StorageConfig_TimespanConfig{} } +func (m *StorageConfig_TimespanConfig) String() string { return proto.CompactTextString(m) } +func (*StorageConfig_TimespanConfig) ProtoMessage() {} +func (*StorageConfig_TimespanConfig) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8, 0} } + +func (m *StorageConfig_TimespanConfig) GetStartTime() *google_protobuf1.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *StorageConfig_TimespanConfig) GetEndTime() *google_protobuf1.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *StorageConfig_TimespanConfig) GetEnableAutoPopulationOfTimespanConfig() bool { + if m != nil { + return m.EnableAutoPopulationOfTimespanConfig + } + return false +} + +// Row key for identifying a record in BigQuery table. +type BigQueryKey struct { + // Complete BigQuery table reference. + TableReference *BigQueryTable `protobuf:"bytes,1,opt,name=table_reference,json=tableReference" json:"table_reference,omitempty"` + // Absolute number of the row from the beginning of the table at the time + // of scanning. + RowNumber int64 `protobuf:"varint,2,opt,name=row_number,json=rowNumber" json:"row_number,omitempty"` +} + +func (m *BigQueryKey) Reset() { *m = BigQueryKey{} } +func (m *BigQueryKey) String() string { return proto.CompactTextString(m) } +func (*BigQueryKey) ProtoMessage() {} +func (*BigQueryKey) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9} } + +func (m *BigQueryKey) GetTableReference() *BigQueryTable { + if m != nil { + return m.TableReference + } + return nil +} + +func (m *BigQueryKey) GetRowNumber() int64 { + if m != nil { + return m.RowNumber + } + return 0 +} + // Record key for a finding in a Cloud Storage file. type CloudStorageKey struct { // Path to the file. 
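(Editor's sketch, same caveats.) Two of the additions above combined: the per-file byte limit on CloudStorageOptions and the new TimespanConfig on StorageConfig. It assumes CloudStorageOptions_FileSet exposes a Url field (not shown in this hunk) and uses ptypes.TimestampProto from github.com/golang/protobuf/ptypes to build the timestamp:

    start, _ := ptypes.TimestampProto(time.Now().Add(-24 * time.Hour))
    cfg := &dlp.StorageConfig{
        Type: &dlp.StorageConfig_CloudStorageOptions{
            CloudStorageOptions: &dlp.CloudStorageOptions{
                // Files to scan (field name assumed from surrounding context).
                FileSet: &dlp.CloudStorageOptions_FileSet{Url: "gs://my-bucket/*"},
                // Scan at most the first MiB of each file; the rest is omitted.
                BytesLimitPerFile: 1 << 20,
            },
        },
        // Only include items modified within roughly the last day.
        TimespanConfig: &dlp.StorageConfig_TimespanConfig{StartTime: start},
    }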
@@ -676,7 +1128,7 @@ type CloudStorageKey struct { func (m *CloudStorageKey) Reset() { *m = CloudStorageKey{} } func (m *CloudStorageKey) String() string { return proto.CompactTextString(m) } func (*CloudStorageKey) ProtoMessage() {} -func (*CloudStorageKey) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9} } +func (*CloudStorageKey) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10} } func (m *CloudStorageKey) GetFilePath() string { if m != nil { @@ -701,7 +1153,7 @@ type DatastoreKey struct { func (m *DatastoreKey) Reset() { *m = DatastoreKey{} } func (m *DatastoreKey) String() string { return proto.CompactTextString(m) } func (*DatastoreKey) ProtoMessage() {} -func (*DatastoreKey) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10} } +func (*DatastoreKey) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{11} } func (m *DatastoreKey) GetEntityKey() *Key { if m != nil { @@ -734,7 +1186,7 @@ type Key struct { func (m *Key) Reset() { *m = Key{} } func (m *Key) String() string { return proto.CompactTextString(m) } func (*Key) ProtoMessage() {} -func (*Key) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{11} } +func (*Key) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{12} } func (m *Key) GetPartitionId() *PartitionId { if m != nil { @@ -771,7 +1223,7 @@ type Key_PathElement struct { func (m *Key_PathElement) Reset() { *m = Key_PathElement{} } func (m *Key_PathElement) String() string { return proto.CompactTextString(m) } func (*Key_PathElement) ProtoMessage() {} -func (*Key_PathElement) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{11, 0} } +func (*Key_PathElement) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{12, 0} } type isKey_PathElement_IdType interface { isKey_PathElement_IdType() @@ -885,13 +1337,14 @@ type RecordKey struct { // Types that are valid to be assigned to Type: // *RecordKey_CloudStorageKey // *RecordKey_DatastoreKey + // *RecordKey_BigQueryKey Type isRecordKey_Type `protobuf_oneof:"type"` } func (m *RecordKey) Reset() { *m = RecordKey{} } func (m *RecordKey) String() string { return proto.CompactTextString(m) } func (*RecordKey) ProtoMessage() {} -func (*RecordKey) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{12} } +func (*RecordKey) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{13} } type isRecordKey_Type interface { isRecordKey_Type() @@ -903,9 +1356,13 @@ type RecordKey_CloudStorageKey struct { type RecordKey_DatastoreKey struct { DatastoreKey *DatastoreKey `protobuf:"bytes,2,opt,name=datastore_key,json=datastoreKey,oneof"` } +type RecordKey_BigQueryKey struct { + BigQueryKey *BigQueryKey `protobuf:"bytes,3,opt,name=big_query_key,json=bigQueryKey,oneof"` +} func (*RecordKey_CloudStorageKey) isRecordKey_Type() {} func (*RecordKey_DatastoreKey) isRecordKey_Type() {} +func (*RecordKey_BigQueryKey) isRecordKey_Type() {} func (m *RecordKey) GetType() isRecordKey_Type { if m != nil { @@ -928,11 +1385,19 @@ func (m *RecordKey) GetDatastoreKey() *DatastoreKey { return nil } +func (m *RecordKey) GetBigQueryKey() *BigQueryKey { + if x, ok := m.GetType().(*RecordKey_BigQueryKey); ok { + return x.BigQueryKey + } + return nil +} + // XXX_OneofFuncs is for the internal use of the proto package. 
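(Editor's sketch, same caveats.) The new RecordKey_BigQueryKey oneof arm added above in use; the DatasetId and TableId fields on BigQueryTable are assumed from context rather than shown in this hunk:

    rec := &dlp.RecordKey{
        Type: &dlp.RecordKey_BigQueryKey{
            BigQueryKey: &dlp.BigQueryKey{
                TableReference: &dlp.BigQueryTable{
                    ProjectId: "my-project", // see GetProjectId below
                    DatasetId: "my_dataset", // assumed field
                    TableId:   "my_table",   // assumed field
                },
                // Absolute row position at scan time, from the top of the table.
                RowNumber: 42,
            },
        },
    }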
func (*RecordKey) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _RecordKey_OneofMarshaler, _RecordKey_OneofUnmarshaler, _RecordKey_OneofSizer, []interface{}{ (*RecordKey_CloudStorageKey)(nil), (*RecordKey_DatastoreKey)(nil), + (*RecordKey_BigQueryKey)(nil), } } @@ -950,6 +1415,11 @@ func _RecordKey_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { if err := b.EncodeMessage(x.DatastoreKey); err != nil { return err } + case *RecordKey_BigQueryKey: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BigQueryKey); err != nil { + return err + } case nil: default: return fmt.Errorf("RecordKey.Type has unexpected type %T", x) @@ -976,6 +1446,14 @@ func _RecordKey_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buff err := b.DecodeMessage(msg) m.Type = &RecordKey_DatastoreKey{msg} return true, err + case 3: // type.big_query_key + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BigQueryKey) + err := b.DecodeMessage(msg) + m.Type = &RecordKey_BigQueryKey{msg} + return true, err default: return false, nil } @@ -995,6 +1473,11 @@ func _RecordKey_OneofSizer(msg proto.Message) (n int) { n += proto.SizeVarint(2<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s + case *RecordKey_BigQueryKey: + s := proto.Size(x.BigQueryKey) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) @@ -1020,7 +1503,7 @@ type BigQueryTable struct { func (m *BigQueryTable) Reset() { *m = BigQueryTable{} } func (m *BigQueryTable) String() string { return proto.CompactTextString(m) } func (*BigQueryTable) ProtoMessage() {} -func (*BigQueryTable) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{13} } +func (*BigQueryTable) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{14} } func (m *BigQueryTable) GetProjectId() string { if m != nil { @@ -1056,7 +1539,7 @@ type EntityId struct { func (m *EntityId) Reset() { *m = EntityId{} } func (m *EntityId) String() string { return proto.CompactTextString(m) } func (*EntityId) ProtoMessage() {} -func (*EntityId) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{14} } +func (*EntityId) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{15} } func (m *EntityId) GetField() *FieldId { if m != nil { @@ -1070,7 +1553,12 @@ func init() { proto.RegisterType((*CustomInfoType)(nil), "google.privacy.dlp.v2beta2.CustomInfoType") proto.RegisterType((*CustomInfoType_Dictionary)(nil), "google.privacy.dlp.v2beta2.CustomInfoType.Dictionary") proto.RegisterType((*CustomInfoType_Dictionary_WordList)(nil), "google.privacy.dlp.v2beta2.CustomInfoType.Dictionary.WordList") + proto.RegisterType((*CustomInfoType_Regex)(nil), "google.privacy.dlp.v2beta2.CustomInfoType.Regex") proto.RegisterType((*CustomInfoType_SurrogateType)(nil), "google.privacy.dlp.v2beta2.CustomInfoType.SurrogateType") + proto.RegisterType((*CustomInfoType_DetectionRule)(nil), "google.privacy.dlp.v2beta2.CustomInfoType.DetectionRule") + proto.RegisterType((*CustomInfoType_DetectionRule_Proximity)(nil), "google.privacy.dlp.v2beta2.CustomInfoType.DetectionRule.Proximity") + proto.RegisterType((*CustomInfoType_DetectionRule_LikelihoodAdjustment)(nil), "google.privacy.dlp.v2beta2.CustomInfoType.DetectionRule.LikelihoodAdjustment") + 
proto.RegisterType((*CustomInfoType_DetectionRule_HotwordRule)(nil), "google.privacy.dlp.v2beta2.CustomInfoType.DetectionRule.HotwordRule") proto.RegisterType((*FieldId)(nil), "google.privacy.dlp.v2beta2.FieldId") proto.RegisterType((*PartitionId)(nil), "google.privacy.dlp.v2beta2.PartitionId") proto.RegisterType((*KindExpression)(nil), "google.privacy.dlp.v2beta2.KindExpression") @@ -1079,6 +1567,8 @@ func init() { proto.RegisterType((*CloudStorageOptions_FileSet)(nil), "google.privacy.dlp.v2beta2.CloudStorageOptions.FileSet") proto.RegisterType((*BigQueryOptions)(nil), "google.privacy.dlp.v2beta2.BigQueryOptions") proto.RegisterType((*StorageConfig)(nil), "google.privacy.dlp.v2beta2.StorageConfig") + proto.RegisterType((*StorageConfig_TimespanConfig)(nil), "google.privacy.dlp.v2beta2.StorageConfig.TimespanConfig") + proto.RegisterType((*BigQueryKey)(nil), "google.privacy.dlp.v2beta2.BigQueryKey") proto.RegisterType((*CloudStorageKey)(nil), "google.privacy.dlp.v2beta2.CloudStorageKey") proto.RegisterType((*DatastoreKey)(nil), "google.privacy.dlp.v2beta2.DatastoreKey") proto.RegisterType((*Key)(nil), "google.privacy.dlp.v2beta2.Key") @@ -1092,74 +1582,105 @@ func init() { func init() { proto.RegisterFile("google/privacy/dlp/v2beta2/storage.proto", fileDescriptor1) } var fileDescriptor1 = []byte{ - // 1100 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xdf, 0x6e, 0xe3, 0xc4, - 0x17, 0x8e, 0x93, 0xb4, 0x4d, 0x4e, 0x9a, 0x26, 0x9d, 0xdd, 0xdf, 0x4f, 0x25, 0x4b, 0x77, 0x4b, - 0x58, 0x89, 0xb2, 0x45, 0x8e, 0x54, 0x84, 0x00, 0x21, 0x8a, 0x48, 0x93, 0x12, 0xd3, 0xaa, 0xe9, - 0x4e, 0xba, 0x5d, 0x15, 0x8a, 0x2c, 0x27, 0x33, 0x71, 0x87, 0x3a, 0x1e, 0x63, 0x4f, 0x76, 0xc9, - 0x0b, 0xf0, 0x0a, 0xdc, 0x71, 0xc3, 0x1d, 0x0f, 0x80, 0xc4, 0x15, 0x12, 0x97, 0x3c, 0x09, 0x8f, - 0x81, 0xe6, 0x8f, 0xd3, 0xa4, 0x94, 0xd0, 0x45, 0xdc, 0xcd, 0x9c, 0x39, 0xe7, 0x9b, 0x6f, 0xce, - 0xf9, 0xce, 0xb1, 0x61, 0xdb, 0xe7, 0xdc, 0x0f, 0x68, 0x23, 0x8a, 0xd9, 0x0b, 0x6f, 0x30, 0x69, - 0x90, 0x20, 0x6a, 0xbc, 0xd8, 0xed, 0x53, 0xe1, 0xed, 0x36, 0x12, 0xc1, 0x63, 0xcf, 0xa7, 0x76, - 0x14, 0x73, 0xc1, 0x51, 0x4d, 0x7b, 0xda, 0xc6, 0xd3, 0x26, 0x41, 0x64, 0x1b, 0xcf, 0xda, 0xeb, - 0x06, 0xc5, 0x8b, 0x58, 0xc3, 0x0b, 0x43, 0x2e, 0x3c, 0xc1, 0x78, 0x98, 0xe8, 0xc8, 0xda, 0xa3, - 0xe9, 0x1d, 0x5c, 0xf0, 0xfe, 0x78, 0xd8, 0x10, 0x6c, 0x44, 0x13, 0xe1, 0x8d, 0x22, 0xed, 0x50, - 0x7f, 0x08, 0x05, 0x27, 0x1c, 0xf2, 0xd3, 0x49, 0x44, 0x11, 0x82, 0x7c, 0xe8, 0x8d, 0xe8, 0x86, - 0xb5, 0x65, 0x6d, 0x17, 0xb1, 0x5a, 0xd7, 0x7f, 0xcb, 0xc1, 0xda, 0xfe, 0x38, 0x11, 0x7c, 0x34, - 0x75, 0xfb, 0x14, 0x8a, 0x2c, 0x1c, 0x72, 0x57, 0x4c, 0x22, 0xed, 0x5b, 0xda, 0x7d, 0x6c, 0xff, - 0x3d, 0x43, 0x3b, 0x0d, 0xc4, 0x05, 0x96, 0x42, 0x3c, 0x07, 0x20, 0x6c, 0x20, 0x89, 0x7a, 0xf1, - 0x64, 0x23, 0xab, 0x30, 0xde, 0x5b, 0x84, 0x31, 0x4f, 0xc1, 0x6e, 0x4d, 0x83, 0x3b, 0x19, 0x3c, - 0x03, 0x85, 0x3c, 0x58, 0x4b, 0xc6, 0x71, 0xcc, 0x7d, 0x4f, 0x50, 0x4d, 0x30, 0xaf, 0xc0, 0x3f, - 0x78, 0x05, 0xf0, 0x5e, 0x0a, 0x20, 0x77, 0x9d, 0x0c, 0x2e, 0x27, 0xb3, 0x86, 0xda, 0xf7, 0x16, - 0xc0, 0xf5, 0xfd, 0xe8, 0x2b, 0x28, 0xbe, 0xe4, 0x31, 0x71, 0x03, 0x96, 0x08, 0x93, 0x8d, 0xbd, - 0x7f, 0xf5, 0x12, 0xfb, 0x39, 0x8f, 0xc9, 0x11, 0x4b, 0x44, 0x27, 0x83, 0x0b, 0x2f, 0xcd, 0xba, - 0xb6, 0x05, 0x85, 0xd4, 0x8e, 0xee, 0xc3, 0x92, 0xb4, 0x27, 0x1b, 0xd6, 0x56, 0x6e, 0xbb, 0x88, - 0xf5, 0xa6, 0x59, 0x80, 0xe5, 0x84, 0x8f, 0xe3, 0x01, 0xad, 0x55, 0xa0, 0x3c, 0xc7, 0xbd, 0xb9, - 0x0c, 0x79, 0x99, 0x83, 0xfa, 
0x26, 0xac, 0x1c, 0x30, 0x1a, 0x10, 0x87, 0xdc, 0x5a, 0xe3, 0x2e, - 0x94, 0x4e, 0xbc, 0x58, 0x30, 0x49, 0xc4, 0x21, 0x68, 0x13, 0x20, 0x8a, 0xf9, 0xd7, 0x74, 0x20, - 0x5c, 0x46, 0x54, 0x71, 0x8a, 0xb8, 0x68, 0x2c, 0x0e, 0x41, 0x6f, 0xc0, 0xaa, 0x8c, 0x4a, 0x22, - 0x6f, 0x40, 0xa5, 0x43, 0x5e, 0x39, 0x94, 0xa6, 0x36, 0x87, 0xd4, 0x1f, 0xc3, 0xda, 0x21, 0x0b, - 0x49, 0xfb, 0xdb, 0x28, 0xa6, 0x49, 0xc2, 0x78, 0x78, 0xeb, 0xb5, 0x3f, 0x58, 0x50, 0x6d, 0x79, - 0xc2, 0x93, 0x5a, 0xa7, 0xdd, 0x48, 0xc9, 0x16, 0x7d, 0x0e, 0xab, 0x51, 0xca, 0x45, 0xa2, 0xeb, - 0x8c, 0xbe, 0xb5, 0x28, 0xa3, 0x33, 0xdc, 0x71, 0x29, 0x9a, 0x79, 0xc8, 0x1e, 0xe4, 0xaf, 0x58, - 0x48, 0x8c, 0xbe, 0x9e, 0x2c, 0xc2, 0x98, 0xa7, 0x8b, 0x55, 0x5c, 0xfd, 0x3b, 0x0b, 0xee, 0xed, - 0x07, 0x7c, 0x4c, 0x7a, 0xba, 0x1b, 0x53, 0x8e, 0x18, 0x0a, 0x43, 0x16, 0x50, 0x37, 0xa1, 0x69, - 0xc5, 0xdf, 0x5f, 0x58, 0xf1, 0xbf, 0x42, 0xd8, 0x07, 0x2c, 0xa0, 0x3d, 0x2a, 0xf0, 0xca, 0x50, - 0x2f, 0x6a, 0x0f, 0x64, 0x89, 0xd4, 0x12, 0x55, 0x21, 0x37, 0x8e, 0x03, 0x93, 0x2a, 0xb9, 0xac, - 0xff, 0x62, 0x41, 0xa5, 0xc9, 0xfc, 0xa7, 0x63, 0x1a, 0x4f, 0xae, 0x49, 0x54, 0x84, 0xd7, 0x0f, - 0xa8, 0x1b, 0xd3, 0x21, 0x8d, 0x69, 0x38, 0x48, 0x7b, 0xf1, 0xed, 0x45, 0x5c, 0x52, 0x94, 0x53, - 0x19, 0x8a, 0xd7, 0x14, 0x02, 0x4e, 0x01, 0x10, 0x06, 0xc4, 0x08, 0x0d, 0x05, 0x1b, 0x4e, 0x58, - 0xe8, 0xbb, 0x43, 0xa9, 0x99, 0x64, 0x23, 0xbb, 0x95, 0xdb, 0x2e, 0xed, 0xbe, 0xb9, 0x08, 0xd6, - 0xa8, 0x0b, 0xaf, 0xcf, 0x84, 0x2b, 0x5b, 0x52, 0xff, 0x39, 0x0b, 0x65, 0xf3, 0xf8, 0x7d, 0x1e, - 0x0e, 0x99, 0x8f, 0xbe, 0x84, 0x75, 0x92, 0x96, 0xdd, 0xe5, 0xfa, 0x39, 0xa6, 0x46, 0xef, 0x2c, - 0xba, 0xe4, 0xa6, 0x56, 0x3a, 0x19, 0x5c, 0x25, 0x37, 0xf5, 0x43, 0xe1, 0x7f, 0x03, 0x99, 0x6f, - 0xd7, 0x4c, 0xd0, 0xe9, 0x05, 0x39, 0x75, 0x41, 0xe3, 0x15, 0x0b, 0xd5, 0xc9, 0xe0, 0x7b, 0x83, - 0x5b, 0x24, 0x70, 0x0e, 0xeb, 0x7d, 0xe6, 0xbb, 0xdf, 0xc8, 0x5c, 0x4e, 0xaf, 0xd0, 0xa3, 0x66, - 0xe7, 0x2e, 0xf9, 0xbf, 0x86, 0xaf, 0xf4, 0xe7, 0x4d, 0xd3, 0xa6, 0x7d, 0x0a, 0x95, 0x59, 0x42, - 0x87, 0x74, 0x82, 0x1e, 0x40, 0x51, 0x09, 0x2f, 0xf2, 0xc4, 0xa5, 0xd1, 0x87, 0x52, 0xe2, 0x89, - 0x27, 0x2e, 0x65, 0x5f, 0x26, 0xc2, 0x8b, 0x85, 0xcb, 0x87, 0x43, 0xa9, 0x4c, 0x99, 0xd1, 0x1c, - 0x2e, 0x29, 0x5b, 0x57, 0x99, 0xea, 0xc7, 0xb0, 0x3a, 0x4d, 0xa2, 0xc4, 0xdb, 0x03, 0x90, 0xe5, - 0x12, 0x13, 0xf7, 0x8a, 0x4e, 0x8c, 0x7c, 0x1e, 0x2d, 0x6c, 0x13, 0x3a, 0xc1, 0x45, 0x1d, 0x72, - 0x48, 0x27, 0xf5, 0x3f, 0x2c, 0xc8, 0x49, 0x9c, 0xff, 0xb2, 0x69, 0x3f, 0x81, 0xbc, 0x7a, 0x9e, - 0x56, 0xdd, 0xce, 0x3f, 0xb0, 0xb1, 0xe5, 0xd3, 0xdb, 0x01, 0x1d, 0xd1, 0x50, 0x60, 0x15, 0x58, - 0x3b, 0x95, 0xd3, 0x6c, 0x6a, 0x94, 0x93, 0x47, 0x0d, 0x01, 0x33, 0x79, 0xe4, 0x1a, 0x55, 0x21, - 0x6b, 0x26, 0x5b, 0xae, 0x93, 0xc1, 0x59, 0x46, 0xd0, 0x7d, 0x33, 0x9f, 0xa4, 0x4a, 0x8a, 0x9d, - 0x8c, 0x9e, 0x50, 0xcd, 0x22, 0xac, 0x30, 0xa2, 0x3e, 0x23, 0xf5, 0x5f, 0x2d, 0x28, 0x62, 0x3a, - 0xe0, 0x31, 0x91, 0x0f, 0x3e, 0x87, 0xf5, 0x79, 0x95, 0x5d, 0xe7, 0x6f, 0xe7, 0xae, 0x0a, 0x3b, - 0xa4, 0xf2, 0xe3, 0x55, 0x19, 0xdc, 0xa8, 0x71, 0x17, 0xca, 0xd7, 0xdd, 0x21, 0x61, 0x75, 0x67, - 0x6c, 0xdf, 0xa9, 0x33, 0x34, 0xe6, 0x2a, 0x99, 0xd9, 0x4f, 0xf5, 0x74, 0x09, 0xe5, 0xb9, 0xee, - 0xbf, 0x31, 0xe7, 0xad, 0x9b, 0x73, 0x7e, 0x13, 0x40, 0xe1, 0xd0, 0xd9, 0xcf, 0x80, 0xb1, 0x38, - 0x04, 0xbd, 0x06, 0x05, 0x3d, 0x7f, 0x18, 0xd1, 0x59, 0xc3, 0x2b, 0x6a, 0xef, 0x90, 0x7a, 0x1b, - 0x0a, 0x6d, 0xa5, 0x11, 0x87, 0xa0, 0x0f, 0x61, 0x49, 0x8d, 0x11, 0x93, 0x9d, 0x3b, 0x4d, 0x11, - 0x1d, 0xf1, 0x44, 0x00, 0x1c, 0xb1, 0x2b, 0x1a, 0xb0, 
0x4b, 0xce, 0x09, 0xaa, 0xc1, 0xff, 0x8f, - 0x9c, 0xc3, 0xf6, 0x91, 0xd3, 0xe9, 0x76, 0x5b, 0xee, 0xb3, 0xe3, 0xde, 0x49, 0x7b, 0xdf, 0x39, - 0x70, 0xda, 0xad, 0x6a, 0x06, 0xad, 0x43, 0xf9, 0xac, 0x8d, 0xcf, 0xdd, 0x67, 0xc7, 0xca, 0xe5, - 0xbc, 0x6a, 0xa1, 0x55, 0x28, 0x4c, 0x77, 0x59, 0xb9, 0x3b, 0xe9, 0xf6, 0x7a, 0x4e, 0xf3, 0xa8, - 0x5d, 0xcd, 0x21, 0x80, 0x65, 0x73, 0x92, 0x47, 0x15, 0x28, 0xa9, 0x50, 0x63, 0x58, 0x6a, 0xfe, - 0x68, 0xc1, 0xc3, 0x01, 0x1f, 0x2d, 0xe0, 0xd9, 0x84, 0x56, 0x10, 0x99, 0x8a, 0x9d, 0x58, 0x5f, - 0x7c, 0x6c, 0x3c, 0x7d, 0x1e, 0x78, 0xa1, 0x6f, 0xf3, 0xd8, 0x6f, 0xf8, 0x34, 0x54, 0x7f, 0x57, - 0x0d, 0x7d, 0xe4, 0x45, 0x2c, 0xb9, 0xed, 0x2f, 0xef, 0x23, 0x12, 0x44, 0x3f, 0x65, 0x37, 0x3e, - 0xd3, 0xf1, 0x4a, 0x1b, 0x76, 0x2b, 0x88, 0xec, 0xb3, 0xdd, 0xa6, 0x3c, 0xfe, 0x3d, 0x3d, 0xba, - 0x50, 0x47, 0x17, 0xad, 0x20, 0xba, 0x38, 0xd3, 0x91, 0xfd, 0x65, 0x85, 0xff, 0xee, 0x9f, 0x01, - 0x00, 0x00, 0xff, 0xff, 0x8f, 0x4f, 0x2f, 0x44, 0x44, 0x0a, 0x00, 0x00, + // 1585 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x58, 0xcd, 0x72, 0x23, 0x49, + 0x11, 0x56, 0xeb, 0xc7, 0x96, 0x52, 0xd6, 0x8f, 0x6b, 0xbc, 0x84, 0xe8, 0xc5, 0xbb, 0x5e, 0xed, + 0x04, 0x78, 0x77, 0x08, 0x09, 0x4c, 0x6c, 0xc0, 0x06, 0xc1, 0x10, 0x96, 0x25, 0x23, 0x61, 0xaf, + 0xa5, 0x2d, 0x79, 0x66, 0xc2, 0x30, 0x44, 0xd3, 0x52, 0x55, 0xcb, 0x35, 0xd3, 0xea, 0x6a, 0xba, + 0x4b, 0x63, 0xeb, 0x46, 0xf0, 0x00, 0x5c, 0xb9, 0x71, 0xe1, 0x46, 0x70, 0xe1, 0x44, 0xf0, 0x0a, + 0x70, 0xe5, 0xc6, 0x0b, 0xf0, 0x06, 0x5c, 0x89, 0xaa, 0xea, 0x6e, 0x49, 0xc6, 0xc8, 0x3f, 0xc1, + 0x9e, 0xdc, 0x99, 0x95, 0xf9, 0x55, 0xfe, 0x67, 0x59, 0xb0, 0x3f, 0xe1, 0x7c, 0xe2, 0xd2, 0xa6, + 0x1f, 0xb0, 0x77, 0xf6, 0x78, 0xde, 0x24, 0xae, 0xdf, 0x7c, 0x77, 0x30, 0xa2, 0xc2, 0x3e, 0x68, + 0x86, 0x82, 0x07, 0xf6, 0x84, 0x36, 0xfc, 0x80, 0x0b, 0x8e, 0x4c, 0x2d, 0xd9, 0x88, 0x24, 0x1b, + 0xc4, 0xf5, 0x1b, 0x91, 0xa4, 0xf9, 0x8d, 0x08, 0xc5, 0xf6, 0x59, 0xd3, 0xf6, 0x3c, 0x2e, 0x6c, + 0xc1, 0xb8, 0x17, 0x6a, 0x4d, 0xf3, 0xc3, 0xe4, 0x0e, 0x2e, 0xf8, 0x68, 0xe6, 0x34, 0x05, 0x9b, + 0xd2, 0x50, 0xd8, 0x53, 0x5f, 0x0b, 0xd4, 0x3f, 0x80, 0x7c, 0xcf, 0x73, 0xf8, 0xf9, 0xdc, 0xa7, + 0x08, 0x41, 0xd6, 0xb3, 0xa7, 0xb4, 0x66, 0xec, 0x19, 0xfb, 0x05, 0xac, 0xbe, 0xeb, 0x7f, 0x2e, + 0x42, 0xf9, 0x68, 0x16, 0x0a, 0x3e, 0x4d, 0xc4, 0x0e, 0xa1, 0xc0, 0x3c, 0x87, 0x5b, 0x62, 0xee, + 0x6b, 0xd9, 0xe2, 0xc1, 0xd3, 0xc6, 0xff, 0xb6, 0xb0, 0x11, 0x2b, 0xe2, 0x3c, 0x8b, 0x21, 0x8e, + 0x01, 0x5c, 0xf6, 0x96, 0xba, 0xec, 0x92, 0x73, 0x52, 0xdb, 0xd8, 0x33, 0xf6, 0xcb, 0x07, 0xdf, + 0x5c, 0x87, 0x71, 0x9a, 0x48, 0xe3, 0x25, 0x4d, 0xf4, 0x0a, 0x80, 0xb0, 0xb1, 0x74, 0xd8, 0x0e, + 0xe6, 0xb5, 0xb4, 0xb2, 0xe5, 0xb3, 0x75, 0x38, 0xab, 0xae, 0x34, 0xda, 0x89, 0x72, 0x37, 0x85, + 0x97, 0xa0, 0x50, 0x17, 0x72, 0x01, 0x9d, 0xd0, 0xeb, 0x5a, 0x46, 0x61, 0x7e, 0xe7, 0x01, 0x98, + 0x58, 0xea, 0x75, 0x53, 0x58, 0x03, 0x20, 0x1b, 0xca, 0xe1, 0x2c, 0x08, 0xf8, 0xc4, 0x16, 0x54, + 0x87, 0x2c, 0xab, 0x20, 0x7f, 0xf0, 0x00, 0xc8, 0x61, 0x0c, 0x20, 0xa9, 0x6e, 0x0a, 0x97, 0xc2, + 0x65, 0x06, 0xb2, 0xa1, 0x42, 0xa8, 0xa0, 0xca, 0x78, 0x2b, 0x98, 0xb9, 0x34, 0xac, 0x6d, 0xee, + 0x65, 0x1e, 0x78, 0x47, 0x3b, 0x46, 0xc0, 0x33, 0x97, 0xe2, 0x32, 0x59, 0x26, 0x43, 0xf3, 0x77, + 0x06, 0xc0, 0x22, 0x58, 0xe8, 0x17, 0x50, 0xb8, 0xe2, 0x01, 0xb1, 0x5c, 0x16, 0x8a, 0xa8, 0x04, + 0x9e, 0x3f, 0x2a, 0xec, 0x8d, 0x57, 0x3c, 0x20, 0xa7, 0x2c, 0x14, 0xdd, 0x14, 0xce, 0x5f, 0x45, + 0xdf, 0xe6, 0x1e, 0xe4, 0x63, 0x3e, 0xda, 0x81, 0x9c, 
0xe4, 0x87, 0x35, 0x63, 0x2f, 0xb3, 0x5f, + 0xc0, 0x9a, 0x68, 0xe5, 0x61, 0x23, 0xe4, 0xb3, 0x60, 0x4c, 0xcd, 0x8f, 0x20, 0xa7, 0x22, 0x8e, + 0x6a, 0xb0, 0xe9, 0xdb, 0x42, 0xd0, 0xc0, 0x8b, 0x0a, 0x38, 0x26, 0xcd, 0x0a, 0x94, 0x56, 0x22, + 0x68, 0xfe, 0x23, 0x07, 0xa5, 0x15, 0x7f, 0x11, 0x83, 0xad, 0x4b, 0x2e, 0x94, 0x4f, 0x32, 0x80, + 0x91, 0x4f, 0xed, 0xc7, 0xc6, 0xaf, 0xd1, 0xd5, 0x60, 0xf2, 0xbb, 0x9b, 0xc2, 0xc5, 0xcb, 0x05, + 0x69, 0x0e, 0xa1, 0x30, 0x08, 0xf8, 0x35, 0x9b, 0x32, 0x31, 0x47, 0x1f, 0x43, 0xe9, 0x8a, 0x79, + 0x84, 0x5f, 0x59, 0x23, 0xea, 0xf0, 0x40, 0x5f, 0x9c, 0xc3, 0x5b, 0x9a, 0xd9, 0x52, 0x3c, 0xf4, + 0x11, 0x44, 0xb4, 0x65, 0x3b, 0x82, 0x06, 0xaa, 0xce, 0x73, 0xb8, 0xa8, 0x79, 0x87, 0x92, 0x65, + 0xfe, 0xc9, 0x80, 0x9d, 0x45, 0x8f, 0x1c, 0x92, 0x37, 0xb3, 0x50, 0x4c, 0xa9, 0x27, 0xd0, 0x10, + 0xaa, 0x0e, 0xbb, 0xa6, 0x32, 0x55, 0x49, 0xbf, 0x19, 0x0f, 0xe9, 0xb7, 0x6e, 0x0a, 0x57, 0x14, + 0xc2, 0x82, 0x85, 0xbe, 0x0b, 0x4f, 0x02, 0xea, 0xda, 0x82, 0xbd, 0xa3, 0xcb, 0xb8, 0xca, 0xae, + 0x6e, 0x0a, 0xa3, 0xf8, 0x70, 0xa1, 0xd2, 0xda, 0x02, 0xb0, 0x13, 0xab, 0xcc, 0xbf, 0xa7, 0xa1, + 0xb8, 0x14, 0x22, 0xf4, 0x02, 0x4a, 0x49, 0xf8, 0x55, 0xdb, 0x19, 0x8f, 0x6b, 0x3b, 0x1c, 0x67, + 0x51, 0x97, 0xc4, 0x2f, 0xa1, 0xe0, 0xc7, 0xa1, 0x8e, 0xa6, 0x43, 0xeb, 0xd1, 0x29, 0x4d, 0x92, + 0x86, 0x17, 0xa0, 0xe8, 0x37, 0x06, 0xbc, 0xb7, 0x88, 0x80, 0xb5, 0x70, 0x31, 0x1a, 0x1c, 0x5f, + 0x3c, 0xfa, 0xba, 0xdb, 0xb2, 0x89, 0x77, 0xdc, 0x5b, 0xb8, 0xad, 0x0d, 0xc8, 0xca, 0xc1, 0x12, + 0xff, 0xad, 0xef, 0xc2, 0xe6, 0x31, 0xa3, 0x2e, 0xe9, 0x91, 0x5b, 0x47, 0x7a, 0x1f, 0x8a, 0x03, + 0x3b, 0x10, 0x4c, 0x5e, 0xd5, 0x23, 0x68, 0x17, 0xc0, 0x0f, 0xf8, 0x1b, 0x3a, 0x16, 0x16, 0xd3, + 0x39, 0x2c, 0x28, 0x0f, 0x25, 0xa7, 0x47, 0x64, 0xf1, 0x49, 0xad, 0xd0, 0xb7, 0xc7, 0x54, 0x0a, + 0x64, 0x95, 0x40, 0x31, 0xe1, 0xf5, 0x48, 0xfd, 0x29, 0x94, 0x4f, 0x98, 0x47, 0x3a, 0xd7, 0x7e, + 0x40, 0xc3, 0x90, 0x71, 0xef, 0xd6, 0x6b, 0x7f, 0x6f, 0x40, 0xb5, 0x6d, 0x0b, 0x5b, 0xae, 0x36, + 0xda, 0xf7, 0xd5, 0x96, 0x42, 0x3f, 0x85, 0x2d, 0x3f, 0xb6, 0x45, 0xa2, 0xeb, 0xbc, 0x7f, 0x6b, + 0x5d, 0xd4, 0x96, 0x6c, 0xc7, 0x45, 0x7f, 0xc9, 0x91, 0xe7, 0x90, 0x7d, 0xcb, 0x3c, 0x12, 0x25, + 0xfa, 0xd3, 0x75, 0x18, 0xab, 0xe6, 0x62, 0xa5, 0x57, 0xff, 0x8b, 0x01, 0x4f, 0x8e, 0x5c, 0x3e, + 0x23, 0x43, 0xbd, 0x7c, 0x63, 0x1b, 0x31, 0xe4, 0x1d, 0xe6, 0x52, 0x2b, 0xa4, 0xf1, 0xac, 0xfb, + 0xfe, 0xda, 0xac, 0xfe, 0x37, 0x44, 0xe3, 0x98, 0xb9, 0x74, 0x48, 0x05, 0xde, 0x74, 0xf4, 0x07, + 0x6a, 0xc2, 0xce, 0x68, 0x2e, 0x68, 0x68, 0xb9, 0xb2, 0x8e, 0x2c, 0x9f, 0x06, 0x96, 0x3c, 0x52, + 0xd1, 0xcd, 0xe0, 0x6d, 0x75, 0x76, 0x2a, 0x8f, 0x06, 0x34, 0x90, 0xca, 0xe6, 0xfb, 0x32, 0xa7, + 0x5a, 0xb7, 0x0a, 0x99, 0x59, 0xe0, 0x46, 0xb1, 0x95, 0x9f, 0xf5, 0xbf, 0x1a, 0x50, 0x69, 0xb1, + 0xc9, 0x97, 0x33, 0x1a, 0xcc, 0x17, 0x56, 0x57, 0x84, 0x3d, 0x72, 0xa9, 0x15, 0x50, 0x87, 0x06, + 0xd4, 0x1b, 0xc7, 0x43, 0xed, 0x93, 0x75, 0xc6, 0xc7, 0x28, 0xe7, 0x52, 0x15, 0x97, 0x15, 0x02, + 0x8e, 0x01, 0x10, 0x06, 0xc4, 0x08, 0xf5, 0x04, 0x73, 0xe6, 0xcc, 0x9b, 0x58, 0x8e, 0x2c, 0xb2, + 0xb0, 0x96, 0x56, 0xbb, 0xe6, 0xe3, 0x75, 0xb0, 0x51, 0x39, 0xe2, 0xed, 0x25, 0x75, 0xc5, 0x0b, + 0xeb, 0xff, 0xce, 0x42, 0x29, 0x8a, 0xd6, 0x11, 0xf7, 0x1c, 0x36, 0x41, 0x3f, 0x87, 0x6d, 0x12, + 0xd7, 0x89, 0xc5, 0xb5, 0x3b, 0x51, 0x52, 0xbf, 0xbd, 0xee, 0x92, 0x9b, 0xc5, 0xd5, 0x4d, 0xe1, + 0x2a, 0xb9, 0x59, 0x70, 0x14, 0xde, 0x1b, 0xcb, 0x04, 0x59, 0xd1, 0x0b, 0x2b, 0xb9, 0x40, 0xf7, + 0x6b, 0xf3, 0x81, 0x99, 0xed, 0xa6, 0xf0, 0x93, 0xf1, 0x2d, 0x35, 0x73, 0x01, 
0xdb, 0x23, 0x36, + 0xb1, 0x7e, 0x25, 0x63, 0x99, 0x5c, 0xa1, 0x17, 0xff, 0xb3, 0xfb, 0xc4, 0x7f, 0x01, 0x5f, 0x19, + 0xdd, 0x48, 0xac, 0x0d, 0x15, 0xf5, 0x88, 0xf3, 0x6d, 0xcf, 0x1a, 0xab, 0x88, 0xa9, 0x07, 0xd4, + 0x1d, 0xdb, 0x7e, 0x25, 0xc4, 0x8d, 0xf3, 0x08, 0x40, 0x93, 0xb8, 0x2c, 0x56, 0x68, 0xf3, 0x9f, + 0x06, 0x94, 0x57, 0x45, 0xd0, 0xe7, 0x00, 0xa1, 0xb0, 0x03, 0x61, 0x49, 0xd1, 0xa8, 0x92, 0xcc, + 0xc5, 0x85, 0xfa, 0x75, 0xa9, 0x71, 0xe5, 0xeb, 0x12, 0x17, 0x94, 0xb4, 0xa4, 0xd1, 0x67, 0x90, + 0xa7, 0x1e, 0xd1, 0x8a, 0xe9, 0x3b, 0x15, 0x37, 0xa9, 0x47, 0x94, 0xda, 0x2b, 0xf8, 0x84, 0x7a, + 0xaa, 0x82, 0xed, 0x99, 0xe0, 0x96, 0xcf, 0xfd, 0x99, 0xab, 0xde, 0xb6, 0x16, 0x77, 0xac, 0x9b, + 0x11, 0x90, 0xa1, 0xcd, 0xe3, 0xa7, 0x5a, 0xe1, 0x70, 0x26, 0xf8, 0x20, 0x11, 0xef, 0x3b, 0xab, + 0xae, 0x24, 0x63, 0xf2, 0xd7, 0x06, 0x14, 0xe3, 0x78, 0x9f, 0xd0, 0xf9, 0x57, 0xd2, 0x31, 0xbb, + 0x00, 0x01, 0xbf, 0xb2, 0xbc, 0xd9, 0x74, 0x14, 0x2d, 0xee, 0x0c, 0x2e, 0x04, 0xfc, 0xea, 0x4c, + 0x31, 0xea, 0x5f, 0x42, 0x65, 0xb9, 0xa8, 0xa4, 0x15, 0xef, 0x43, 0x41, 0x4d, 0x1b, 0xdf, 0x16, + 0x97, 0x51, 0x8f, 0xab, 0xf1, 0x33, 0xb0, 0xc5, 0xa5, 0x1c, 0xc6, 0x3a, 0x0b, 0xdc, 0x71, 0xe4, + 0x38, 0xd2, 0x80, 0x45, 0xc5, 0xeb, 0x2b, 0x56, 0xfd, 0x0c, 0xb6, 0x92, 0x46, 0x90, 0x78, 0xcf, + 0x01, 0x64, 0xcb, 0x89, 0xb9, 0xf5, 0x96, 0xce, 0x23, 0x87, 0x3e, 0x5c, 0x3b, 0x1b, 0xe9, 0x1c, + 0x17, 0xb4, 0xca, 0x09, 0x9d, 0xd7, 0xff, 0x65, 0x40, 0x46, 0xe2, 0xfc, 0x3f, 0x27, 0xf5, 0x8f, + 0x21, 0xab, 0xdc, 0xd3, 0x93, 0xe3, 0xd9, 0x1d, 0xd6, 0x34, 0xa4, 0xeb, 0x1d, 0x97, 0xaa, 0x0d, + 0xa8, 0x14, 0xcd, 0x73, 0xb9, 0xc2, 0x12, 0xa6, 0x5c, 0x37, 0x6a, 0xf2, 0x47, 0xeb, 0x46, 0x7e, + 0xa3, 0x2a, 0xa4, 0xa3, 0x75, 0x96, 0xe9, 0xa6, 0x70, 0x9a, 0x11, 0xb4, 0x13, 0x2d, 0x25, 0xd9, + 0xe9, 0x85, 0x6e, 0x4a, 0xaf, 0xa5, 0x56, 0x01, 0x36, 0x19, 0x51, 0x0f, 0xf3, 0xfa, 0x6f, 0xd3, + 0x50, 0xc0, 0x74, 0xcc, 0x03, 0x22, 0x1d, 0xbe, 0x80, 0xed, 0xd5, 0x49, 0xb1, 0x88, 0xdf, 0xb3, + 0xfb, 0x4e, 0x89, 0x13, 0x2a, 0xff, 0xb1, 0xa8, 0x8c, 0x6f, 0xe4, 0xb8, 0x0f, 0xa5, 0xc5, 0x84, + 0x93, 0xb0, 0xba, 0x2d, 0xf6, 0xef, 0x35, 0xdd, 0x34, 0xe6, 0x16, 0x59, 0x4e, 0xf2, 0x17, 0x50, + 0x5a, 0x8c, 0x1b, 0x09, 0x98, 0xb9, 0x3b, 0x3b, 0x4b, 0xa5, 0x2f, 0x9f, 0xa8, 0xa3, 0x05, 0x99, + 0x74, 0xc8, 0x25, 0x94, 0x56, 0xca, 0xfb, 0xc6, 0x5b, 0xc1, 0xb8, 0xf9, 0x56, 0xd8, 0x05, 0x50, + 0x66, 0xd1, 0xe5, 0xa7, 0x44, 0xc4, 0xe9, 0x11, 0xf4, 0x75, 0xc8, 0xeb, 0x06, 0x63, 0x44, 0x27, + 0x01, 0x6f, 0x2a, 0xba, 0x47, 0xea, 0x1d, 0xc8, 0x77, 0x54, 0xc9, 0xf5, 0x08, 0xfa, 0x1c, 0x72, + 0x6a, 0xb3, 0x44, 0xc1, 0xbe, 0xd7, 0x62, 0xd1, 0x1a, 0x9f, 0x0a, 0x80, 0xa5, 0x67, 0xaa, 0x09, + 0x5f, 0x3b, 0xed, 0x9d, 0x74, 0x4e, 0x7b, 0xdd, 0x7e, 0xbf, 0x6d, 0xbd, 0x38, 0x1b, 0x0e, 0x3a, + 0x47, 0xbd, 0xe3, 0x5e, 0xa7, 0x5d, 0x4d, 0xa1, 0x6d, 0x28, 0xbd, 0xec, 0xe0, 0x0b, 0xeb, 0xc5, + 0x99, 0x12, 0xb9, 0xa8, 0x1a, 0x68, 0x0b, 0xf2, 0x09, 0x95, 0x96, 0xd4, 0xa0, 0x3f, 0x1c, 0xf6, + 0x5a, 0xa7, 0x9d, 0x6a, 0x06, 0x01, 0x6c, 0x44, 0x27, 0x59, 0x54, 0x81, 0xa2, 0x52, 0x8d, 0x18, + 0xb9, 0xd6, 0x1f, 0x0c, 0xf8, 0x60, 0xcc, 0xa7, 0x6b, 0xec, 0x6c, 0x41, 0xdb, 0xf5, 0xa3, 0x02, + 0x18, 0x18, 0x3f, 0xfb, 0x51, 0x24, 0x39, 0xe1, 0xae, 0xed, 0x4d, 0x1a, 0x3c, 0x98, 0x34, 0x27, + 0xd4, 0x53, 0xc3, 0xb0, 0xa9, 0x8f, 0x6c, 0x9f, 0x85, 0xb7, 0xfd, 0x30, 0xf0, 0x43, 0xe2, 0xfa, + 0x7f, 0x4c, 0xd7, 0x7e, 0xa2, 0xf5, 0x55, 0xa9, 0x35, 0xda, 0xae, 0xdf, 0x78, 0x79, 0xd0, 0x92, + 0xc7, 0x7f, 0x8b, 0x8f, 0x5e, 0xab, 0xa3, 0xd7, 0x6d, 0xd7, 0x7f, 0xfd, 0x52, 0x6b, 0x8e, 0x36, + 0x14, 
0xfe, 0xf7, 0xfe, 0x13, 0x00, 0x00, 0xff, 0xff, 0x78, 0x92, 0xfd, 0x26, 0x77, 0x10, 0x00, + 0x00, } diff --git a/vendor/google.golang.org/genproto/googleapis/spanner/v1/keys.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/v1/keys.pb.go index b86585469..66728bea2 100644 --- a/vendor/google.golang.org/genproto/googleapis/spanner/v1/keys.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/spanner/v1/keys.pb.go @@ -30,6 +30,11 @@ It has these top-level messages: ListSessionsResponse DeleteSessionRequest ExecuteSqlRequest + PartitionOptions + PartitionQueryRequest + PartitionReadRequest + Partition + PartitionResponse ReadRequest BeginTransactionRequest CommitRequest diff --git a/vendor/google.golang.org/genproto/googleapis/spanner/v1/mutation.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/v1/mutation.pb.go index 8abef20cd..2f827795e 100644 --- a/vendor/google.golang.org/genproto/googleapis/spanner/v1/mutation.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/spanner/v1/mutation.pb.go @@ -286,6 +286,8 @@ type Mutation_Delete struct { // Required. The table whose rows will be deleted. Table string `protobuf:"bytes,1,opt,name=table" json:"table,omitempty"` // Required. The primary keys of the rows within [table][google.spanner.v1.Mutation.Delete.table] to delete. + // Delete is idempotent. The transaction will succeed even if some or all + // rows do not exist. KeySet *KeySet `protobuf:"bytes,2,opt,name=key_set,json=keySet" json:"key_set,omitempty"` } diff --git a/vendor/google.golang.org/genproto/googleapis/spanner/v1/spanner.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/v1/spanner.pb.go index 2db458519..27125f188 100644 --- a/vendor/google.golang.org/genproto/googleapis/spanner/v1/spanner.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/spanner/v1/spanner.pb.go @@ -300,8 +300,14 @@ type ExecuteSqlRequest struct { // request that yielded this token. ResumeToken []byte `protobuf:"bytes,6,opt,name=resume_token,json=resumeToken,proto3" json:"resume_token,omitempty"` // Used to control the amount of debugging information returned in - // [ResultSetStats][google.spanner.v1.ResultSetStats]. + // [ResultSetStats][google.spanner.v1.ResultSetStats]. If [partition_token][google.spanner.v1.ExecuteSqlRequest.partition_token] is set, [query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] can only + // be set to [QueryMode.NORMAL][google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL]. QueryMode ExecuteSqlRequest_QueryMode `protobuf:"varint,7,opt,name=query_mode,json=queryMode,enum=google.spanner.v1.ExecuteSqlRequest_QueryMode" json:"query_mode,omitempty"` + // If present, results will be restricted to the specified partition + // previously created using PartitionQuery(). There must be an exact + // match for the values of fields common to this message and the + // PartitionQueryRequest message used to create this partition_token. + PartitionToken []byte `protobuf:"bytes,8,opt,name=partition_token,json=partitionToken,proto3" json:"partition_token,omitempty"` } func (m *ExecuteSqlRequest) Reset() { *m = ExecuteSqlRequest{} } @@ -358,6 +364,267 @@ func (m *ExecuteSqlRequest) GetQueryMode() ExecuteSqlRequest_QueryMode { return ExecuteSqlRequest_NORMAL } +func (m *ExecuteSqlRequest) GetPartitionToken() []byte { + if m != nil { + return m.PartitionToken + } + return nil +} + +// Options for a PartitionQueryRequest and +// PartitionReadRequest. +type PartitionOptions struct { + // The desired data size for each partition generated. 
The default for this + // option is currently 1 GiB. This is only a hint. The actual size of each + // partition may be smaller or larger than this size request. + PartitionSizeBytes int64 `protobuf:"varint,1,opt,name=partition_size_bytes,json=partitionSizeBytes" json:"partition_size_bytes,omitempty"` + // The desired maximum number of partitions to return. For example, this may + // be set to the number of workers available. The default for this option + // is currently 10,000. The maximum value is currently 200,000. This is only + // a hint. The actual number of partitions returned may be smaller or larger + // than this maximum count request. + MaxPartitions int64 `protobuf:"varint,2,opt,name=max_partitions,json=maxPartitions" json:"max_partitions,omitempty"` +} + +func (m *PartitionOptions) Reset() { *m = PartitionOptions{} } +func (m *PartitionOptions) String() string { return proto.CompactTextString(m) } +func (*PartitionOptions) ProtoMessage() {} +func (*PartitionOptions) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{7} } + +func (m *PartitionOptions) GetPartitionSizeBytes() int64 { + if m != nil { + return m.PartitionSizeBytes + } + return 0 +} + +func (m *PartitionOptions) GetMaxPartitions() int64 { + if m != nil { + return m.MaxPartitions + } + return 0 +} + +// The request for [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery] +type PartitionQueryRequest struct { + // Required. The session used to create the partitions. + Session string `protobuf:"bytes,1,opt,name=session" json:"session,omitempty"` + // Read only snapshot transactions are supported, read/write and single use + // transactions are not. + Transaction *TransactionSelector `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` + // The query request to generate partitions for. The request will fail if + // the query is not root partitionable. The query plan of a root + // partitionable query has a single distributed union operator. A distributed + // union operator conceptually divides one or more tables into multiple + // splits, remotely evaluates a subquery independently on each split, and + // then unions all results. + Sql string `protobuf:"bytes,3,opt,name=sql" json:"sql,omitempty"` + // The SQL query string can contain parameter placeholders. A parameter + // placeholder consists of `'@'` followed by the parameter + // name. Parameter names consist of any combination of letters, + // numbers, and underscores. + // + // Parameters can appear anywhere that a literal value is expected. The same + // parameter name can be used more than once, for example: + // `"WHERE id > @msg_id AND id < @msg_id + 100"` + // + // It is an error to execute an SQL query with unbound parameters. + // + // Parameter values are specified using `params`, which is a JSON + // object whose keys are parameter names, and whose values are the + // corresponding parameter values. + Params *google_protobuf1.Struct `protobuf:"bytes,4,opt,name=params" json:"params,omitempty"` + // It is not always possible for Cloud Spanner to infer the right SQL type + // from a JSON value. For example, values of type `BYTES` and values + // of type `STRING` both appear in [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings. + // + // In these cases, `param_types` can be used to specify the exact + // SQL type for some or all of the SQL query parameters. See the + // definition of [Type][google.spanner.v1.Type] for more information + // about SQL types. 
+ ParamTypes map[string]*Type `protobuf:"bytes,5,rep,name=param_types,json=paramTypes" json:"param_types,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Additional options that affect how many partitions are created. + PartitionOptions *PartitionOptions `protobuf:"bytes,6,opt,name=partition_options,json=partitionOptions" json:"partition_options,omitempty"` +} + +func (m *PartitionQueryRequest) Reset() { *m = PartitionQueryRequest{} } +func (m *PartitionQueryRequest) String() string { return proto.CompactTextString(m) } +func (*PartitionQueryRequest) ProtoMessage() {} +func (*PartitionQueryRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{8} } + +func (m *PartitionQueryRequest) GetSession() string { + if m != nil { + return m.Session + } + return "" +} + +func (m *PartitionQueryRequest) GetTransaction() *TransactionSelector { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *PartitionQueryRequest) GetSql() string { + if m != nil { + return m.Sql + } + return "" +} + +func (m *PartitionQueryRequest) GetParams() *google_protobuf1.Struct { + if m != nil { + return m.Params + } + return nil +} + +func (m *PartitionQueryRequest) GetParamTypes() map[string]*Type { + if m != nil { + return m.ParamTypes + } + return nil +} + +func (m *PartitionQueryRequest) GetPartitionOptions() *PartitionOptions { + if m != nil { + return m.PartitionOptions + } + return nil +} + +// The request for [PartitionRead][google.spanner.v1.Spanner.PartitionRead] +type PartitionReadRequest struct { + // Required. The session used to create the partitions. + Session string `protobuf:"bytes,1,opt,name=session" json:"session,omitempty"` + // Read only snapshot transactions are supported, read/write and single use + // transactions are not. + Transaction *TransactionSelector `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` + // Required. The name of the table in the database to be read. + Table string `protobuf:"bytes,3,opt,name=table" json:"table,omitempty"` + // If non-empty, the name of an index on [table][google.spanner.v1.PartitionReadRequest.table]. This index is + // used instead of the table primary key when interpreting [key_set][google.spanner.v1.PartitionReadRequest.key_set] + // and sorting result rows. See [key_set][google.spanner.v1.PartitionReadRequest.key_set] for further information. + Index string `protobuf:"bytes,4,opt,name=index" json:"index,omitempty"` + // The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be returned for each row matching + // this request. + Columns []string `protobuf:"bytes,5,rep,name=columns" json:"columns,omitempty"` + // Required. `key_set` identifies the rows to be yielded. `key_set` names the + // primary keys of the rows in [table][google.spanner.v1.PartitionReadRequest.table] to be yielded, unless [index][google.spanner.v1.PartitionReadRequest.index] + // is present. If [index][google.spanner.v1.PartitionReadRequest.index] is present, then [key_set][google.spanner.v1.PartitionReadRequest.key_set] instead names + // index keys in [index][google.spanner.v1.PartitionReadRequest.index]. + // + // It is not an error for the `key_set` to name rows that do not + // exist in the database. Read yields nothing for nonexistent rows. + KeySet *KeySet `protobuf:"bytes,6,opt,name=key_set,json=keySet" json:"key_set,omitempty"` + // Additional options that affect how many partitions are created. 
+ PartitionOptions *PartitionOptions `protobuf:"bytes,9,opt,name=partition_options,json=partitionOptions" json:"partition_options,omitempty"` +} + +func (m *PartitionReadRequest) Reset() { *m = PartitionReadRequest{} } +func (m *PartitionReadRequest) String() string { return proto.CompactTextString(m) } +func (*PartitionReadRequest) ProtoMessage() {} +func (*PartitionReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{9} } + +func (m *PartitionReadRequest) GetSession() string { + if m != nil { + return m.Session + } + return "" +} + +func (m *PartitionReadRequest) GetTransaction() *TransactionSelector { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *PartitionReadRequest) GetTable() string { + if m != nil { + return m.Table + } + return "" +} + +func (m *PartitionReadRequest) GetIndex() string { + if m != nil { + return m.Index + } + return "" +} + +func (m *PartitionReadRequest) GetColumns() []string { + if m != nil { + return m.Columns + } + return nil +} + +func (m *PartitionReadRequest) GetKeySet() *KeySet { + if m != nil { + return m.KeySet + } + return nil +} + +func (m *PartitionReadRequest) GetPartitionOptions() *PartitionOptions { + if m != nil { + return m.PartitionOptions + } + return nil +} + +// Information returned for each partition returned in a +// PartitionResponse. +type Partition struct { + // This token can be passed to Read, StreamingRead, ExecuteSql, or + // ExecuteStreamingSql requests to restrict the results to those identified by + // this partition token. + PartitionToken []byte `protobuf:"bytes,1,opt,name=partition_token,json=partitionToken,proto3" json:"partition_token,omitempty"` +} + +func (m *Partition) Reset() { *m = Partition{} } +func (m *Partition) String() string { return proto.CompactTextString(m) } +func (*Partition) ProtoMessage() {} +func (*Partition) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{10} } + +func (m *Partition) GetPartitionToken() []byte { + if m != nil { + return m.PartitionToken + } + return nil +} + +// The response for [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery] +// or [PartitionRead][google.spanner.v1.Spanner.PartitionRead] +type PartitionResponse struct { + // Partitions created by this request. + Partitions []*Partition `protobuf:"bytes,1,rep,name=partitions" json:"partitions,omitempty"` + // Transaction created by this request. + Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` +} + +func (m *PartitionResponse) Reset() { *m = PartitionResponse{} } +func (m *PartitionResponse) String() string { return proto.CompactTextString(m) } +func (*PartitionResponse) ProtoMessage() {} +func (*PartitionResponse) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{11} } + +func (m *PartitionResponse) GetPartitions() []*Partition { + if m != nil { + return m.Partitions + } + return nil +} + +func (m *PartitionResponse) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + // The request for [Read][google.spanner.v1.Spanner.Read] and // [StreamingRead][google.spanner.v1.Spanner.StreamingRead]. type ReadRequest struct { @@ -380,14 +647,17 @@ type ReadRequest struct { // is present. If [index][google.spanner.v1.ReadRequest.index] is present, then [key_set][google.spanner.v1.ReadRequest.key_set] instead names // index keys in [index][google.spanner.v1.ReadRequest.index]. 
// - // Rows are yielded in table primary key order (if [index][google.spanner.v1.ReadRequest.index] is empty) - // or index key order (if [index][google.spanner.v1.ReadRequest.index] is non-empty). + // If the [partition_token][google.spanner.v1.ReadRequest.partition_token] field is empty, rows are yielded + // in table primary key order (if [index][google.spanner.v1.ReadRequest.index] is empty) or index key order + // (if [index][google.spanner.v1.ReadRequest.index] is non-empty). If the [partition_token][google.spanner.v1.ReadRequest.partition_token] field is not + // empty, rows will be yielded in an unspecified order. // // It is not an error for the `key_set` to name rows that do not // exist in the database. Read yields nothing for nonexistent rows. KeySet *KeySet `protobuf:"bytes,6,opt,name=key_set,json=keySet" json:"key_set,omitempty"` // If greater than zero, only the first `limit` rows are yielded. If `limit` - // is zero, the default is no limit. + // is zero, the default is no limit. A limit cannot be specified if + // `partition_token` is set. Limit int64 `protobuf:"varint,8,opt,name=limit" json:"limit,omitempty"` // If this request is resuming a previously interrupted read, // `resume_token` should be copied from the last @@ -396,12 +666,17 @@ type ReadRequest struct { // rest of the request parameters must exactly match the request // that yielded this token. ResumeToken []byte `protobuf:"bytes,9,opt,name=resume_token,json=resumeToken,proto3" json:"resume_token,omitempty"` + // If present, results will be restricted to the specified partition + // previously created using PartitionRead(). There must be an exact + // match for the values of fields common to this message and the + // PartitionReadRequest message used to create this partition_token. + PartitionToken []byte `protobuf:"bytes,10,opt,name=partition_token,json=partitionToken,proto3" json:"partition_token,omitempty"` } func (m *ReadRequest) Reset() { *m = ReadRequest{} } func (m *ReadRequest) String() string { return proto.CompactTextString(m) } func (*ReadRequest) ProtoMessage() {} -func (*ReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{7} } +func (*ReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{12} } func (m *ReadRequest) GetSession() string { if m != nil { @@ -459,6 +734,13 @@ func (m *ReadRequest) GetResumeToken() []byte { return nil } +func (m *ReadRequest) GetPartitionToken() []byte { + if m != nil { + return m.PartitionToken + } + return nil +} + // The request for [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction]. type BeginTransactionRequest struct { // Required. The session in which the transaction runs. 
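Taken together, the additions in this file compose into a simple fan-out pattern: one PartitionQuery (or PartitionRead) call yields a set of partition tokens, and each token is then replayed with the same query or read, typically from a pool of parallel workers. What follows is a minimal, hedged sketch against the generated client shown in this diff, not part of the vendored code itself; the read-only TransactionSelector and TransactionOptions literals are assumed from transaction.pb.go, which this diff does not touch, and row handling is elided.

package main

import (
	"context"
	"sync"

	spannerpb "google.golang.org/genproto/googleapis/spanner/v1"
)

// queryInParallel fans one SQL statement out across partition tokens.
// Assumes `sess` names an existing session on an already-dialed client.
func queryInParallel(ctx context.Context, client spannerpb.SpannerClient, sess, sql string) error {
	resp, err := client.PartitionQuery(ctx, &spannerpb.PartitionQueryRequest{
		Session: sess,
		Sql:     sql,
		// Only read-only snapshot transactions are accepted here; the
		// selector below is an assumption based on transaction.pb.go.
		Transaction: &spannerpb.TransactionSelector{
			Selector: &spannerpb.TransactionSelector_Begin{
				Begin: &spannerpb.TransactionOptions{
					Mode: &spannerpb.TransactionOptions_ReadOnly_{
						ReadOnly: &spannerpb.TransactionOptions_ReadOnly{},
					},
				},
			},
		},
		// MaxPartitions is only a hint, per the PartitionOptions comments.
		PartitionOptions: &spannerpb.PartitionOptions{MaxPartitions: 8},
	})
	if err != nil {
		return err
	}
	var wg sync.WaitGroup
	for _, p := range resp.Partitions {
		wg.Add(1)
		go func(token []byte) {
			defer wg.Done()
			// Fields shared with the PartitionQueryRequest (session, sql,
			// transaction) must match exactly; the transaction created by
			// PartitionQuery is reused by its ID from the response.
			_, err := client.ExecuteSql(ctx, &spannerpb.ExecuteSqlRequest{
				Session: sess,
				Sql:     sql,
				Transaction: &spannerpb.TransactionSelector{
					Selector: &spannerpb.TransactionSelector_Id{Id: resp.Transaction.Id},
				},
				PartitionToken: token,
			})
			_ = err // a real caller would collect rows and errors
		}(p.PartitionToken)
	}
	wg.Wait()
	return nil
}

The same shape works for PartitionRead plus StreamingRead: build a PartitionReadRequest with the table, columns, and key set, then pass each returned partition_token in a ReadRequest. Note that the tokens die with the session or when it begins a new transaction, so workers must share the session that created them.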
@@ -470,7 +752,7 @@ type BeginTransactionRequest struct { func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } func (*BeginTransactionRequest) ProtoMessage() {} -func (*BeginTransactionRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{8} } +func (*BeginTransactionRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{13} } func (m *BeginTransactionRequest) GetSession() string { if m != nil { @@ -505,7 +787,7 @@ type CommitRequest struct { func (m *CommitRequest) Reset() { *m = CommitRequest{} } func (m *CommitRequest) String() string { return proto.CompactTextString(m) } func (*CommitRequest) ProtoMessage() {} -func (*CommitRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{9} } +func (*CommitRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{14} } type isCommitRequest_Transaction interface { isCommitRequest_Transaction() @@ -635,7 +917,7 @@ type CommitResponse struct { func (m *CommitResponse) Reset() { *m = CommitResponse{} } func (m *CommitResponse) String() string { return proto.CompactTextString(m) } func (*CommitResponse) ProtoMessage() {} -func (*CommitResponse) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{10} } +func (*CommitResponse) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{15} } func (m *CommitResponse) GetCommitTimestamp() *google_protobuf3.Timestamp { if m != nil { @@ -655,7 +937,7 @@ type RollbackRequest struct { func (m *RollbackRequest) Reset() { *m = RollbackRequest{} } func (m *RollbackRequest) String() string { return proto.CompactTextString(m) } func (*RollbackRequest) ProtoMessage() {} -func (*RollbackRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{11} } +func (*RollbackRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{16} } func (m *RollbackRequest) GetSession() string { if m != nil { @@ -679,6 +961,11 @@ func init() { proto.RegisterType((*ListSessionsResponse)(nil), "google.spanner.v1.ListSessionsResponse") proto.RegisterType((*DeleteSessionRequest)(nil), "google.spanner.v1.DeleteSessionRequest") proto.RegisterType((*ExecuteSqlRequest)(nil), "google.spanner.v1.ExecuteSqlRequest") + proto.RegisterType((*PartitionOptions)(nil), "google.spanner.v1.PartitionOptions") + proto.RegisterType((*PartitionQueryRequest)(nil), "google.spanner.v1.PartitionQueryRequest") + proto.RegisterType((*PartitionReadRequest)(nil), "google.spanner.v1.PartitionReadRequest") + proto.RegisterType((*Partition)(nil), "google.spanner.v1.Partition") + proto.RegisterType((*PartitionResponse)(nil), "google.spanner.v1.PartitionResponse") proto.RegisterType((*ReadRequest)(nil), "google.spanner.v1.ReadRequest") proto.RegisterType((*BeginTransactionRequest)(nil), "google.spanner.v1.BeginTransactionRequest") proto.RegisterType((*CommitRequest)(nil), "google.spanner.v1.CommitRequest") @@ -787,6 +1074,24 @@ type SpannerClient interface { // transaction was already aborted, or the transaction is not // found. `Rollback` never returns `ABORTED`. Rollback(ctx context.Context, in *RollbackRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) + // Creates a set of partition tokens that can be used to execute a query + // operation in parallel. Each of the returned partition tokens can be used + // by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to specify a subset + // of the query result to read. 
The same session and read-only transaction + // must be used by the PartitionQueryRequest used to create the + // partition tokens and the ExecuteSqlRequests that use the partition tokens. + // Partition tokens become invalid when the session used to create them + // is deleted or begins a new transaction. + PartitionQuery(ctx context.Context, in *PartitionQueryRequest, opts ...grpc.CallOption) (*PartitionResponse, error) + // Creates a set of partition tokens that can be used to execute a read + // operation in parallel. Each of the returned partition tokens can be used + // by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a subset of the read + // result to read. The same session and read-only transaction must be used by + // the PartitionReadRequest used to create the partition tokens and the + // ReadRequests that use the partition tokens. + // Partition tokens become invalid when the session used to create them + // is deleted or begins a new transaction. + PartitionRead(ctx context.Context, in *PartitionReadRequest, opts ...grpc.CallOption) (*PartitionResponse, error) } type spannerClient struct { @@ -942,6 +1247,24 @@ func (c *spannerClient) Rollback(ctx context.Context, in *RollbackRequest, opts return out, nil } +func (c *spannerClient) PartitionQuery(ctx context.Context, in *PartitionQueryRequest, opts ...grpc.CallOption) (*PartitionResponse, error) { + out := new(PartitionResponse) + err := grpc.Invoke(ctx, "/google.spanner.v1.Spanner/PartitionQuery", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *spannerClient) PartitionRead(ctx context.Context, in *PartitionReadRequest, opts ...grpc.CallOption) (*PartitionResponse, error) { + out := new(PartitionResponse) + err := grpc.Invoke(ctx, "/google.spanner.v1.Spanner/PartitionRead", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // Server API for Spanner service type SpannerServer interface { @@ -1034,6 +1357,24 @@ type SpannerServer interface { // transaction was already aborted, or the transaction is not // found. `Rollback` never returns `ABORTED`. Rollback(context.Context, *RollbackRequest) (*google_protobuf4.Empty, error) + // Creates a set of partition tokens that can be used to execute a query + // operation in parallel. Each of the returned partition tokens can be used + // by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to specify a subset + // of the query result to read. The same session and read-only transaction + // must be used by the PartitionQueryRequest used to create the + // partition tokens and the ExecuteSqlRequests that use the partition tokens. + // Partition tokens become invalid when the session used to create them + // is deleted or begins a new transaction. + PartitionQuery(context.Context, *PartitionQueryRequest) (*PartitionResponse, error) + // Creates a set of partition tokens that can be used to execute a read + // operation in parallel. Each of the returned partition tokens can be used + // by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a subset of the read + // result to read. The same session and read-only transaction must be used by + // the PartitionReadRequest used to create the partition tokens and the + // ReadRequests that use the partition tokens. + // Partition tokens become invalid when the session used to create them + // is deleted or begins a new transaction. 
+ PartitionRead(context.Context, *PartitionReadRequest) (*PartitionResponse, error) } func RegisterSpannerServer(s *grpc.Server, srv SpannerServer) { @@ -1244,6 +1585,42 @@ func _Spanner_Rollback_Handler(srv interface{}, ctx context.Context, dec func(in return interceptor(ctx, in, info, handler) } +func _Spanner_PartitionQuery_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PartitionQueryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpannerServer).PartitionQuery(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.v1.Spanner/PartitionQuery", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpannerServer).PartitionQuery(ctx, req.(*PartitionQueryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Spanner_PartitionRead_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PartitionReadRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpannerServer).PartitionRead(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.v1.Spanner/PartitionRead", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpannerServer).PartitionRead(ctx, req.(*PartitionReadRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Spanner_serviceDesc = grpc.ServiceDesc{ ServiceName: "google.spanner.v1.Spanner", HandlerType: (*SpannerServer)(nil), @@ -1284,6 +1661,14 @@ var _Spanner_serviceDesc = grpc.ServiceDesc{ MethodName: "Rollback", Handler: _Spanner_Rollback_Handler, }, + { + MethodName: "PartitionQuery", + Handler: _Spanner_PartitionQuery_Handler, + }, + { + MethodName: "PartitionRead", + Handler: _Spanner_PartitionRead_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ -1303,94 +1688,109 @@ var _Spanner_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("google/spanner/v1/spanner.proto", fileDescriptor4) } var fileDescriptor4 = []byte{ - // 1416 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x4f, 0x6f, 0x13, 0x47, - 0x14, 0x67, 0x9d, 0xc4, 0x89, 0x9f, 0xf3, 0x8f, 0x21, 0x0d, 0xc6, 0x50, 0x30, 0x4b, 0x21, 0x91, - 0xa5, 0xda, 0x4d, 0x8a, 0x2a, 0x30, 0x6d, 0x81, 0x80, 0x81, 0x08, 0x87, 0x98, 0x75, 0x00, 0x09, - 0x51, 0x59, 0x63, 0x7b, 0x70, 0xb7, 0xd9, 0x7f, 0xd9, 0x19, 0x47, 0x31, 0x15, 0x97, 0x4a, 0x3d, - 0xf5, 0xd2, 0x52, 0x55, 0x3d, 0xb4, 0xb7, 0xf6, 0x54, 0x71, 0xef, 0xad, 0x1f, 0xa0, 0xd7, 0x7e, - 0x85, 0x7e, 0x8b, 0x5e, 0xaa, 0xf9, 0xe7, 0x6c, 0xec, 0xc5, 0x09, 0x72, 0xd5, 0x93, 0x67, 0xe6, - 0xbd, 0x79, 0xef, 0xb7, 0xbf, 0xf7, 0x66, 0x7e, 0x63, 0x38, 0xd7, 0xf6, 0xfd, 0xb6, 0x43, 0x8a, - 0x34, 0xc0, 0x9e, 0x47, 0xc2, 0xe2, 0xee, 0x8a, 0x1e, 0x16, 0x82, 0xd0, 0x67, 0x3e, 0x3a, 0x2e, - 0x1d, 0x0a, 0x7a, 0x75, 0x77, 0x25, 0x7b, 0x46, 0xed, 0xc1, 0x81, 0x5d, 0xc4, 0x9e, 0xe7, 0x33, - 0xcc, 0x6c, 0xdf, 0xa3, 0x72, 0x43, 0xf6, 0xb4, 0xb2, 0x8a, 0x59, 0xa3, 0xf3, 0xbc, 0x48, 0xdc, - 0x80, 0x75, 0x95, 0xf1, 0x4c, 0xbf, 0x91, 0xb2, 0xb0, 0xd3, 0x64, 0xca, 0x7a, 0xae, 0xdf, 0xca, - 0x6c, 0x97, 0x50, 0x86, 0xdd, 0xa0, 0x6f, 0x7b, 0x04, 0xed, 0x36, 0xe9, 0xea, 0xcc, 0xb9, 0x41, - 0xab, 0xdb, 0x91, 0xe0, 0x94, 0x87, 0x39, 0xe8, 0x11, 0x12, 
0xda, 0x71, 0x58, 0x9d, 0x12, 0x0d, - 0xe2, 0xc2, 0xa0, 0x0f, 0x0b, 0xb1, 0x47, 0x71, 0x33, 0x12, 0x28, 0x06, 0x08, 0xeb, 0x06, 0x44, - 0x5a, 0xcd, 0xcf, 0x61, 0xe1, 0x56, 0x48, 0x30, 0x23, 0x35, 0x42, 0xa9, 0xed, 0x7b, 0x16, 0xd9, - 0xe9, 0x10, 0xca, 0x50, 0x16, 0xa6, 0x5a, 0x98, 0xe1, 0x06, 0xa6, 0x24, 0x63, 0xe4, 0x8c, 0xe5, - 0x94, 0xd5, 0x9b, 0xa3, 0xcb, 0x30, 0x49, 0xa5, 0x77, 0x26, 0x91, 0x33, 0x96, 0xd3, 0xab, 0xd9, - 0xc2, 0x00, 0xf3, 0x05, 0x1d, 0x4f, 0xbb, 0x9a, 0xaf, 0x13, 0x30, 0xa9, 0x16, 0x11, 0x82, 0x71, - 0x0f, 0xbb, 0x3a, 0xb2, 0x18, 0xa3, 0x4f, 0x21, 0xe9, 0xe0, 0x06, 0x71, 0x68, 0x26, 0x91, 0x1b, - 0x5b, 0x4e, 0xaf, 0x5e, 0x7a, 0x73, 0xd0, 0x42, 0x45, 0x38, 0x96, 0x3d, 0x16, 0x76, 0x2d, 0xb5, - 0x0b, 0x5d, 0x83, 0x74, 0x53, 0x7c, 0x49, 0x9d, 0x97, 0x22, 0x33, 0x76, 0x10, 0x99, 0xae, 0x53, - 0x61, 0x4b, 0xd7, 0xc9, 0x02, 0xe9, 0xce, 0x17, 0xd0, 0x23, 0x38, 0x85, 0x83, 0x20, 0xf4, 0xf7, - 0x6c, 0x97, 0x47, 0x70, 0x30, 0x65, 0xf5, 0x0e, 0x55, 0xa1, 0xc6, 0x0f, 0x0d, 0xb5, 0x18, 0xd9, - 0x5c, 0xc1, 0x94, 0x3d, 0xa2, 0x22, 0x6c, 0xf6, 0x2a, 0xa4, 0x23, 0x50, 0xd1, 0x3c, 0x8c, 0x6d, - 0x93, 0xae, 0xfa, 0x6a, 0x3e, 0x44, 0x0b, 0x30, 0xb1, 0x8b, 0x9d, 0x0e, 0x11, 0x44, 0xa6, 0x2c, - 0x39, 0x29, 0x25, 0xae, 0x18, 0xe6, 0x12, 0x1c, 0xbf, 0x4b, 0x58, 0x5f, 0x55, 0x62, 0x78, 0x33, - 0xbf, 0x36, 0xe0, 0x44, 0xc5, 0xa6, 0xda, 0x95, 0x1e, 0xa5, 0x82, 0xa7, 0x21, 0x15, 0xe0, 0x36, - 0xa9, 0x53, 0xfb, 0x85, 0x4c, 0x3d, 0x61, 0x4d, 0xf1, 0x85, 0x9a, 0xfd, 0x82, 0xa0, 0x77, 0x01, - 0x84, 0x91, 0xf9, 0xdb, 0xc4, 0x13, 0x3c, 0xa6, 0x2c, 0xe1, 0xbe, 0xc5, 0x17, 0xd0, 0x22, 0x24, - 0x9f, 0xdb, 0x0e, 0x23, 0xa1, 0xe0, 0x25, 0x65, 0xa9, 0x99, 0xb9, 0x0b, 0x0b, 0x07, 0x61, 0xd0, - 0xc0, 0xf7, 0x28, 0x41, 0x1f, 0xc1, 0x94, 0x6a, 0x01, 0x9a, 0x31, 0x44, 0x65, 0x87, 0xb5, 0x4b, - 0xcf, 0x17, 0x5d, 0x82, 0x39, 0x8f, 0xec, 0xb1, 0x7a, 0x04, 0x8b, 0x24, 0x69, 0x86, 0x2f, 0x57, - 0x35, 0x1e, 0x33, 0x0f, 0x0b, 0xb7, 0x89, 0x43, 0x06, 0x3a, 0x38, 0x8e, 0xab, 0x6f, 0xc6, 0xe1, - 0x78, 0x79, 0x8f, 0x34, 0x3b, 0x8c, 0xd4, 0x76, 0x1c, 0xed, 0x99, 0xd9, 0xef, 0x67, 0xe9, 0xac, - 0xa7, 0xe8, 0x1e, 0xa4, 0x23, 0x07, 0x4a, 0x75, 0x7b, 0x5c, 0x63, 0x6e, 0xed, 0x7b, 0xd5, 0x88, - 0x43, 0x9a, 0xcc, 0x0f, 0xad, 0xe8, 0x56, 0x5e, 0x7a, 0xba, 0xe3, 0x28, 0x36, 0xf9, 0x10, 0x15, - 0x21, 0x19, 0xe0, 0x10, 0xbb, 0x54, 0xf5, 0xd7, 0xc9, 0x81, 0xfe, 0xaa, 0x89, 0x0b, 0xc7, 0x52, - 0x6e, 0xe8, 0x11, 0xa4, 0xc5, 0xa8, 0xce, 0x8f, 0x2f, 0xcd, 0x4c, 0x08, 0x2e, 0x2f, 0xc7, 0x80, - 0x19, 0xf8, 0xc2, 0x42, 0x95, 0xef, 0xdb, 0xe2, 0xdb, 0xe4, 0x99, 0x81, 0xa0, 0xb7, 0x80, 0xce, - 0xc3, 0x34, 0xbf, 0x58, 0x5c, 0x4d, 0x72, 0x32, 0x67, 0x2c, 0x4f, 0x5b, 0x69, 0xb9, 0x26, 0x4b, - 0xbe, 0x01, 0xb0, 0xd3, 0x21, 0x61, 0xb7, 0xee, 0xfa, 0x2d, 0x92, 0x99, 0xcc, 0x19, 0xcb, 0xb3, - 0xab, 0x85, 0x23, 0x25, 0x7e, 0xc8, 0xb7, 0x6d, 0xf8, 0x2d, 0x62, 0xa5, 0x76, 0xf4, 0x30, 0xfb, - 0x18, 0xe6, 0xfa, 0x00, 0xc5, 0x9c, 0x8c, 0xf7, 0xa3, 0x27, 0x23, 0xc2, 0x4e, 0x94, 0xf4, 0x6e, - 0x40, 0xa2, 0x47, 0xa6, 0x00, 0xa9, 0x5e, 0x3e, 0x04, 0x90, 0x7c, 0xb0, 0x69, 0x6d, 0xdc, 0xac, - 0xcc, 0x1f, 0x43, 0x53, 0x30, 0x5e, 0xad, 0xdc, 0x7c, 0x30, 0x6f, 0xa0, 0x34, 0x4c, 0x56, 0xad, - 0xcd, 0x3b, 0xeb, 0x95, 0xf2, 0x7c, 0xc2, 0xfc, 0x35, 0x01, 0x69, 0x8b, 0xe0, 0xd6, 0xff, 0xd9, - 0x07, 0x0b, 0x30, 0xc1, 0x70, 0xc3, 0x21, 0xaa, 0x13, 0xe4, 0x84, 0xaf, 0xda, 0x5e, 0x8b, 0xec, - 0xa9, 0x23, 0x25, 0x27, 0x1c, 0x4f, 0xd3, 0x77, 0x3a, 0xae, 0x27, 0x8b, 0x9d, 0xb2, 0xf4, 0x14, - 0xad, 0xc2, 0xe4, 0x36, 0xe9, 0x72, 0x25, 0x10, 0xe5, 0x4a, 0xaf, 0x9e, 0x8a, 0xc1, 
0x72, 0x9f, - 0x74, 0x6b, 0x84, 0x59, 0xc9, 0x6d, 0xf1, 0xcb, 0x73, 0x38, 0xb6, 0x6b, 0xb3, 0xcc, 0x54, 0xce, - 0x58, 0x1e, 0xb3, 0xe4, 0x64, 0xa0, 0xfa, 0xa9, 0x81, 0xea, 0x9b, 0x0c, 0x4e, 0xae, 0x91, 0xb6, - 0xed, 0x45, 0xbe, 0xed, 0x70, 0xc6, 0xae, 0xc3, 0xa4, 0x1f, 0x08, 0xad, 0x55, 0x6c, 0x5d, 0x1c, - 0xce, 0xd6, 0xa6, 0x74, 0xb6, 0xf4, 0x2e, 0xf3, 0x1f, 0x03, 0x66, 0x6e, 0xf9, 0xae, 0x6b, 0xb3, - 0xc3, 0x93, 0x2d, 0xc1, 0x6c, 0x84, 0xe3, 0xba, 0xdd, 0x12, 0x39, 0xa7, 0xef, 0x1d, 0xb3, 0x66, - 0x22, 0xeb, 0xeb, 0x2d, 0xf4, 0x19, 0x2c, 0x52, 0xdb, 0x6b, 0x3b, 0x44, 0x5e, 0xee, 0x91, 0x92, - 0x8e, 0xbd, 0x05, 0xc8, 0x7b, 0xc7, 0xac, 0x05, 0x19, 0x86, 0xdf, 0xf3, 0x91, 0xe2, 0x5e, 0x85, - 0x94, 0x56, 0x71, 0x7e, 0xaa, 0xf9, 0xf9, 0x3c, 0x1d, 0x13, 0x71, 0x43, 0xf9, 0x58, 0xfb, 0xde, - 0x6b, 0x33, 0x07, 0x3a, 0xcc, 0x7c, 0x02, 0xb3, 0xfa, 0xe3, 0xd5, 0x35, 0x5a, 0x86, 0xf9, 0xa6, - 0x58, 0xa9, 0xf7, 0x5e, 0x1a, 0x82, 0x86, 0xe1, 0xc2, 0x34, 0x27, 0xf7, 0xf4, 0x16, 0x4c, 0x0b, - 0xe6, 0x2c, 0xdf, 0x71, 0x1a, 0xb8, 0xb9, 0x7d, 0x38, 0xaf, 0x17, 0xe3, 0x79, 0xed, 0x63, 0x75, - 0xf5, 0xd5, 0x2c, 0x4c, 0xd6, 0xe4, 0xe7, 0xa1, 0x9f, 0x78, 0xd9, 0xa2, 0x0f, 0x0a, 0xb4, 0x14, - 0xc3, 0x40, 0xdc, 0x93, 0x23, 0x3b, 0x44, 0x16, 0xcc, 0xf2, 0x57, 0x7f, 0xfd, 0xfd, 0x7d, 0xe2, - 0xba, 0x59, 0xe2, 0xcf, 0x97, 0x2f, 0xb5, 0x8e, 0x7d, 0x12, 0x84, 0xfe, 0x17, 0xa4, 0xc9, 0x68, - 0x31, 0x5f, 0xb4, 0x3d, 0xca, 0xb0, 0xd7, 0x24, 0x7c, 0xac, 0xed, 0xb4, 0x98, 0x7f, 0x59, 0xd4, - 0x82, 0x52, 0x32, 0xf2, 0xe8, 0x5b, 0x03, 0x60, 0x5f, 0x55, 0xd1, 0x7b, 0x31, 0x19, 0x07, 0x44, - 0x77, 0x28, 0xae, 0x1b, 0x02, 0x57, 0x09, 0x5d, 0x11, 0xb8, 0xb8, 0xc6, 0x1c, 0x01, 0x53, 0x0f, - 0x52, 0x31, 0xff, 0x12, 0xfd, 0x62, 0xc0, 0x74, 0x54, 0x37, 0x51, 0xdc, 0xb5, 0x12, 0xa3, 0xef, - 0xd9, 0xa5, 0x43, 0xfd, 0x64, 0xe7, 0x98, 0x6b, 0x02, 0xe3, 0xc7, 0x68, 0x04, 0xee, 0xd0, 0x2b, - 0x03, 0x66, 0x0e, 0xa8, 0x6c, 0x6c, 0x59, 0xe3, 0x74, 0x38, 0xbb, 0x38, 0xd0, 0x9e, 0x65, 0xfe, - 0xca, 0xd6, 0xd4, 0xe5, 0x47, 0xa2, 0x0e, 0xf6, 0x25, 0x27, 0xb6, 0x9a, 0x03, 0x8a, 0x94, 0x3d, - 0x13, 0xe3, 0x65, 0x89, 0x87, 0x75, 0x8d, 0x30, 0xf3, 0xa1, 0x00, 0x75, 0xdf, 0xbc, 0x23, 0x40, - 0xa9, 0x64, 0x6f, 0x89, 0xab, 0x44, 0x7a, 0x49, 0x79, 0xcf, 0xfd, 0x61, 0xc0, 0x09, 0x0d, 0x83, - 0x85, 0x04, 0xbb, 0xb6, 0xd7, 0x3e, 0x3a, 0xdc, 0x0b, 0x31, 0x5e, 0x55, 0x1c, 0x32, 0x1b, 0x3b, - 0xfb, 0xa8, 0x9f, 0x0a, 0xd4, 0x5b, 0xe6, 0xe6, 0x7f, 0x81, 0x3a, 0x82, 0xb1, 0x64, 0xe4, 0x3f, - 0x30, 0xd0, 0x77, 0x06, 0x8c, 0x73, 0x99, 0x44, 0x67, 0x63, 0xa9, 0xeb, 0xe9, 0xe7, 0x21, 0xd4, - 0xde, 0x17, 0x20, 0xcb, 0xe6, 0x8d, 0x51, 0x40, 0x86, 0x04, 0xb7, 0x38, 0xa9, 0xaf, 0x0d, 0x98, - 0xe9, 0x21, 0x3d, 0x12, 0xb8, 0x23, 0x11, 0xb9, 0x25, 0x30, 0x3e, 0x30, 0xd7, 0x47, 0xc1, 0x48, - 0xa3, 0xb8, 0x24, 0x85, 0xbf, 0x1b, 0x30, 0xdf, 0xaf, 0xa1, 0x28, 0x1f, 0x83, 0xe8, 0x0d, 0x42, - 0x9b, 0x3d, 0x3b, 0x5c, 0x98, 0xcc, 0x27, 0x02, 0xf8, 0x43, 0xb3, 0x32, 0x0a, 0xf0, 0x46, 0x5f, - 0x72, 0x4e, 0xf4, 0xcf, 0x06, 0x24, 0xa5, 0x12, 0xa1, 0x5c, 0xdc, 0x45, 0x1e, 0x55, 0xe8, 0xec, - 0xf9, 0x21, 0x1e, 0xea, 0x32, 0xda, 0x10, 0x40, 0xef, 0x9a, 0x6b, 0xa3, 0x00, 0x95, 0xa2, 0xc6, - 0xe1, 0xfd, 0x68, 0xc0, 0x94, 0xd6, 0x33, 0x64, 0xc6, 0xb5, 0xc0, 0x41, 0xb1, 0x7b, 0xe3, 0x6d, - 0xb4, 0x29, 0x70, 0xad, 0x9b, 0xb7, 0x47, 0xea, 0x4e, 0x95, 0xac, 0x64, 0xe4, 0xd7, 0x7e, 0x30, - 0xe0, 0x9d, 0xa6, 0xef, 0x0e, 0x42, 0x5a, 0x9b, 0x56, 0x5a, 0x59, 0xe5, 0x08, 0xaa, 0xc6, 0xd3, - 0x2b, 0xca, 0xa5, 0xed, 0x3b, 0xd8, 0x6b, 0x17, 0xfc, 0xb0, 0x5d, 0x6c, 0x13, 0x4f, 0xe0, 0x2b, - 0x4a, 0x13, 
0x0e, 0x6c, 0x1a, 0xf9, 0xff, 0x7e, 0x4d, 0x0d, 0x7f, 0x4b, 0x9c, 0xbc, 0x2b, 0xb7, - 0xde, 0x72, 0xfc, 0x4e, 0xab, 0xa0, 0xe2, 0x16, 0x1e, 0xaf, 0xfc, 0xa9, 0x2d, 0xcf, 0x84, 0xe5, - 0x99, 0xb2, 0x3c, 0x7b, 0xbc, 0xd2, 0x48, 0x8a, 0xc0, 0x1f, 0xfe, 0x1b, 0x00, 0x00, 0xff, 0xff, - 0x57, 0x35, 0x15, 0x7f, 0x4e, 0x11, 0x00, 0x00, + // 1657 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x58, 0xdd, 0x6f, 0x53, 0xc9, + 0x15, 0xe7, 0xda, 0x89, 0x13, 0x1f, 0xc7, 0x89, 0x33, 0x98, 0x60, 0x0c, 0x85, 0x70, 0xf9, 0x48, + 0x64, 0xa9, 0x36, 0x49, 0x51, 0x15, 0x02, 0x2d, 0x10, 0x08, 0x90, 0x92, 0x10, 0x73, 0x9d, 0x80, + 0x8a, 0xa8, 0xac, 0xb1, 0x3d, 0xb8, 0xb7, 0xb9, 0x5f, 0xb9, 0x33, 0x8e, 0x62, 0x2a, 0x5e, 0x5a, + 0xf5, 0xbd, 0x2d, 0xaa, 0xfa, 0xd0, 0xbe, 0xed, 0xdb, 0x8a, 0x47, 0x24, 0xde, 0xf6, 0x65, 0xa5, + 0x7d, 0x58, 0x69, 0x9f, 0xf6, 0x5f, 0xd8, 0xff, 0x62, 0x5f, 0x56, 0x33, 0xf7, 0xc3, 0xd7, 0xf6, + 0xc4, 0x31, 0x32, 0xbb, 0xd2, 0x6a, 0x9f, 0x3c, 0x33, 0xe7, 0xcc, 0x9c, 0xdf, 0xfd, 0x9d, 0x33, + 0x73, 0xce, 0x31, 0x5c, 0x68, 0xda, 0x76, 0xd3, 0x20, 0x25, 0xea, 0x60, 0xcb, 0x22, 0x6e, 0xe9, + 0x60, 0x29, 0x18, 0x16, 0x1d, 0xd7, 0x66, 0x36, 0x9a, 0xf5, 0x14, 0x8a, 0xc1, 0xea, 0xc1, 0x52, + 0xfe, 0x9c, 0xbf, 0x07, 0x3b, 0x7a, 0x09, 0x5b, 0x96, 0xcd, 0x30, 0xd3, 0x6d, 0x8b, 0x7a, 0x1b, + 0xf2, 0x67, 0x7d, 0xa9, 0x98, 0xd5, 0x5a, 0xaf, 0x4a, 0xc4, 0x74, 0x58, 0xdb, 0x17, 0x9e, 0xeb, + 0x15, 0x52, 0xe6, 0xb6, 0xea, 0xcc, 0x97, 0x5e, 0xe8, 0x95, 0x32, 0xdd, 0x24, 0x94, 0x61, 0xd3, + 0xe9, 0xd9, 0x1e, 0x41, 0xbb, 0x47, 0xda, 0x81, 0xe5, 0xf9, 0x7e, 0xa9, 0xd9, 0xf2, 0xc0, 0xf9, + 0x1a, 0x6a, 0xbf, 0x86, 0x4b, 0x68, 0xcb, 0x60, 0x55, 0x4a, 0x02, 0x10, 0x97, 0xfa, 0x75, 0x98, + 0x8b, 0x2d, 0x8a, 0xeb, 0x91, 0x83, 0x24, 0x40, 0x58, 0xdb, 0x21, 0x9e, 0x54, 0xfd, 0x33, 0x64, + 0xef, 0xb9, 0x04, 0x33, 0x52, 0x21, 0x94, 0xea, 0xb6, 0xa5, 0x91, 0xfd, 0x16, 0xa1, 0x0c, 0xe5, + 0x61, 0xb2, 0x81, 0x19, 0xae, 0x61, 0x4a, 0x72, 0xca, 0xbc, 0xb2, 0x98, 0xd4, 0xc2, 0x39, 0xba, + 0x0e, 0x13, 0xd4, 0xd3, 0xce, 0xc5, 0xe6, 0x95, 0xc5, 0xd4, 0x72, 0xbe, 0xd8, 0xc7, 0x7c, 0x31, + 0x38, 0x2f, 0x50, 0x55, 0xdf, 0xc5, 0x60, 0xc2, 0x5f, 0x44, 0x08, 0xc6, 0x2c, 0x6c, 0x06, 0x27, + 0x8b, 0x31, 0xfa, 0x3d, 0x24, 0x0c, 0x5c, 0x23, 0x06, 0xcd, 0xc5, 0xe6, 0xe3, 0x8b, 0xa9, 0xe5, + 0xab, 0x47, 0x1f, 0x5a, 0xdc, 0x14, 0x8a, 0xeb, 0x16, 0x73, 0xdb, 0x9a, 0xbf, 0x0b, 0xdd, 0x84, + 0x54, 0x5d, 0x7c, 0x49, 0x95, 0xbb, 0x22, 0x17, 0xef, 0x46, 0x16, 0xf8, 0xa9, 0xb8, 0x13, 0xf8, + 0x49, 0x03, 0x4f, 0x9d, 0x2f, 0xa0, 0x5d, 0x38, 0x83, 0x1d, 0xc7, 0xb5, 0x0f, 0x75, 0x93, 0x9f, + 0x60, 0x60, 0xca, 0xaa, 0x2d, 0xea, 0x1f, 0x35, 0x76, 0xec, 0x51, 0x73, 0x91, 0xcd, 0x9b, 0x98, + 0xb2, 0x5d, 0x2a, 0x8e, 0xcd, 0xdf, 0x80, 0x54, 0x04, 0x2a, 0xca, 0x40, 0x7c, 0x8f, 0xb4, 0xfd, + 0xaf, 0xe6, 0x43, 0x94, 0x85, 0xf1, 0x03, 0x6c, 0xb4, 0x88, 0x20, 0x32, 0xa9, 0x79, 0x93, 0xd5, + 0xd8, 0x8a, 0xa2, 0x2e, 0xc0, 0xec, 0x43, 0xc2, 0x7a, 0xbc, 0x22, 0xe1, 0x4d, 0xfd, 0x87, 0x02, + 0x27, 0x37, 0x75, 0x1a, 0xa8, 0xd2, 0x61, 0x3c, 0x78, 0x16, 0x92, 0x0e, 0x6e, 0x92, 0x2a, 0xd5, + 0x5f, 0x7b, 0xa6, 0xc7, 0xb5, 0x49, 0xbe, 0x50, 0xd1, 0x5f, 0x13, 0xf4, 0x2b, 0x00, 0x21, 0x64, + 0xf6, 0x1e, 0xb1, 0x04, 0x8f, 0x49, 0x4d, 0xa8, 0xef, 0xf0, 0x05, 0x34, 0x07, 0x89, 0x57, 0xba, + 0xc1, 0x88, 0x2b, 0x78, 0x49, 0x6a, 0xfe, 0x4c, 0x3d, 0x80, 0x6c, 0x37, 0x0c, 0xea, 0xd8, 0x16, + 0x25, 0xe8, 0xb7, 0x30, 0xe9, 0x87, 0x00, 0xcd, 0x29, 0xc2, 0xb3, 0x83, 0xc2, 0x25, 0xd4, 0x45, + 0x57, 0x61, 0xc6, 0x22, 0x87, 0xac, 
0x1a, 0xc1, 0xe2, 0x91, 0x94, 0xe6, 0xcb, 0xe5, 0x00, 0x8f, + 0x5a, 0x80, 0xec, 0x7d, 0x62, 0x90, 0xbe, 0x08, 0x96, 0x71, 0xf5, 0x7e, 0x0c, 0x66, 0xd7, 0x0f, + 0x49, 0xbd, 0xc5, 0x48, 0x65, 0xdf, 0x08, 0x34, 0x73, 0x9d, 0x78, 0xf6, 0x94, 0x83, 0x29, 0x7a, + 0x04, 0xa9, 0xc8, 0x85, 0xf2, 0xa3, 0x5d, 0x16, 0x98, 0x3b, 0x1d, 0xad, 0x0a, 0x31, 0x48, 0x9d, + 0xd9, 0xae, 0x16, 0xdd, 0xca, 0x5d, 0x4f, 0xf7, 0x0d, 0x9f, 0x4d, 0x3e, 0x44, 0x25, 0x48, 0x38, + 0xd8, 0xc5, 0x26, 0xf5, 0xe3, 0xeb, 0x74, 0x5f, 0x7c, 0x55, 0xc4, 0x83, 0xa3, 0xf9, 0x6a, 0x68, + 0x17, 0x52, 0x62, 0x54, 0xe5, 0xd7, 0x97, 0xe6, 0xc6, 0x05, 0x97, 0xd7, 0x25, 0x60, 0xfa, 0xbe, + 0xb0, 0x58, 0xe6, 0xfb, 0x76, 0xf8, 0x36, 0xef, 0xce, 0x80, 0x13, 0x2e, 0xa0, 0x8b, 0x30, 0xc5, + 0x1f, 0x16, 0x33, 0x20, 0x39, 0x31, 0xaf, 0x2c, 0x4e, 0x69, 0x29, 0x6f, 0xcd, 0x73, 0xf9, 0x16, + 0xc0, 0x7e, 0x8b, 0xb8, 0xed, 0xaa, 0x69, 0x37, 0x48, 0x6e, 0x62, 0x5e, 0x59, 0x9c, 0x5e, 0x2e, + 0x0e, 0x65, 0xf8, 0x29, 0xdf, 0xb6, 0x65, 0x37, 0x88, 0x96, 0xdc, 0x0f, 0x86, 0x68, 0x01, 0x66, + 0x1c, 0xec, 0x32, 0x9d, 0x13, 0xe3, 0x1b, 0x9d, 0x14, 0x46, 0xa7, 0xc3, 0x65, 0x61, 0x37, 0xff, + 0x0c, 0x66, 0x7a, 0x90, 0x4b, 0xae, 0xd0, 0xaf, 0xa3, 0x57, 0x28, 0x42, 0x63, 0xd4, 0x3b, 0x6d, + 0x87, 0x44, 0xef, 0x56, 0x11, 0x92, 0x21, 0x30, 0x04, 0x90, 0x78, 0xb2, 0xad, 0x6d, 0xdd, 0xdd, + 0xcc, 0x9c, 0x40, 0x93, 0x30, 0x56, 0xde, 0xbc, 0xfb, 0x24, 0xa3, 0xa0, 0x14, 0x4c, 0x94, 0xb5, + 0xed, 0x07, 0x1b, 0x9b, 0xeb, 0x99, 0x98, 0xba, 0x07, 0x99, 0x72, 0x80, 0x6c, 0xdb, 0x11, 0x19, + 0x04, 0x5d, 0x83, 0x6c, 0xe7, 0x23, 0xf8, 0x3d, 0xaa, 0xd6, 0xda, 0x8c, 0x50, 0x81, 0x2c, 0xae, + 0xa1, 0x50, 0xc6, 0xaf, 0xd4, 0x1a, 0x97, 0xa0, 0x2b, 0x30, 0x6d, 0xe2, 0xc3, 0x6a, 0x28, 0xa1, + 0x02, 0x71, 0x5c, 0x4b, 0x9b, 0xf8, 0x30, 0x3c, 0x9e, 0xaa, 0x5f, 0xc6, 0xe1, 0x54, 0x38, 0x15, + 0x30, 0x7f, 0x66, 0x71, 0xfa, 0x47, 0x59, 0x9c, 0xae, 0x48, 0xc0, 0x48, 0xbf, 0x72, 0x60, 0xac, + 0x96, 0x61, 0xb6, 0x43, 0xba, 0xed, 0x79, 0x42, 0x04, 0x6c, 0x6a, 0xf9, 0xd2, 0x20, 0x03, 0xbe, + 0xd3, 0xb4, 0x8c, 0xd3, 0xb3, 0xf2, 0xa3, 0x85, 0xd8, 0x57, 0x31, 0xc8, 0x86, 0xe6, 0x35, 0x82, + 0x1b, 0x3f, 0xa5, 0x13, 0xb3, 0x30, 0xce, 0x70, 0xcd, 0x20, 0xbe, 0x1b, 0xbd, 0x09, 0x5f, 0xd5, + 0xad, 0x06, 0x39, 0xf4, 0xdf, 0x6d, 0x6f, 0xc2, 0xf1, 0xd4, 0x6d, 0xa3, 0x65, 0x5a, 0x9e, 0xa7, + 0x92, 0x5a, 0x30, 0x45, 0xcb, 0x30, 0xb1, 0x47, 0xda, 0xbc, 0xdc, 0xf0, 0x29, 0x3e, 0x23, 0xc1, + 0xf2, 0x98, 0xb4, 0x2b, 0x84, 0x69, 0x89, 0x3d, 0xf1, 0x2b, 0x77, 0x50, 0x72, 0x04, 0x07, 0xa9, + 0xd7, 0x21, 0x19, 0x6a, 0xc9, 0x5e, 0x0e, 0x45, 0xf6, 0x72, 0xa8, 0x6f, 0x15, 0x98, 0x8d, 0xd0, + 0xef, 0xa7, 0xa2, 0x5b, 0x3c, 0xb3, 0x85, 0xb7, 0xcf, 0x4b, 0x46, 0xe7, 0x06, 0xc1, 0xd2, 0x22, + 0xfa, 0xe8, 0x8e, 0xcc, 0x3f, 0xe7, 0x07, 0xfb, 0xa7, 0xcb, 0x2f, 0xea, 0x37, 0x31, 0x48, 0xfd, + 0x72, 0x62, 0x21, 0x0b, 0xe3, 0x86, 0x6e, 0xea, 0x4c, 0x3c, 0xee, 0x71, 0xcd, 0x9b, 0xf4, 0xa5, + 0x9b, 0x64, 0x7f, 0xba, 0x91, 0x78, 0x19, 0xa4, 0x5e, 0x66, 0x70, 0x7a, 0x8d, 0x34, 0x75, 0x2b, + 0x4a, 0xf8, 0xb1, 0xd4, 0xde, 0x86, 0x89, 0x20, 0x30, 0x3d, 0x5a, 0xaf, 0x0c, 0xa6, 0x35, 0x08, + 0xcd, 0x60, 0x97, 0xfa, 0xbd, 0x02, 0xe9, 0x7b, 0xb6, 0x69, 0xea, 0xec, 0x78, 0x63, 0x0b, 0x30, + 0x1d, 0x71, 0x46, 0x55, 0x6f, 0x08, 0x9b, 0x53, 0x8f, 0x4e, 0x68, 0xe9, 0xc8, 0xfa, 0x46, 0x03, + 0xfd, 0x09, 0xe6, 0xa8, 0x6e, 0x35, 0x0d, 0xe2, 0x95, 0x9d, 0x11, 0xdf, 0xc7, 0x3f, 0x02, 0xe4, + 0xa3, 0x13, 0x5a, 0xd6, 0x3b, 0x86, 0x57, 0xa0, 0x91, 0x28, 0xb8, 0x01, 0xc9, 0xa0, 0xbf, 0xe0, + 0xef, 0x38, 0x0f, 0xfc, 0xb3, 0x92, 0x13, 0xb7, 0x7c, 0x1d, 
0xad, 0xa3, 0xbd, 0x96, 0xee, 0x0a, + 0x45, 0xf5, 0x39, 0x4c, 0x07, 0x1f, 0xef, 0xdf, 0xaa, 0x75, 0xc8, 0xd4, 0xc5, 0x4a, 0x35, 0xec, + 0x81, 0x04, 0x0d, 0x83, 0x4b, 0xe6, 0x19, 0x6f, 0x4f, 0xb8, 0xa0, 0x6a, 0x30, 0xa3, 0xd9, 0x86, + 0x51, 0xc3, 0xf5, 0xbd, 0xe3, 0x79, 0xbd, 0x22, 0xe7, 0xb5, 0x87, 0xd5, 0xe5, 0xbf, 0xcf, 0xc2, + 0x44, 0xc5, 0xfb, 0x3c, 0xf4, 0x3f, 0xee, 0xb6, 0x68, 0xab, 0x83, 0x16, 0x24, 0x0c, 0xc8, 0x9a, + 0xa1, 0xfc, 0x80, 0x82, 0x55, 0x5d, 0xff, 0xdb, 0xb7, 0xdf, 0xbd, 0x8d, 0xdd, 0x56, 0x57, 0x79, + 0x63, 0xf5, 0xd7, 0xa0, 0xc2, 0xfe, 0x9d, 0xe3, 0xda, 0x7f, 0x21, 0x75, 0x46, 0x4b, 0x85, 0x92, + 0x6e, 0x51, 0x86, 0xad, 0x3a, 0xe1, 0xe3, 0x40, 0x4e, 0x4b, 0x85, 0x37, 0xa5, 0xa0, 0xd4, 0x5d, + 0x55, 0x0a, 0xe8, 0x9f, 0x0a, 0x40, 0xa7, 0xde, 0x47, 0x97, 0x25, 0x16, 0xfb, 0xda, 0x81, 0x81, + 0xb8, 0xee, 0x08, 0x5c, 0xab, 0x68, 0x45, 0xe0, 0xe2, 0xd5, 0xef, 0x10, 0x98, 0x42, 0x48, 0xa5, + 0xc2, 0x1b, 0xf4, 0x99, 0x02, 0x53, 0xd1, 0x8a, 0x1e, 0xc9, 0xde, 0x1f, 0x49, 0xe7, 0x91, 0x5f, + 0x38, 0x56, 0xcf, 0x8b, 0x1c, 0x75, 0x4d, 0x60, 0xbc, 0x85, 0x46, 0xe0, 0x0e, 0xfd, 0x5b, 0x81, + 0x74, 0x57, 0xfd, 0x2f, 0x75, 0xab, 0xac, 0x43, 0xc8, 0xcf, 0xf5, 0x85, 0xe7, 0x3a, 0xef, 0xff, + 0x03, 0xea, 0x0a, 0x23, 0x51, 0x07, 0x9d, 0x62, 0x58, 0xea, 0xcd, 0xbe, 0x5a, 0x39, 0x2f, 0xcb, + 0x44, 0x9a, 0x68, 0xf9, 0x2b, 0x84, 0xa9, 0x4f, 0x05, 0xa8, 0xc7, 0xea, 0x03, 0x01, 0xca, 0x37, + 0xf6, 0x91, 0xb8, 0x56, 0x49, 0x68, 0x94, 0xc7, 0xdc, 0x17, 0x0a, 0x9c, 0x0c, 0x60, 0x30, 0x97, + 0x60, 0x53, 0xb7, 0x9a, 0xc3, 0xc3, 0x3d, 0x32, 0x9f, 0x63, 0xa3, 0x83, 0xfa, 0x85, 0x40, 0xbd, + 0xa3, 0x6e, 0x7f, 0x0a, 0xd4, 0x11, 0x8c, 0xab, 0x4a, 0xe1, 0x9a, 0x82, 0xfe, 0xa5, 0xc0, 0x18, + 0xcf, 0xa7, 0xe8, 0xbc, 0x94, 0xba, 0x30, 0xd1, 0x1e, 0x43, 0xed, 0x63, 0x01, 0x72, 0x5d, 0xbd, + 0x33, 0x0a, 0x48, 0x97, 0xe0, 0x06, 0x27, 0xf5, 0x9d, 0x02, 0xe9, 0x10, 0xe9, 0x50, 0xe0, 0x86, + 0x22, 0x72, 0x47, 0x60, 0x7c, 0xa2, 0x6e, 0x8c, 0x82, 0x91, 0x46, 0x71, 0x79, 0x14, 0x7e, 0x50, + 0x20, 0xd3, 0x9b, 0x43, 0x51, 0x41, 0x82, 0xe8, 0x88, 0x44, 0x9b, 0x3f, 0xa6, 0x00, 0x52, 0x9f, + 0x0b, 0xe0, 0x4f, 0xd5, 0xcd, 0x51, 0x80, 0xd7, 0x7a, 0x8c, 0x73, 0xa2, 0xff, 0xaf, 0x40, 0xc2, + 0xcb, 0x44, 0x68, 0x5e, 0xf6, 0x90, 0x47, 0x33, 0x74, 0xfe, 0xe2, 0x00, 0x0d, 0xff, 0x31, 0xda, + 0x12, 0x40, 0x1f, 0xaa, 0x6b, 0xa3, 0x00, 0xf5, 0x92, 0x1a, 0x87, 0xf7, 0x5f, 0x05, 0x26, 0x83, + 0x7c, 0x86, 0x54, 0x59, 0x08, 0x74, 0x27, 0xbb, 0x23, 0x5f, 0xa3, 0x6d, 0x81, 0x6b, 0x43, 0xbd, + 0x3f, 0x52, 0x74, 0xfa, 0xc6, 0x38, 0xb2, 0x0f, 0x0a, 0x4c, 0x77, 0xb7, 0x5e, 0x68, 0x71, 0xd8, + 0xee, 0x2c, 0x7f, 0x79, 0x60, 0xb9, 0x1c, 0x70, 0xb9, 0x2b, 0x30, 0x6f, 0xab, 0x7f, 0x18, 0x05, + 0xb3, 0xd3, 0x05, 0x80, 0x23, 0x7f, 0xaf, 0x40, 0xba, 0xab, 0xa9, 0x92, 0xbe, 0xf5, 0xb2, 0xb6, + 0x6b, 0x48, 0xdc, 0x9f, 0xe4, 0x96, 0x39, 0x51, 0xfb, 0xab, 0x4a, 0x61, 0xed, 0x3f, 0x0a, 0x9c, + 0xaa, 0xdb, 0x66, 0x3f, 0x82, 0xb5, 0x29, 0xbf, 0x38, 0x29, 0x73, 0x97, 0x97, 0x95, 0x17, 0x2b, + 0xbe, 0x4a, 0xd3, 0x36, 0xb0, 0xd5, 0x2c, 0xda, 0x6e, 0xb3, 0xd4, 0x24, 0x96, 0x08, 0x88, 0x92, + 0x27, 0xc2, 0x8e, 0x4e, 0x23, 0x7f, 0xe5, 0xde, 0xf4, 0x87, 0x9f, 0xc7, 0x4e, 0x3f, 0xf4, 0xb6, + 0xde, 0x33, 0xec, 0x56, 0xa3, 0xe8, 0x9f, 0x5b, 0x7c, 0xb6, 0xf4, 0x75, 0x20, 0x79, 0x29, 0x24, + 0x2f, 0x7d, 0xc9, 0xcb, 0x67, 0x4b, 0xb5, 0x84, 0x38, 0xf8, 0x37, 0x3f, 0x04, 0x00, 0x00, 0xff, + 0xff, 0x92, 0x18, 0x4b, 0x1c, 0x59, 0x17, 0x00, 0x00, } diff --git a/vendor/google.golang.org/genproto/googleapis/tracing/v1/trace.pb.go 
b/vendor/google.golang.org/genproto/googleapis/tracing/v1/trace.pb.go deleted file mode 100644 index b25493668..000000000 --- a/vendor/google.golang.org/genproto/googleapis/tracing/v1/trace.pb.go +++ /dev/null @@ -1,888 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/tracing/trace.proto - -/* -Package tracing is a generated protocol buffer package. - -It is generated from these files: - google/tracing/trace.proto - -It has these top-level messages: - TraceId - Module - StackTrace - LabelValue - Span - Trace -*/ -package tracing - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "google.golang.org/genproto/googleapis/api/annotations" -import google_protobuf1 "github.com/golang/protobuf/ptypes/timestamp" -import google_rpc "google.golang.org/genproto/googleapis/rpc/status" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// The type of the network event. SENT or RECV event. -type Span_TimeEvent_NetworkEvent_Type int32 - -const ( - Span_TimeEvent_NetworkEvent_UNSPECIFIED Span_TimeEvent_NetworkEvent_Type = 0 - Span_TimeEvent_NetworkEvent_SENT Span_TimeEvent_NetworkEvent_Type = 1 - Span_TimeEvent_NetworkEvent_RECV Span_TimeEvent_NetworkEvent_Type = 2 -) - -var Span_TimeEvent_NetworkEvent_Type_name = map[int32]string{ - 0: "UNSPECIFIED", - 1: "SENT", - 2: "RECV", -} -var Span_TimeEvent_NetworkEvent_Type_value = map[string]int32{ - "UNSPECIFIED": 0, - "SENT": 1, - "RECV": 2, -} - -func (x Span_TimeEvent_NetworkEvent_Type) String() string { - return proto.EnumName(Span_TimeEvent_NetworkEvent_Type_name, int32(x)) -} -func (Span_TimeEvent_NetworkEvent_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{4, 0, 1, 0} -} - -// The type of the link. -type Span_Link_Type int32 - -const ( - Span_Link_UNSPECIFIED Span_Link_Type = 0 - Span_Link_CHILD Span_Link_Type = 1 - Span_Link_PARENT Span_Link_Type = 2 -) - -var Span_Link_Type_name = map[int32]string{ - 0: "UNSPECIFIED", - 1: "CHILD", - 2: "PARENT", -} -var Span_Link_Type_value = map[string]int32{ - "UNSPECIFIED": 0, - "CHILD": 1, - "PARENT": 2, -} - -func (x Span_Link_Type) String() string { - return proto.EnumName(Span_Link_Type_name, int32(x)) -} -func (Span_Link_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 1, 0} } - -// A TraceId uniquely identifies a Trace. It is conceptually a 128-bit value, -// represented as a string, containing the hex-encoded value. -type TraceId struct { - // Trace ID specified as a hex-encoded string. *Must* be 32 bytes long. - HexEncoded string `protobuf:"bytes,1,opt,name=hex_encoded,json=hexEncoded" json:"hex_encoded,omitempty"` -} - -func (m *TraceId) Reset() { *m = TraceId{} } -func (m *TraceId) String() string { return proto.CompactTextString(m) } -func (*TraceId) ProtoMessage() {} -func (*TraceId) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -func (m *TraceId) GetHexEncoded() string { - if m != nil { - return m.HexEncoded - } - return "" -} - -type Module struct { - // Binary module. - // E.g. 
main binary, kernel modules, and dynamic libraries - // such as libc.so, sharedlib.so - Module string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - // Build_id is a unique identifier for the module, - // probably a hash of its contents - BuildId string `protobuf:"bytes,2,opt,name=build_id,json=buildId" json:"build_id,omitempty"` -} - -func (m *Module) Reset() { *m = Module{} } -func (m *Module) String() string { return proto.CompactTextString(m) } -func (*Module) ProtoMessage() {} -func (*Module) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -func (m *Module) GetModule() string { - if m != nil { - return m.Module - } - return "" -} - -func (m *Module) GetBuildId() string { - if m != nil { - return m.BuildId - } - return "" -} - -type StackTrace struct { - // Stack frames of this stack trace. - StackFrame []*StackTrace_StackFrame `protobuf:"bytes,1,rep,name=stack_frame,json=stackFrame" json:"stack_frame,omitempty"` - // User can choose to use his own hash function to hash large labels to save - // network bandwidth and storage. - // Typical usage is to pass both initially to inform the storage of the - // mapping. And in subsequent calls, pass in stack_trace_hash_id only. - // User shall verify the hash value is successfully stored. - StackTraceHashId uint64 `protobuf:"varint,2,opt,name=stack_trace_hash_id,json=stackTraceHashId" json:"stack_trace_hash_id,omitempty"` -} - -func (m *StackTrace) Reset() { *m = StackTrace{} } -func (m *StackTrace) String() string { return proto.CompactTextString(m) } -func (*StackTrace) ProtoMessage() {} -func (*StackTrace) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -func (m *StackTrace) GetStackFrame() []*StackTrace_StackFrame { - if m != nil { - return m.StackFrame - } - return nil -} - -func (m *StackTrace) GetStackTraceHashId() uint64 { - if m != nil { - return m.StackTraceHashId - } - return 0 -} - -// Presents a single stack frame in a stack trace. -type StackTrace_StackFrame struct { - // Fully qualified names which uniquely identify function/method/etc. - FunctionName string `protobuf:"bytes,1,opt,name=function_name,json=functionName" json:"function_name,omitempty"` - // Used when function name is ‘mangled’. Not guaranteed to be fully - // qualified but usually it is. - OrigFunctionName string `protobuf:"bytes,2,opt,name=orig_function_name,json=origFunctionName" json:"orig_function_name,omitempty"` - // File name of the frame. - FileName string `protobuf:"bytes,3,opt,name=file_name,json=fileName" json:"file_name,omitempty"` - // Line number of the frame. - LineNumber int64 `protobuf:"varint,4,opt,name=line_number,json=lineNumber" json:"line_number,omitempty"` - // Column number is important in JavaScript(anonymous functions), - // Might not be available in some languages. - ColumnNumber int64 `protobuf:"varint,5,opt,name=column_number,json=columnNumber" json:"column_number,omitempty"` - // Binary module the code is loaded from. - LoadModule *Module `protobuf:"bytes,6,opt,name=load_module,json=loadModule" json:"load_module,omitempty"` - // source_version is deployment specific. It might be - // better to be stored in deployment metadata. - // However, in distributed tracing, it’s hard to keep track of - // source/binary versions at one place for all spans. 
- SourceVersion string `protobuf:"bytes,7,opt,name=source_version,json=sourceVersion" json:"source_version,omitempty"` -} - -func (m *StackTrace_StackFrame) Reset() { *m = StackTrace_StackFrame{} } -func (m *StackTrace_StackFrame) String() string { return proto.CompactTextString(m) } -func (*StackTrace_StackFrame) ProtoMessage() {} -func (*StackTrace_StackFrame) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } - -func (m *StackTrace_StackFrame) GetFunctionName() string { - if m != nil { - return m.FunctionName - } - return "" -} - -func (m *StackTrace_StackFrame) GetOrigFunctionName() string { - if m != nil { - return m.OrigFunctionName - } - return "" -} - -func (m *StackTrace_StackFrame) GetFileName() string { - if m != nil { - return m.FileName - } - return "" -} - -func (m *StackTrace_StackFrame) GetLineNumber() int64 { - if m != nil { - return m.LineNumber - } - return 0 -} - -func (m *StackTrace_StackFrame) GetColumnNumber() int64 { - if m != nil { - return m.ColumnNumber - } - return 0 -} - -func (m *StackTrace_StackFrame) GetLoadModule() *Module { - if m != nil { - return m.LoadModule - } - return nil -} - -func (m *StackTrace_StackFrame) GetSourceVersion() string { - if m != nil { - return m.SourceVersion - } - return "" -} - -// Allowed label values. -type LabelValue struct { - // The value of the label. - // - // Types that are valid to be assigned to Value: - // *LabelValue_StringValue - // *LabelValue_IntValue - // *LabelValue_BoolValue - Value isLabelValue_Value `protobuf_oneof:"value"` -} - -func (m *LabelValue) Reset() { *m = LabelValue{} } -func (m *LabelValue) String() string { return proto.CompactTextString(m) } -func (*LabelValue) ProtoMessage() {} -func (*LabelValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -type isLabelValue_Value interface { - isLabelValue_Value() -} - -type LabelValue_StringValue struct { - StringValue string `protobuf:"bytes,1,opt,name=string_value,json=stringValue,oneof"` -} -type LabelValue_IntValue struct { - IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,oneof"` -} -type LabelValue_BoolValue struct { - BoolValue bool `protobuf:"varint,3,opt,name=bool_value,json=boolValue,oneof"` -} - -func (*LabelValue_StringValue) isLabelValue_Value() {} -func (*LabelValue_IntValue) isLabelValue_Value() {} -func (*LabelValue_BoolValue) isLabelValue_Value() {} - -func (m *LabelValue) GetValue() isLabelValue_Value { - if m != nil { - return m.Value - } - return nil -} - -func (m *LabelValue) GetStringValue() string { - if x, ok := m.GetValue().(*LabelValue_StringValue); ok { - return x.StringValue - } - return "" -} - -func (m *LabelValue) GetIntValue() int64 { - if x, ok := m.GetValue().(*LabelValue_IntValue); ok { - return x.IntValue - } - return 0 -} - -func (m *LabelValue) GetBoolValue() bool { - if x, ok := m.GetValue().(*LabelValue_BoolValue); ok { - return x.BoolValue - } - return false -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*LabelValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _LabelValue_OneofMarshaler, _LabelValue_OneofUnmarshaler, _LabelValue_OneofSizer, []interface{}{ - (*LabelValue_StringValue)(nil), - (*LabelValue_IntValue)(nil), - (*LabelValue_BoolValue)(nil), - } -} - -func _LabelValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*LabelValue) - // value - switch x := m.Value.(type) { - case *LabelValue_StringValue: - b.EncodeVarint(1<<3 | proto.WireBytes) - b.EncodeStringBytes(x.StringValue) - case *LabelValue_IntValue: - b.EncodeVarint(2<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.IntValue)) - case *LabelValue_BoolValue: - t := uint64(0) - if x.BoolValue { - t = 1 - } - b.EncodeVarint(3<<3 | proto.WireVarint) - b.EncodeVarint(t) - case nil: - default: - return fmt.Errorf("LabelValue.Value has unexpected type %T", x) - } - return nil -} - -func _LabelValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*LabelValue) - switch tag { - case 1: // value.string_value - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Value = &LabelValue_StringValue{x} - return true, err - case 2: // value.int_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Value = &LabelValue_IntValue{int64(x)} - return true, err - case 3: // value.bool_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Value = &LabelValue_BoolValue{x != 0} - return true, err - default: - return false, nil - } -} - -func _LabelValue_OneofSizer(msg proto.Message) (n int) { - m := msg.(*LabelValue) - // value - switch x := m.Value.(type) { - case *LabelValue_StringValue: - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.StringValue))) - n += len(x.StringValue) - case *LabelValue_IntValue: - n += proto.SizeVarint(2<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.IntValue)) - case *LabelValue_BoolValue: - n += proto.SizeVarint(3<<3 | proto.WireVarint) - n += 1 - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// A span represents a single operation within a trace. Spans can be nested -// and form a trace tree. Often, a trace contains a root span that describes the -// end-to-end latency and, optionally, one or more subspans for -// its sub-operations. Spans do not need to be contiguous. There may be gaps -// between spans in a trace. -type Span struct { - // Identifier for the span. Must be a 64-bit integer other than 0 and - // unique within a trace. - Id uint64 `protobuf:"fixed64,1,opt,name=id" json:"id,omitempty"` - // Name of the span. The span name is sanitized and displayed in the - // Stackdriver Trace tool in the {% dynamic print site_values.console_name %}. - // The name may be a method name or some other per-call site name. - // For the same executable and the same call point, a best practice is - // to use a consistent name, which makes it easier to correlate - // cross-trace spans. - Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` - // ID of parent span. 0 or missing if this is a root span. 
- ParentId uint64 `protobuf:"fixed64,3,opt,name=parent_id,json=parentId" json:"parent_id,omitempty"` - // Local machine clock in nanoseconds from the UNIX epoch, - // at which span execution started. - // On the server side these are the times when the server application - // handler starts running. - LocalStartTime *google_protobuf1.Timestamp `protobuf:"bytes,4,opt,name=local_start_time,json=localStartTime" json:"local_start_time,omitempty"` - // Local machine clock in nanoseconds from the UNIX epoch, - // at which span execution ended. - // On the server side these are the times when the server application - // handler finishes running. - LocalEndTime *google_protobuf1.Timestamp `protobuf:"bytes,5,opt,name=local_end_time,json=localEndTime" json:"local_end_time,omitempty"` - // Properties of a span. Labels at the span level. - // E.g. - // "/instance_id": "my-instance" - // "/zone": "us-central1-a" - // "/grpc/peer_address": "ip:port" (dns, etc.) - // "/grpc/deadline": "Duration" - // "/http/user_agent" - // "/http/request_bytes": 300 - // "/http/response_bytes": 1200 - // "/http/url": google.com/apis - // "/pid" - // "abc.com/mylabel": "my label value" - Labels map[string]*LabelValue `protobuf:"bytes,6,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // Stack trace captured at the start of the span. This is optional. - StackTrace *StackTrace `protobuf:"bytes,7,opt,name=stack_trace,json=stackTrace" json:"stack_trace,omitempty"` - // A collection of time-stamped events. - TimeEvents []*Span_TimeEvent `protobuf:"bytes,8,rep,name=time_events,json=timeEvents" json:"time_events,omitempty"` - // A collection of links. - Links []*Span_Link `protobuf:"bytes,9,rep,name=links" json:"links,omitempty"` - // The final status of the Span. This is optional. - Status *google_rpc.Status `protobuf:"bytes,10,opt,name=status" json:"status,omitempty"` - // True if this Span has a remote parent (is an RPC server Span). 
- HasRemoteParent bool `protobuf:"varint,11,opt,name=has_remote_parent,json=hasRemoteParent" json:"has_remote_parent,omitempty"` -} - -func (m *Span) Reset() { *m = Span{} } -func (m *Span) String() string { return proto.CompactTextString(m) } -func (*Span) ProtoMessage() {} -func (*Span) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } - -func (m *Span) GetId() uint64 { - if m != nil { - return m.Id - } - return 0 -} - -func (m *Span) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Span) GetParentId() uint64 { - if m != nil { - return m.ParentId - } - return 0 -} - -func (m *Span) GetLocalStartTime() *google_protobuf1.Timestamp { - if m != nil { - return m.LocalStartTime - } - return nil -} - -func (m *Span) GetLocalEndTime() *google_protobuf1.Timestamp { - if m != nil { - return m.LocalEndTime - } - return nil -} - -func (m *Span) GetLabels() map[string]*LabelValue { - if m != nil { - return m.Labels - } - return nil -} - -func (m *Span) GetStackTrace() *StackTrace { - if m != nil { - return m.StackTrace - } - return nil -} - -func (m *Span) GetTimeEvents() []*Span_TimeEvent { - if m != nil { - return m.TimeEvents - } - return nil -} - -func (m *Span) GetLinks() []*Span_Link { - if m != nil { - return m.Links - } - return nil -} - -func (m *Span) GetStatus() *google_rpc.Status { - if m != nil { - return m.Status - } - return nil -} - -func (m *Span) GetHasRemoteParent() bool { - if m != nil { - return m.HasRemoteParent - } - return false -} - -// A time-stamped annotation in the Span. -type Span_TimeEvent struct { - // The local machine absolute timestamp when this event happened. - LocalTime *google_protobuf1.Timestamp `protobuf:"bytes,1,opt,name=local_time,json=localTime" json:"local_time,omitempty"` - // Types that are valid to be assigned to Value: - // *Span_TimeEvent_Annotation_ - // *Span_TimeEvent_NetworkEvent_ - Value isSpan_TimeEvent_Value `protobuf_oneof:"value"` -} - -func (m *Span_TimeEvent) Reset() { *m = Span_TimeEvent{} } -func (m *Span_TimeEvent) String() string { return proto.CompactTextString(m) } -func (*Span_TimeEvent) ProtoMessage() {} -func (*Span_TimeEvent) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} } - -type isSpan_TimeEvent_Value interface { - isSpan_TimeEvent_Value() -} - -type Span_TimeEvent_Annotation_ struct { - Annotation *Span_TimeEvent_Annotation `protobuf:"bytes,2,opt,name=annotation,oneof"` -} -type Span_TimeEvent_NetworkEvent_ struct { - NetworkEvent *Span_TimeEvent_NetworkEvent `protobuf:"bytes,3,opt,name=network_event,json=networkEvent,oneof"` -} - -func (*Span_TimeEvent_Annotation_) isSpan_TimeEvent_Value() {} -func (*Span_TimeEvent_NetworkEvent_) isSpan_TimeEvent_Value() {} - -func (m *Span_TimeEvent) GetValue() isSpan_TimeEvent_Value { - if m != nil { - return m.Value - } - return nil -} - -func (m *Span_TimeEvent) GetLocalTime() *google_protobuf1.Timestamp { - if m != nil { - return m.LocalTime - } - return nil -} - -func (m *Span_TimeEvent) GetAnnotation() *Span_TimeEvent_Annotation { - if x, ok := m.GetValue().(*Span_TimeEvent_Annotation_); ok { - return x.Annotation - } - return nil -} - -func (m *Span_TimeEvent) GetNetworkEvent() *Span_TimeEvent_NetworkEvent { - if x, ok := m.GetValue().(*Span_TimeEvent_NetworkEvent_); ok { - return x.NetworkEvent - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*Span_TimeEvent) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-	return _Span_TimeEvent_OneofMarshaler, _Span_TimeEvent_OneofUnmarshaler, _Span_TimeEvent_OneofSizer, []interface{}{
-		(*Span_TimeEvent_Annotation_)(nil),
-		(*Span_TimeEvent_NetworkEvent_)(nil),
-	}
-}
-
-func _Span_TimeEvent_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-	m := msg.(*Span_TimeEvent)
-	// value
-	switch x := m.Value.(type) {
-	case *Span_TimeEvent_Annotation_:
-		b.EncodeVarint(2<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.Annotation); err != nil {
-			return err
-		}
-	case *Span_TimeEvent_NetworkEvent_:
-		b.EncodeVarint(3<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.NetworkEvent); err != nil {
-			return err
-		}
-	case nil:
-	default:
-		return fmt.Errorf("Span_TimeEvent.Value has unexpected type %T", x)
-	}
-	return nil
-}
-
-func _Span_TimeEvent_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-	m := msg.(*Span_TimeEvent)
-	switch tag {
-	case 2: // value.annotation
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(Span_TimeEvent_Annotation)
-		err := b.DecodeMessage(msg)
-		m.Value = &Span_TimeEvent_Annotation_{msg}
-		return true, err
-	case 3: // value.network_event
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(Span_TimeEvent_NetworkEvent)
-		err := b.DecodeMessage(msg)
-		m.Value = &Span_TimeEvent_NetworkEvent_{msg}
-		return true, err
-	default:
-		return false, nil
-	}
-}
-
-func _Span_TimeEvent_OneofSizer(msg proto.Message) (n int) {
-	m := msg.(*Span_TimeEvent)
-	// value
-	switch x := m.Value.(type) {
-	case *Span_TimeEvent_Annotation_:
-		s := proto.Size(x.Annotation)
-		n += proto.SizeVarint(2<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *Span_TimeEvent_NetworkEvent_:
-		s := proto.Size(x.NetworkEvent)
-		n += proto.SizeVarint(3<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case nil:
-	default:
-		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-	}
-	return n
-}
-
-// Text annotation with a set of labels.
-type Span_TimeEvent_Annotation struct {
-	// A user-supplied message describing the event.
-	Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"`
-	// A set of labels on the annotation.
-	Labels map[string]*LabelValue `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
-}
-
-func (m *Span_TimeEvent_Annotation) Reset()         { *m = Span_TimeEvent_Annotation{} }
-func (m *Span_TimeEvent_Annotation) String() string { return proto.CompactTextString(m) }
-func (*Span_TimeEvent_Annotation) ProtoMessage()    {}
-func (*Span_TimeEvent_Annotation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0, 0} }
-
-func (m *Span_TimeEvent_Annotation) GetDescription() string {
-	if m != nil {
-		return m.Description
-	}
-	return ""
-}
-
-func (m *Span_TimeEvent_Annotation) GetLabels() map[string]*LabelValue {
-	if m != nil {
-		return m.Labels
-	}
-	return nil
-}
-
-// An event describing an RPC message sent/received on the network.
-type Span_TimeEvent_NetworkEvent struct {
-	// If available, this is the kernel time:
-	// For sent messages, this is the time at which the first bit was sent.
-	// For received messages, this is the time at which the last bit was
-	// received.
-	KernelTime *google_protobuf1.Timestamp `protobuf:"bytes,1,opt,name=kernel_time,json=kernelTime" json:"kernel_time,omitempty"`
-	Type Span_TimeEvent_NetworkEvent_Type `protobuf:"varint,2,opt,name=type,enum=google.tracing.v1.Span_TimeEvent_NetworkEvent_Type" json:"type,omitempty"`
-	// Every message has an identifier, that must be different from all the
-	// network messages in this span.
-	// This is very important when the request/response are streamed.
-	MessageId uint64 `protobuf:"varint,3,opt,name=message_id,json=messageId" json:"message_id,omitempty"`
-	// Number of bytes send/receive.
-	MessageSize uint64 `protobuf:"varint,4,opt,name=message_size,json=messageSize" json:"message_size,omitempty"`
-}
-
-func (m *Span_TimeEvent_NetworkEvent) Reset()         { *m = Span_TimeEvent_NetworkEvent{} }
-func (m *Span_TimeEvent_NetworkEvent) String() string { return proto.CompactTextString(m) }
-func (*Span_TimeEvent_NetworkEvent) ProtoMessage()    {}
-func (*Span_TimeEvent_NetworkEvent) Descriptor() ([]byte, []int) {
-	return fileDescriptor0, []int{4, 0, 1}
-}
-
-func (m *Span_TimeEvent_NetworkEvent) GetKernelTime() *google_protobuf1.Timestamp {
-	if m != nil {
-		return m.KernelTime
-	}
-	return nil
-}
-
-func (m *Span_TimeEvent_NetworkEvent) GetType() Span_TimeEvent_NetworkEvent_Type {
-	if m != nil {
-		return m.Type
-	}
-	return Span_TimeEvent_NetworkEvent_UNSPECIFIED
-}
-
-func (m *Span_TimeEvent_NetworkEvent) GetMessageId() uint64 {
-	if m != nil {
-		return m.MessageId
-	}
-	return 0
-}
-
-func (m *Span_TimeEvent_NetworkEvent) GetMessageSize() uint64 {
-	if m != nil {
-		return m.MessageSize
-	}
-	return 0
-}
-
-// Link one span with another which may be in a different Trace. Used (for
-// example) in batching operations, where a single batch handler processes
-// multiple requests from different traces.
-type Span_Link struct {
-	// The trace and span identifier of the linked span.
-	TraceId *TraceId `protobuf:"bytes,1,opt,name=trace_id,json=traceId" json:"trace_id,omitempty"`
-	SpanId uint64 `protobuf:"fixed64,2,opt,name=span_id,json=spanId" json:"span_id,omitempty"`
-	Type Span_Link_Type `protobuf:"varint,3,opt,name=type,enum=google.tracing.v1.Span_Link_Type" json:"type,omitempty"`
-}
-
-func (m *Span_Link) Reset()                    { *m = Span_Link{} }
-func (m *Span_Link) String() string            { return proto.CompactTextString(m) }
-func (*Span_Link) ProtoMessage()               {}
-func (*Span_Link) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 1} }
-
-func (m *Span_Link) GetTraceId() *TraceId {
-	if m != nil {
-		return m.TraceId
-	}
-	return nil
-}
-
-func (m *Span_Link) GetSpanId() uint64 {
-	if m != nil {
-		return m.SpanId
-	}
-	return 0
-}
-
-func (m *Span_Link) GetType() Span_Link_Type {
-	if m != nil {
-		return m.Type
-	}
-	return Span_Link_UNSPECIFIED
-}
-
-// A trace describes how long it takes for an application to perform some
-// operations. It consists of a tree of spans, each of which contains details
-// about an operation with time information and operation details.
-type Trace struct {
-	// Globally unique identifier for the trace. Common to all the spans.
-	TraceId *TraceId `protobuf:"bytes,1,opt,name=trace_id,json=traceId" json:"trace_id,omitempty"`
-	// Collection of spans in the trace. The root span has parent_id == 0.
-	Spans []*Span `protobuf:"bytes,2,rep,name=spans" json:"spans,omitempty"`
-}
-
-func (m *Trace) Reset()                    { *m = Trace{} }
-func (m *Trace) String() string            { return proto.CompactTextString(m) }
-func (*Trace) ProtoMessage()               {}
-func (*Trace) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
-
-func (m *Trace) GetTraceId() *TraceId {
-	if m != nil {
-		return m.TraceId
-	}
-	return nil
-}
-
-func (m *Trace) GetSpans() []*Span {
-	if m != nil {
-		return m.Spans
-	}
-	return nil
-}
-
-func init() {
-	proto.RegisterType((*TraceId)(nil), "google.tracing.v1.TraceId")
-	proto.RegisterType((*Module)(nil), "google.tracing.v1.Module")
-	proto.RegisterType((*StackTrace)(nil), "google.tracing.v1.StackTrace")
-	proto.RegisterType((*StackTrace_StackFrame)(nil), "google.tracing.v1.StackTrace.StackFrame")
-	proto.RegisterType((*LabelValue)(nil), "google.tracing.v1.LabelValue")
-	proto.RegisterType((*Span)(nil), "google.tracing.v1.Span")
-	proto.RegisterType((*Span_TimeEvent)(nil), "google.tracing.v1.Span.TimeEvent")
-	proto.RegisterType((*Span_TimeEvent_Annotation)(nil), "google.tracing.v1.Span.TimeEvent.Annotation")
-	proto.RegisterType((*Span_TimeEvent_NetworkEvent)(nil), "google.tracing.v1.Span.TimeEvent.NetworkEvent")
-	proto.RegisterType((*Span_Link)(nil), "google.tracing.v1.Span.Link")
-	proto.RegisterType((*Trace)(nil), "google.tracing.v1.Trace")
-	proto.RegisterEnum("google.tracing.v1.Span_TimeEvent_NetworkEvent_Type", Span_TimeEvent_NetworkEvent_Type_name, Span_TimeEvent_NetworkEvent_Type_value)
-	proto.RegisterEnum("google.tracing.v1.Span_Link_Type", Span_Link_Type_name, Span_Link_Type_value)
-}
-
-func init() { proto.RegisterFile("google/tracing/trace.proto", fileDescriptor0) }
-
-var fileDescriptor0 = []byte{
-	// 1102 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xdd, 0x6e, 0x1a, 0x47,
-	0x14, 0x66, 0xf9, 0x59, 0xe0, 0x2c, 0x76, 0xc8, 0x54, 0xad, 0x09, 0x8d, 0x65, 0x9b, 0xa8, 0x92,
-	0xe5, 0x26, 0x8b, 0x82, 0x15, 0xc9, 0x8d, 0xa5, 0xaa, 0xb1, 0x8d, 0x0b, 0x52, 0x8a, 0xd0, 0xe0,
-	0x58, 0x55, 0x6f, 0x56, 0xc3, 0xee, 0x18, 0x56, 0x2c, 0xb3, 0xab, 0x9d, 0x81, 0xc6, 0xbe, 0xed,
-	0x1b, 0xf4, 0x1d, 0x7a, 0xdb, 0x37, 0xe8, 0x83, 0xb4, 0x4f, 0x53, 0xcd, 0xcf, 0x62, 0xaa, 0xd8,
-	0x71, 0x53, 0xa9, 0x57, 0xcc, 0x7c, 0xe7, 0x3b, 0x67, 0xce, 0x7c, 0xe7, 0x9c, 0x59, 0xa0, 0x39,
-	0x89, 0xe3, 0x49, 0x44, 0xdb, 0x22, 0x25, 0x7e, 0xc8, 0x26, 0xea, 0x97, 0xba, 0x49, 0x1a, 0x8b,
-	0x18, 0x3d, 0xd6, 0x36, 0xd7, 0xd8, 0xdc, 0xe5, 0xcb, 0xe6, 0x53, 0x43, 0x27, 0x49, 0xd8, 0x26,
-	0x8c, 0xc5, 0x82, 0x88, 0x30, 0x66, 0x5c, 0x3b, 0x34, 0x77, 0x8c, 0x55, 0xed, 0xc6, 0x8b, 0xab,
-	0xb6, 0x08, 0xe7, 0x94, 0x0b, 0x32, 0x4f, 0x0c, 0x61, 0xcb, 0x10, 0xd2, 0xc4, 0x6f, 0x73, 0x41,
-	0xc4, 0xc2, 0x78, 0xb6, 0x0e, 0xa0, 0x7c, 0x21, 0x4f, 0xee, 0x07, 0x68, 0x07, 0x9c, 0x29, 0x7d,
-	0xef, 0x51, 0xe6, 0xc7, 0x01, 0x0d, 0x1a, 0xd6, 0xae, 0xb5, 0x5f, 0xc5, 0x30, 0xa5, 0xef, 0xbb,
-	0x1a, 0x69, 0x1d, 0x83, 0xfd, 0x43, 0x1c, 0x2c, 0x22, 0x8a, 0xbe, 0x00, 0x7b, 0xae, 0x56, 0x86,
-	0x65, 0x76, 0xe8, 0x09, 0x54, 0xc6, 0x8b, 0x30, 0x0a, 0xbc, 0x30, 0x68, 0xe4, 0x95, 0xa5, 0xac,
-	0xf6, 0xfd, 0xa0, 0xf5, 0x7b, 0x01, 0x60, 0x24, 0x88, 0x3f, 0x53, 0xc7, 0xa1, 0x3e, 0x38, 0x5c,
-	0xee, 0xbc, 0xab, 0x94, 0xcc, 0x65, 0x98, 0xc2, 0xbe, 0xd3, 0xd9, 0x77, 0x3f, 0xb8, 0xb8, 0x7b,
-	0xeb, 0xa3, 0x97, 0xe7, 0x92, 0x8f, 0x81, 0xaf, 0xd6, 0xe8, 0x05, 0x7c, 0xa6, 0x43, 0x29, 0x09,
-	0xbd, 0x29, 0xe1, 0xd3, 0xec, 0xfc, 0x22, 0xae, 0xf3, 0x95, 0x7f, 0x8f, 0xf0, 0x69, 0x3f, 0x68,
-	0xfe, 0x96, 0x37, 0x89, 0x68, 0xef, 0x67, 0xb0, 0x71, 0xb5, 0x60, 0xbe, 0x54, 0xd3, 0x63, 0x3a,
-	0x15, 0x99, 0x77, 0x2d, 0x03, 0x07, 0x92, 0xf4, 0x1c, 0x50, 0x9c, 0x86, 0x13, 0xef, 0x9f, 0x4c,
-	0x7d, 0xc3, 0xba, 0xb4, 0x9c, 0xaf, 0xb3, 0xbf, 0x84, 0xea, 0x55, 0x18, 0x51, 0x4d, 0x2a, 0x28,
-	0x52, 0x45, 0x02, 0xca, 0xb8, 0x03, 0x4e, 0x14, 0x32, 0xea, 0xb1, 0xc5, 0x7c, 0x4c, 0xd3, 0x46,
-	0x71, 0xd7, 0xda, 0x2f, 0x60, 0x90, 0xd0, 0x40, 0x21, 0x32, 0x21, 0x3f, 0x8e, 0x16, 0x73, 0x96,
-	0x51, 0x4a, 0x8a, 0x52, 0xd3, 0xa0, 0x21, 0xbd, 0x06, 0x27, 0x8a, 0x49, 0xe0, 0x99, 0x2a, 0xd8,
-	0xbb, 0xd6, 0xbe, 0xd3, 0x79, 0x72, 0x87, 0x7c, 0xba, 0x60, 0x18, 0x24, 0xdb, 0x14, 0xef, 0x2b,
-	0xd8, 0xe4, 0xf1, 0x22, 0xf5, 0xa9, 0xb7, 0xa4, 0x29, 0x0f, 0x63, 0xd6, 0x28, 0xab, 0x1c, 0x37,
-	0x34, 0x7a, 0xa9, 0xc1, 0xd6, 0x0d, 0xc0, 0x5b, 0x32, 0xa6, 0xd1, 0x25, 0x89, 0x16, 0x52, 0xa6,
-	0x1a, 0x17, 0x69, 0xc8, 0x26, 0xde, 0x52, 0xee, 0xb5, 0x4a, 0xbd, 0x1c, 0x76, 0x34, 0xaa, 0x49,
-	0xdb, 0x50, 0x0d, 0x99, 0x30, 0x0c, 0xa9, 0x4e, 0xa1, 0x97, 0xc3, 0x95, 0x90, 0x09, 0x6d, 0xde,
-	0x01, 0x18, 0xc7, 0x71, 0x64, 0xec, 0x52, 0x98, 0x4a, 0x2f, 0x87, 0xab, 0x12, 0x53, 0x84, 0x93,
-	0x32, 0x94, 0x94, 0xad, 0xf5, 0x6b, 0x0d, 0x8a, 0xa3, 0x84, 0x30, 0xb4, 0x09, 0xf9, 0x50, 0xb7,
-	0xa2, 0x8d, 0xf3, 0x61, 0x80, 0x10, 0x14, 0xd7, 0xa4, 0x57, 0x6b, 0x29, 0x77, 0x42, 0x52, 0xca,
-	0x84, 0xac, 0x7a, 0x41, 0x51, 0x2b, 0x1a, 0xe8, 0x07, 0xe8, 0x0c, 0xea, 0x51, 0xec, 0x93, 0xc8,
-	0xe3, 0x82, 0xa4, 0xc2, 0x93, 0x73, 0xa1, 0x34, 0x77, 0x3a, 0xcd, 0x4c, 0xad, 0x6c, 0x68, 0xdc,
-	0x8b, 0x6c, 0x68, 0xf0, 0xa6, 0xf2, 0x19, 0x49, 0x17, 0x09, 0xa2, 0xef, 0x40, 0x23, 0x1e, 0x65,
-	0x81, 0x8e, 0x51, 0x7a, 0x30, 0x46, 0x4d, 0x79, 0x74, 0x59, 0xa0, 0x22, 0x1c, 0x83, 0x1d, 0x49,
-	0x35, 0x79, 0xc3, 0x56, 0xad, 0xfe, 0xec, 0xae, 0x56, 0x4f, 0x08, 0x73, 0x95, 0xe6, 0xbc, 0xcb,
-	0x44, 0x7a, 0x8d, 0x8d, 0x0b, 0xfa, 0x36, 0x1b, 0x16, 0xd5, 0xe1, 0xaa, 0x5c, 0x4e, 0x67, 0xfb,
-	0xa3, 0xc3, 0x62, 0x26, 0x44, 0x0f, 0xdb, 0x09, 0x38, 0x32, 0x69, 0x8f, 0x2e, 0x29, 0x13, 0xbc,
-	0x51, 0x51, 0x19, 0xec, 0xdd, 0x97, 0x81, 0xcc, 0xb7, 0x2b, 0x99, 0x18, 0x44, 0xb6, 0xe4, 0xa8,
-	0x03, 0xa5, 0x28, 0x64, 0x33, 0xde, 0xa8, 0x2a, 0xef, 0xa7, 0xf7, 0xe6, 0x1f, 0xb2, 0x19, 0xd6,
-	0x54, 0x74, 0x00, 0xb6, 0x7e, 0x6c, 0x1a, 0xa0, 0x52, 0x46, 0x99, 0x53, 0x9a, 0xf8, 0x32, 0x57,
-	0xb1, 0xe0, 0xd8, 0x30, 0xd0, 0x01, 0x3c, 0x9e, 0x12, 0xee, 0xa5, 0x74, 0x1e, 0x0b, 0xea, 0xe9,
-	0xfa, 0x35, 0x1c, 0xd9, 0x23, 0xf8, 0xd1, 0x94, 0x70, 0xac, 0xf0, 0xa1, 0x82, 0x9b, 0x7f, 0x96,
-	0xa0, 0xba, 0xca, 0x12, 0x7d, 0x03, 0xa0, 0x8b, 0xa3, 0x0a, 0x63, 0x3d, 0x58, 0x98, 0xaa, 0x62,
-	0xab, 0xaa, 0x0c, 0x00, 0x6e, 0x1f, 0x53, 0xd5, 0x54, 0x4e, 0xe7, 0xf9, 0x83, 0xba, 0xb8, 0x6f,
-	0x56, 0x3e, 0xbd, 0x1c, 0x5e, 0x8b, 0x80, 0xde, 0xc1, 0x06, 0xa3, 0xe2, 0xe7, 0x38, 0x9d, 0x69,
-	0xad, 0x55, 0x3b, 0x3a, 0x1d, 0xf7, 0xe1, 0x90, 0x03, 0xed, 0xa6, 0x36, 0xbd, 0x1c, 0xae, 0xb1,
-	0xb5, 0x7d, 0xf3, 0x2f, 0x0b, 0xe0, 0xf6, 0x4c, 0xb4, 0x0b, 0x4e, 0x40, 0xb9, 0x9f, 0x86, 0x89,
-	0x4a, 0x5b, 0x3f, 0x58, 0xeb, 0x10, 0x1a, 0xae, 0xba, 0x2d, 0xaf, 0xaa, 0x75, 0xf4, 0x29, 0x77,
-	0xba, 0xab, 0x05, 0x9b, 0x3f, 0x82, 0xb3, 0x06, 0xa3, 0x3a, 0x14, 0x66, 0xf4, 0xda, 0x1c, 0x2d,
-	0x97, 0xe8, 0xd0, 0xcc, 0xae, 0x51, 0xf1, 0xae, 0xee, 0xbc, 0x7d, 0x4e, 0xb0, 0xe6, 0xbe, 0xce,
-	0x1f, 0x59, 0xcd, 0x5f, 0xf2, 0x50, 0x5b, 0xbf, 0x3d, 0x3a, 0x06, 0x67, 0x46, 0x53, 0x46, 0xff,
-	0x75, 0x41, 0x41, 0xd3, 0x55, 0x45, 0xbf, 0x87, 0xa2, 0xb8, 0x4e, 0x74, 0x16, 0x9b, 0x9d, 0xc3,
-	0x4f, 0x13, 0xde, 0xbd, 0xb8, 0x4e, 0x28, 0x56, 0x01, 0xd0, 0x36, 0xc0, 0x9c, 0x72, 0x4e, 0x26,
-	0x34, 0x7b, 0x56, 0x8a, 0xb8, 0x6a, 0x90, 0x7e, 0x80, 0xf6, 0xa0, 0x96, 0x99, 0x79, 0x78, 0xa3,
-	0xdf, 0x94, 0x22, 0x76, 0x0c, 0x36, 0x0a, 0x6f, 0x68, 0xeb, 0x6b, 0x28, 0xca, 0x78, 0xe8, 0x11,
-	0x38, 0xef, 0x06, 0xa3, 0x61, 0xf7, 0xb4, 0x7f, 0xde, 0xef, 0x9e, 0xd5, 0x73, 0xa8, 0x02, 0xc5,
-	0x51, 0x77, 0x70, 0x51, 0xb7, 0xe4, 0x0a, 0x77, 0x4f, 0x2f, 0xeb, 0xf9, 0xd5, 0xd3, 0xd7, 0xfc,
-	0xc3, 0x82, 0xa2, 0x9c, 0x21, 0xf4, 0x0a, 0x2a, 0xfa, 0x83, 0x66, 0x1e, 0xc0, 0x35, 0x0d, 0xd6,
-	0x6e, 0x63, 0x3e, 0xde, 0xb8, 0x2c, 0xcc, 0x57, 0x7c, 0x0b, 0xca, 0x3c, 0x21, 0x2c, 0xfb, 0x02,
-	0xda, 0xd8, 0x96, 0xdb, 0x7e, 0x80, 0x5e, 0x19, 0x65, 0x0a, 0x4a, 0x99, 0xbd, 0x8f, 0xcd, 0xef,
-	0x9a, 0x0e, 0x2d, 0xf7, 0xbe, 0x5b, 0x54, 0xa1, 0x74, 0xda, 0xeb, 0xbf, 0x3d, 0xab, 0x5b, 0x08,
-	0xc0, 0x1e, 0xbe, 0xc1, 0xf2, 0x4a, 0xf9, 0xff, 0xaf, 0x51, 0x5a, 0x73, 0x28, 0xe9, 0xe7, 0xec,
-	0x3f, 0x2a, 0xf3, 0x02, 0x4a, 0x52, 0x8a, 0x6c, 0x26, 0xb6, 0xee, 0x51, 0x00, 0x6b, 0xd6, 0x89,
-	0x07, 0x9f, 0xfb, 0xf1, 0xfc, 0x43, 0xd2, 0x09, 0xa8, 0xc8, 0x43, 0xd9, 0x87, 0x43, 0xeb, 0xa7,
-	0x23, 0x43, 0x98, 0xc4, 0x11, 0x61, 0x13, 0x37, 0x4e, 0x27, 0xed, 0x09, 0x65, 0xaa, 0x4b, 0xdb,
-	0xda, 0x44, 0x92, 0x90, 0xaf, 0xfe, 0xe6, 0x2d, 0x5f, 0x1e, 0x9b, 0xe5, 0xd8, 0x56, 0xa4, 0xc3,
-	0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x30, 0x5b, 0x04, 0x0a, 0x0a, 0x00, 0x00,
-}
diff --git a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go b/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go
index 42693d686..485bf0066 100644
--- a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go
+++ b/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go
@@ -228,12 +228,6 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
 //
 // Note that oneof type names ("test_oneof" in this case) cannot be used in
 // paths.
-//
-// ## Field Mask Verification
-//
-// The implementation of the all the API methods, which have any FieldMask type
-// field in the request, should verify the included field paths, and return
-// `INVALID_ARGUMENT` error if any path is duplicated or unmappable.
 type FieldMask struct {
	// The set of field mask paths.
	Paths []string `protobuf:"bytes,1,rep,name=paths" json:"paths,omitempty"`
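
Note (not part of the patch): the hunk above removes the "Field Mask Verification" section from the FieldMask doc comment. For readers, here is a minimal sketch of the behavior that section described: duplicated or unmappable paths are rejected with INVALID_ARGUMENT. The helper name (validateFieldMask), the valid-path set, and the use of grpc's status/codes packages are illustrative assumptions, not code from this change.

package main

import (
	"fmt"

	"google.golang.org/genproto/protobuf/field_mask"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// validateFieldMask applies the check the removed comment describes:
// every path in the mask must be unique and must map to a known field.
func validateFieldMask(mask *field_mask.FieldMask, valid map[string]bool) error {
	seen := make(map[string]bool, len(mask.Paths))
	for _, p := range mask.Paths {
		if seen[p] {
			return status.Errorf(codes.InvalidArgument, "duplicated field mask path %q", p)
		}
		if !valid[p] {
			return status.Errorf(codes.InvalidArgument, "unmappable field mask path %q", p)
		}
		seen[p] = true
	}
	return nil
}

func main() {
	mask := &field_mask.FieldMask{Paths: []string{"name", "name"}}
	// Prints an INVALID_ARGUMENT error for the duplicated "name" path.
	fmt.Println(validateFieldMask(mask, map[string]bool{"name": true}))
}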
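
Note (not part of the patch): the trace.pb.go file deleted earlier in this diff represents each proto oneof as a Go interface (isSpan_TimeEvent_Value) plus one wrapper struct per variant, with getters that type-assert on the wrapper. A sketch of how a caller would have set and read that oneof follows; the import path is assumed from the pre-deletion vendor layout, and the annotation text is an invented example.

package main

import (
	"fmt"

	// Assumed import path for the deleted vendored package.
	tracing "google.golang.org/genproto/googleapis/tracing/v1"
)

func main() {
	// Set the oneof: assign one of the generated wrapper structs to Value.
	ev := &tracing.Span_TimeEvent{
		Value: &tracing.Span_TimeEvent_Annotation_{
			Annotation: &tracing.Span_TimeEvent_Annotation{
				Description: "cache miss", // invented example text
			},
		},
	}
	// Read the oneof: the generated getter type-asserts on the wrapper and
	// returns nil when a different variant (or none) is set.
	if a := ev.GetAnnotation(); a != nil {
		fmt.Println(a.Description)
	}
	// GetNetworkEvent returns nil here, since the annotation variant is set.
	fmt.Println(ev.GetNetworkEvent())
}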