From 1dcc93b32bed46813d2719ac70189ecfd85d134b Mon Sep 17 00:00:00 2001 From: Travis Reeder Date: Wed, 12 Jul 2017 14:18:01 -0700 Subject: [PATCH] Updated dependencies --- glide.lock | 14 +- glide.yaml | 3 + vendor/github.com/docker/distribution/AUTHORS | 35 - .../docker/distribution/BUILDING.md | 10 +- .../docker/distribution/CHANGELOG.md | 83 +- .../github.com/docker/distribution/Dockerfile | 5 +- .../docker/distribution/Godeps/Godeps.json | 458 +++ .../docker/distribution/Godeps/Readme | 5 + .../docker/distribution/Jenkinsfile | 8 + .../github.com/docker/distribution/Makefile | 41 +- .../github.com/docker/distribution/README.md | 7 +- .../docker/distribution/RELEASE-CHECKLIST.md | 44 - .../github.com/docker/distribution/blobs.go | 4 +- .../github.com/docker/distribution/circle.yml | 17 +- .../docker/distribution/cmd/digest/main.go | 4 +- .../configuration/configuration.go | 27 +- .../docker/distribution/context/doc.go | 2 +- .../distribution/contrib/compose/README.md | 6 +- .../docker-integration/docker-compose.yml | 2 +- .../contrib/docker-integration/golem.conf | 2 +- .../contrib/docker-integration/helpers.bash | 34 +- .../docker-integration/malevolent.bats | 6 +- .../contrib/docker-integration/plugins.bats | 103 - .../docker-integration/run_multiversion.sh | 5 +- .../contrib/docker-integration/tls.bats | 19 +- .../contrib/docker-integration/token.bats | 16 +- .../tokenserver-oauth/Dockerfile | 2 +- .../docker-integration/tokenserver/Dockerfile | 2 +- .../distribution/contrib/token-server/main.go | 54 +- .../contrib/token-server/token.go | 34 +- .../docker/distribution/digest/digest.go | 139 + .../docker/distribution/digest/digest_test.go | 82 + .../docker/distribution/digest/digester.go | 155 + .../digester_resumable_test.go | 5 +- .../docker/distribution/digest/doc.go | 42 + .../distribution/{digestset => digest}/set.go | 30 +- .../{digestset => digest}/set_test.go | 25 +- .../docker/distribution/digest/verifiers.go | 44 + 
.../distribution/digest/verifiers_test.go | 49 + .../docker/distribution/docs/Dockerfile | 9 + .../docker/distribution/docs/Makefile | 38 + .../docker/distribution/docs/README.md | 16 - .../docker/distribution/docs/architecture.md | 8 +- .../docker/distribution/docs/compatibility.md | 84 + .../docker/distribution/docs/configuration.md | 2655 +++++++++++------ .../docker/distribution/docs/deploying.md | 237 ++ .../docker/distribution/docs/deprecated.md | 27 + .../distribution/docs/garbage-collection.md | 137 + .../docker/distribution/docs/glossary.md | 70 + .../docker/distribution/docs/help.md | 24 + .../docs/images/notifications.gliffy | 1 + .../docs/images/notifications.png | Bin 0 -> 37836 bytes .../docs/images/notifications.svg | 1 + .../docs/images/v2-registry-auth.png | Bin 0 -> 12590 bytes .../docker/distribution/docs/index.md | 67 + .../docker/distribution/docs/insecure.md | 116 + .../docker/distribution/docs/introduction.md | 55 + .../docker/distribution/docs/menu.md | 23 + .../docker/distribution/docs/migration.md | 30 + .../docker/distribution/docs/notifications.md | 350 +++ .../distribution/docs/recipes/apache.md | 215 ++ .../docker/distribution/docs/recipes/index.md | 37 + .../docker/distribution/docs/recipes/menu.md | 21 + .../distribution/docs/recipes/mirror.md | 74 + .../docker/distribution/docs/recipes/nginx.md | 190 ++ .../docs/recipes/osx-setup-guide.md | 81 + .../recipes/osx/com.docker.registry.plist | 42 + .../distribution/docs/recipes/osx/config.yml | 16 + .../docker/distribution/docs/spec/api.md | 18 +- .../docker/distribution/docs/spec/api.md.tmpl | 18 +- .../distribution/docs/spec/auth/index.md | 15 +- .../docker/distribution/docs/spec/auth/jwt.md | 15 +- .../distribution/docs/spec/auth/oauth.md | 16 +- .../distribution/docs/spec/auth/scope.md | 31 +- .../distribution/docs/spec/auth/token.md | 19 +- .../distribution/docs/spec/implementations.md | 8 +- .../docker/distribution/docs/spec/index.md | 15 +- 
.../docker/distribution/docs/spec/json.md | 16 +- .../distribution/docs/spec/manifest-v2-1.md | 16 +- .../distribution/docs/spec/manifest-v2-2.md | 25 +- .../docker/distribution/docs/spec/menu.md | 20 +- .../docs/storage-drivers/azure.md | 78 + .../docs/storage-drivers/filesystem.md | 24 + .../distribution/docs/storage-drivers/gcs.md | 78 + .../docs/storage-drivers/index.md | 66 + .../docs/storage-drivers/inmemory.md | 23 + .../distribution/docs/storage-drivers/menu.md | 13 + .../distribution/docs/storage-drivers/oss.md | 126 + .../distribution/docs/storage-drivers/s3.md | 320 ++ .../docs/storage-drivers/swift.md | 268 ++ .../github.com/docker/distribution/errors.go | 4 +- .../distribution/health/checks/checks.go | 15 +- .../docker/distribution/health/doc.go | 8 +- .../docker/distribution/health/health_test.go | 4 +- .../manifest/manifestlist/manifestlist.go | 4 +- .../manifest/schema1/config_builder.go | 16 +- .../manifest/schema1/config_builder_test.go | 8 +- .../distribution/manifest/schema1/manifest.go | 2 +- .../manifest/schema1/reference_builder.go | 2 +- .../schema1/reference_builder_test.go | 4 +- .../distribution/manifest/schema2/builder.go | 32 +- .../manifest/schema2/builder_test.go | 10 +- .../distribution/manifest/schema2/manifest.go | 17 +- .../manifest/schema2/manifest_test.go | 18 +- .../docker/distribution/manifests.go | 34 +- .../distribution/notifications/bridge.go | 2 +- .../distribution/notifications/bridge_test.go | 14 +- .../distribution/notifications/endpoint.go | 2 +- .../distribution/notifications/event.go | 4 +- .../distribution/notifications/http_test.go | 20 +- .../distribution/notifications/listener.go | 2 +- .../notifications/listener_test.go | 4 +- .../notifications/metrics_test.go | 28 - .../distribution/notifications/sinks.go | 2 +- .../docker/distribution/reference/helpers.go | 42 - .../distribution/reference/normalize.go | 170 -- .../distribution/reference/normalize_test.go | 625 ---- .../distribution/reference/reference.go | 205 
+- .../distribution/reference/reference_test.go | 193 +- .../docker/distribution/reference/regexp.go | 41 +- .../distribution/reference/regexp_test.go | 68 +- .../docker/distribution/registry.go | 2 +- .../registry/api/v2/descriptors.go | 2 +- .../registry/api/v2/headerparser.go | 161 - .../registry/api/v2/headerparser_test.go | 161 - .../distribution/registry/api/v2/urls.go | 53 +- .../distribution/registry/api/v2/urls_test.go | 282 +- .../docker/distribution/registry/auth/auth.go | 38 +- .../registry/auth/silly/access_test.go | 2 +- .../registry/auth/token/accesscontroller.go | 2 - .../distribution/registry/auth/token/token.go | 24 - .../registry/auth/token/token_test.go | 23 +- .../client/auth/{challenge => }/addr.go | 2 +- .../auth/{challenge => }/authchallenge.go | 20 +- .../{challenge => }/authchallenge_test.go | 6 +- .../registry/client/auth/session.go | 20 +- .../registry/client/auth/session_test.go | 23 +- .../distribution/registry/client/errors.go | 42 +- .../registry/client/repository.go | 10 +- .../registry/client/repository_test.go | 34 +- .../registry/client/transport/http_reader.go | 1 - .../registry/handlers/api_test.go | 160 +- .../distribution/registry/handlers/app.go | 81 +- .../registry/handlers/app_test.go | 6 +- .../distribution/registry/handlers/blob.go | 2 +- .../registry/handlers/blobupload.go | 8 +- .../distribution/registry/handlers/context.go | 64 +- .../handlers/{manifests.go => images.go} | 144 +- .../distribution/registry/proxy/proxyauth.go | 41 +- .../registry/proxy/proxyblobstore.go | 2 +- .../registry/proxy/proxyblobstore_test.go | 4 +- .../registry/proxy/proxymanifeststore.go | 2 +- .../registry/proxy/proxymanifeststore_test.go | 11 +- .../registry/proxy/proxyregistry.go | 11 +- .../docker/distribution/registry/registry.go | 2 +- .../registry/storage/blob_test.go | 16 +- .../registry/storage/blobserver.go | 2 +- .../registry/storage/blobstore.go | 4 +- .../registry/storage/blobwriter.go | 25 +- 
.../storage/cache/cachecheck/suite.go | 14 +- .../cache/cachedblobdescriptorstore.go | 2 +- .../registry/storage/cache/memory/memory.go | 4 +- .../registry/storage/cache/redis/redis.go | 4 +- .../registry/storage/catalog_test.go | 8 +- .../registry/storage/driver/azure/azure.go | 59 +- .../registry/storage/driver/base/base.go | 2 +- .../registry/storage/driver/base/regulator.go | 6 +- .../storage/driver/base/regulator_test.go | 67 - .../registry/storage/driver/oss/oss.go | 33 +- .../registry/storage/driver/s3-aws/s3.go | 89 +- .../registry/storage/driver/s3-aws/s3_test.go | 6 +- .../storage/driver/s3-aws/s3_v2_signer.go | 16 +- .../registry/storage/driver/s3-goamz/s3.go | 49 +- .../registry/storage/driver/swift/swift.go | 6 +- .../storage/driver/testsuites/testsuites.go | 49 +- .../registry/storage/filereader_test.go | 8 +- .../registry/storage/garbagecollect.go | 37 +- .../registry/storage/garbagecollect_test.go | 6 +- .../registry/storage/linkedblobstore.go | 4 +- .../registry/storage/linkedblobstore_test.go | 8 +- .../registry/storage/manifestlisthandler.go | 2 +- .../registry/storage/manifeststore.go | 2 +- .../registry/storage/manifeststore_test.go | 4 +- .../distribution/registry/storage/paths.go | 2 +- .../registry/storage/paths_test.go | 2 +- .../registry/storage/purgeuploads.go | 6 +- .../storage/schema2manifesthandler.go | 90 +- .../storage/schema2manifesthandler_test.go | 5 +- .../registry/storage/signedmanifesthandler.go | 2 +- .../distribution/registry/storage/tagstore.go | 2 +- .../registry/storage/tagstore_test.go | 2 +- .../distribution/registry/storage/vacuum.go | 4 +- .../docker/distribution/testutil/manifests.go | 4 +- .../docker/distribution/testutil/tarfile.go | 2 +- .../docker/distribution/vendor.conf | 43 - .../docker/distribution/version/version.go | 2 +- vendor/github.com/gorilla/context/.travis.yml | 22 +- vendor/github.com/gorilla/context/README.md | 3 + .../gorilla/context/context_test.go | 4 +- vendor/github.com/gorilla/context/doc.go | 
6 + vendor/github.com/gorilla/mux/.travis.yml | 24 +- vendor/github.com/gorilla/mux/README.md | 300 +- vendor/github.com/gorilla/mux/bench_test.go | 28 + .../github.com/gorilla/mux/context_gorilla.go | 26 + .../gorilla/mux/context_gorilla_test.go | 40 + .../github.com/gorilla/mux/context_native.go | 24 + .../gorilla/mux/context_native_test.go | 32 + vendor/github.com/gorilla/mux/doc.go | 56 +- vendor/github.com/gorilla/mux/mux.go | 257 +- vendor/github.com/gorilla/mux/mux_test.go | 1068 +++++-- vendor/github.com/gorilla/mux/old_test.go | 18 +- vendor/github.com/gorilla/mux/regexp.go | 122 +- vendor/github.com/gorilla/mux/route.go | 144 +- .../opencontainers/go-digest/.mailmap | 1 + .../opencontainers/go-digest/.pullapprove.yml | 12 + .../opencontainers/go-digest/.travis.yml | 4 + .../opencontainers/go-digest/CONTRIBUTING.md | 72 + .../opencontainers/go-digest/LICENSE.code | 191 ++ .../opencontainers/go-digest/LICENSE.docs | 425 +++ .../opencontainers/go-digest/MAINTAINERS | 9 + .../opencontainers/go-digest/README.md | 104 + .../opencontainers/go-digest/algorithm.go | 192 ++ .../go-digest/algorithm_test.go | 114 + .../opencontainers/go-digest/digest.go | 156 + .../opencontainers/go-digest/digest_test.go | 134 + .../opencontainers/go-digest/digester.go | 39 + .../opencontainers/go-digest/doc.go | 56 + .../opencontainers/go-digest/verifiers.go | 45 + .../go-digest/verifiers_test.go | 80 + 229 files changed, 10186 insertions(+), 4841 deletions(-) create mode 100644 vendor/github.com/docker/distribution/Godeps/Godeps.json create mode 100644 vendor/github.com/docker/distribution/Godeps/Readme create mode 100644 vendor/github.com/docker/distribution/Jenkinsfile delete mode 100644 vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md delete mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/plugins.bats create mode 100644 vendor/github.com/docker/distribution/digest/digest.go create mode 100644 
vendor/github.com/docker/distribution/digest/digest_test.go create mode 100644 vendor/github.com/docker/distribution/digest/digester.go rename vendor/github.com/docker/distribution/{registry/storage => digest}/digester_resumable_test.go (83%) create mode 100644 vendor/github.com/docker/distribution/digest/doc.go rename vendor/github.com/docker/distribution/{digestset => digest}/set.go (90%) rename vendor/github.com/docker/distribution/{digestset => digest}/set_test.go (93%) create mode 100644 vendor/github.com/docker/distribution/digest/verifiers.go create mode 100644 vendor/github.com/docker/distribution/digest/verifiers_test.go create mode 100644 vendor/github.com/docker/distribution/docs/Dockerfile create mode 100644 vendor/github.com/docker/distribution/docs/Makefile delete mode 100644 vendor/github.com/docker/distribution/docs/README.md create mode 100644 vendor/github.com/docker/distribution/docs/compatibility.md create mode 100644 vendor/github.com/docker/distribution/docs/deploying.md create mode 100644 vendor/github.com/docker/distribution/docs/deprecated.md create mode 100644 vendor/github.com/docker/distribution/docs/garbage-collection.md create mode 100644 vendor/github.com/docker/distribution/docs/glossary.md create mode 100644 vendor/github.com/docker/distribution/docs/help.md create mode 100644 vendor/github.com/docker/distribution/docs/images/notifications.gliffy create mode 100644 vendor/github.com/docker/distribution/docs/images/notifications.png create mode 100644 vendor/github.com/docker/distribution/docs/images/notifications.svg create mode 100644 vendor/github.com/docker/distribution/docs/images/v2-registry-auth.png create mode 100644 vendor/github.com/docker/distribution/docs/index.md create mode 100644 vendor/github.com/docker/distribution/docs/insecure.md create mode 100644 vendor/github.com/docker/distribution/docs/introduction.md create mode 100644 vendor/github.com/docker/distribution/docs/menu.md create mode 100644 
vendor/github.com/docker/distribution/docs/migration.md create mode 100644 vendor/github.com/docker/distribution/docs/notifications.md create mode 100644 vendor/github.com/docker/distribution/docs/recipes/apache.md create mode 100644 vendor/github.com/docker/distribution/docs/recipes/index.md create mode 100644 vendor/github.com/docker/distribution/docs/recipes/menu.md create mode 100644 vendor/github.com/docker/distribution/docs/recipes/mirror.md create mode 100644 vendor/github.com/docker/distribution/docs/recipes/nginx.md create mode 100644 vendor/github.com/docker/distribution/docs/recipes/osx-setup-guide.md create mode 100644 vendor/github.com/docker/distribution/docs/recipes/osx/com.docker.registry.plist create mode 100644 vendor/github.com/docker/distribution/docs/recipes/osx/config.yml create mode 100644 vendor/github.com/docker/distribution/docs/storage-drivers/azure.md create mode 100644 vendor/github.com/docker/distribution/docs/storage-drivers/filesystem.md create mode 100644 vendor/github.com/docker/distribution/docs/storage-drivers/gcs.md create mode 100644 vendor/github.com/docker/distribution/docs/storage-drivers/index.md create mode 100644 vendor/github.com/docker/distribution/docs/storage-drivers/inmemory.md create mode 100644 vendor/github.com/docker/distribution/docs/storage-drivers/menu.md create mode 100644 vendor/github.com/docker/distribution/docs/storage-drivers/oss.md create mode 100644 vendor/github.com/docker/distribution/docs/storage-drivers/s3.md create mode 100644 vendor/github.com/docker/distribution/docs/storage-drivers/swift.md delete mode 100644 vendor/github.com/docker/distribution/notifications/metrics_test.go delete mode 100644 vendor/github.com/docker/distribution/reference/helpers.go delete mode 100644 vendor/github.com/docker/distribution/reference/normalize.go delete mode 100644 vendor/github.com/docker/distribution/reference/normalize_test.go delete mode 100644 
vendor/github.com/docker/distribution/registry/api/v2/headerparser.go delete mode 100644 vendor/github.com/docker/distribution/registry/api/v2/headerparser_test.go rename vendor/github.com/docker/distribution/registry/client/auth/{challenge => }/addr.go (97%) rename vendor/github.com/docker/distribution/registry/client/auth/{challenge => }/authchallenge.go (91%) rename vendor/github.com/docker/distribution/registry/client/auth/{challenge => }/authchallenge_test.go (97%) rename vendor/github.com/docker/distribution/registry/handlers/{manifests.go => images.go} (72%) delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/base/regulator_test.go delete mode 100644 vendor/github.com/docker/distribution/vendor.conf create mode 100644 vendor/github.com/gorilla/mux/context_gorilla.go create mode 100644 vendor/github.com/gorilla/mux/context_gorilla_test.go create mode 100644 vendor/github.com/gorilla/mux/context_native.go create mode 100644 vendor/github.com/gorilla/mux/context_native_test.go create mode 100644 vendor/github.com/opencontainers/go-digest/.mailmap create mode 100644 vendor/github.com/opencontainers/go-digest/.pullapprove.yml create mode 100644 vendor/github.com/opencontainers/go-digest/.travis.yml create mode 100644 vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md create mode 100644 vendor/github.com/opencontainers/go-digest/LICENSE.code create mode 100644 vendor/github.com/opencontainers/go-digest/LICENSE.docs create mode 100644 vendor/github.com/opencontainers/go-digest/MAINTAINERS create mode 100644 vendor/github.com/opencontainers/go-digest/README.md create mode 100644 vendor/github.com/opencontainers/go-digest/algorithm.go create mode 100644 vendor/github.com/opencontainers/go-digest/algorithm_test.go create mode 100644 vendor/github.com/opencontainers/go-digest/digest.go create mode 100644 vendor/github.com/opencontainers/go-digest/digest_test.go create mode 100644 
vendor/github.com/opencontainers/go-digest/digester.go create mode 100644 vendor/github.com/opencontainers/go-digest/doc.go create mode 100644 vendor/github.com/opencontainers/go-digest/verifiers.go create mode 100644 vendor/github.com/opencontainers/go-digest/verifiers_test.go diff --git a/glide.lock b/glide.lock index 329589e79..e9ea224cb 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: 098ae92d825f7f80973ee99791e9e3f968f6f453b1da855d89f36cdbcdc27c6f -updated: 2017-07-06T18:45:39.5860504-07:00 +hash: 4db824e9063d9008f1a1716a8ffb6bfac74dce1abb4089f42e28c985b88fa74b +updated: 2017-07-12T14:14:05.525802785-07:00 imports: - name: code.cloudfoundry.org/bytefmt version: f4415fafc5619dd75599a54a7c91fb3948ad58bd @@ -40,7 +40,7 @@ imports: subpackages: - cli/config/configfile - name: github.com/docker/distribution - version: f86db6b22663a27ba4d278220b7e34be528b1e79 + version: 99cb7c0946d2f5a38015443e515dc916295064d7 subpackages: - context - digest @@ -140,9 +140,9 @@ imports: subpackages: - query - name: github.com/gorilla/context - version: 14f550f51af52180c2eefed15e5fd18d63c0a64a + version: 08b5f424b9271eedf6f9f0ce86cb9396ed337a42 - name: github.com/gorilla/mux - version: e444e69cbd2e2e3e0749a2f3c717cec491552bbf + version: 0a192a193177452756c362c20087ddafcf6829c4 - name: github.com/hashicorp/go-cleanhttp version: 3573b8b52aa7b37b9358d966a898feb387f62437 - name: github.com/hashicorp/hcl @@ -196,6 +196,8 @@ imports: version: d0303fe809921458f417bcf828397a65db30a7e4 - name: github.com/Nvveen/Gotty version: cd527374f1e5bff4938207604a14f2e38a9cf512 +- name: github.com/opencontainers/go-digest + version: 279bed98673dd5bef374d3b6e4b09e2af76183bf - name: github.com/opencontainers/runc version: ea35825a6350511ab93fe24e69c0723d6728616d subpackages: @@ -211,6 +213,8 @@ imports: version: b938d81255b5473c57635324295cb0fe398c7a58 - name: github.com/PuerkitoBio/urlesc version: bbf7a2afc14f93e1e0a5c06df524fbd75e5031e5 +- name: github.com/sirupsen/logrus + version: 
ba1b36c82c5e05c4f912a88eab0dcd91a171688f - name: github.com/Sirupsen/logrus version: ba1b36c82c5e05c4f912a88eab0dcd91a171688f repo: https://github.com/sirupsen/logrus diff --git a/glide.yaml b/glide.yaml index 3b9db327f..50066b91c 100644 --- a/glide.yaml +++ b/glide.yaml @@ -70,3 +70,6 @@ import: - package: github.com/mattn/go-sqlite3 testImport: - package: github.com/vrischmann/envconfig +- package: github.com/opencontainers/go-digest + branch: master + \ No newline at end of file diff --git a/vendor/github.com/docker/distribution/AUTHORS b/vendor/github.com/docker/distribution/AUTHORS index 252ff8aa2..9e80e062b 100644 --- a/vendor/github.com/docker/distribution/AUTHORS +++ b/vendor/github.com/docker/distribution/AUTHORS @@ -1,8 +1,6 @@ -a-palchikov Aaron Lehmann Aaron Schlesinger Aaron Vinson -Adam Duke Adam Enger Adrian Mouat Ahmet Alp Balkan @@ -21,7 +19,6 @@ Anis Elleuch Anton Tiurin Antonio Mercado Antonio Murdaca -Anusha Ragunathan Arien Holthuizen Arnaud Porterie Arthur Baars @@ -29,16 +26,12 @@ Asuka Suzuki Avi Miller Ayose Cazorla BadZen -Ben Bodenmiller Ben Firshman bin liu Brian Bland burnettk Carson A -Cezar Sa Espinola -Charles Smith Chris Dillon -cuiwei13 cyli Daisuke Fujita Daniel Huhn @@ -55,14 +48,11 @@ Diogo Mónica DJ Enriquez Donald Huang Doug Davis -Edgar Lee Eric Yang -Fabio Berchtold Fabio Huser farmerworking Felix Yan Florentin Raud -Frank Chen Frederick F. 
Kautz IV gabriell nascimento Gleb Schukin @@ -74,23 +64,16 @@ HuKeping Ian Babrou igayoso Jack Griffin -James Findley Jason Freidman -Jason Heiss Jeff Nickoloff -Jess Frazelle Jessie Frazelle jhaohai Jianqing Wang -Jihoon Chung -Joao Fernandes -John Mulhausen John Starks Jon Johnson Jon Poler Jonathan Boulle Jordan Liggitt -Josh Chorlton Josh Hawn Julien Fernandez Ke Xu @@ -101,30 +84,22 @@ Kenny Leung Li Yi Liu Hua liuchang0812 -Lloyd Ramey Louis Kottmann Luke Carpenter -Marcus Martins Mary Anthony Matt Bentley Matt Duch Matt Moore Matt Robenolt -Matthew Green Michael Prokop Michal Minar -Michal Minář -Mike Brown Miquel Sabaté -Misty Stanley-Jones -Misty Stanley-Jones Morgan Bauer moxiegirl Nathan Sullivan nevermosby Nghia Tran Nikita Tarasov -Noah Treuhaft Nuutti Kotivuori Oilbeater Olivier Gambier @@ -133,23 +108,17 @@ Omer Cohen Patrick Devine Phil Estes Philip Misiowiec -Pierre-Yves Ritschard -Qiao Anran -Randy Barlow Richard Scothern Rodolfo Carvalho Rusty Conover Sean Boran Sebastiaan van Stijn -Sebastien Coavoux Serge Dubrouski Sharif Nassar Shawn Falkner-Horine Shreyas Karnik Simon Thulbourn -spacexnice Spencer Rinehart -Stan Hu Stefan Majewsky Stefan Weil Stephen J Day @@ -165,8 +134,6 @@ Tonis Tiigi Tony Holdstock-Brown Trevor Pounds Troels Thomsen -Victor Vieux -Victoria Bialas Vincent Batts Vincent Demeester Vincent Giersch @@ -175,8 +142,6 @@ weiyuan.yl xg.song xiekeyang Yann ROBERT -yaoyao.xyy -yuexiao-wang yuzou zhouhaibing089 姜继忠 diff --git a/vendor/github.com/docker/distribution/BUILDING.md b/vendor/github.com/docker/distribution/BUILDING.md index d52ed0d94..d9577022b 100644 --- a/vendor/github.com/docker/distribution/BUILDING.md +++ b/vendor/github.com/docker/distribution/BUILDING.md @@ -11,7 +11,7 @@ Most people should use the [official Registry docker image](https://hub.docker.c People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`. 
-OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md). +OS X users who want to run natively can do so following [the instructions here](osx-setup-guide.md). ### Gotchas @@ -71,7 +71,9 @@ commands, such as `go test`, should work per package (please see A `Makefile` has been provided as a convenience to support repeatable builds. Please install the following into `GOPATH` for it to work: - go get github.com/golang/lint/golint + go get github.com/tools/godep github.com/golang/lint/golint + +**TODO(stevvooe):** Add a `make setup` command to Makefile to run this. Have to think about how to interact with Godeps properly. Once these commands are available in the `GOPATH`, run `make` to get a full build: @@ -103,8 +105,8 @@ build: + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template + binaries -The above provides a repeatable build using the contents of the vendor -directory. This includes formatting, vetting, linting, building, +The above provides a repeatable build using the contents of the vendored +Godeps directory. This includes formatting, vetting, linting, building, testing and generating tagged binaries. 
We can verify this worked by running the registry binary generated in the "./bin" directory: diff --git a/vendor/github.com/docker/distribution/CHANGELOG.md b/vendor/github.com/docker/distribution/CHANGELOG.md index e7b16b3c2..3445c090c 100644 --- a/vendor/github.com/docker/distribution/CHANGELOG.md +++ b/vendor/github.com/docker/distribution/CHANGELOG.md @@ -1,82 +1,9 @@ # Changelog -## 2.6.0 (2017-01-18) - -#### Storage -- S3: fixed bug in delete due to read-after-write inconsistency -- S3: allow EC2 IAM roles to be used when authorizing region endpoints -- S3: add Object ACL Support -- S3: fix delete method's notion of subpaths -- S3: use multipart upload API in `Move` method for performance -- S3: add v2 signature signing for legacy S3 clones -- Swift: add simple heuristic to detect incomplete DLOs during read ops -- Swift: support different user and tenant domains -- Swift: bulk deletes in chunks -- Aliyun OSS: fix delete method's notion of subpaths -- Aliyun OSS: optimize data copy after upload finishes -- Azure: close leaking response body -- Fix storage drivers dropping non-EOF errors when listing repositories -- Compare path properly when listing repositories in catalog -- Add a foreign layer URL host whitelist -- Improve catalog enumerate runtime - -#### Registry -- Export `storage.CreateOptions` in top-level package -- Enable notifications to endpoints that use self-signed certificates -- Properly validate multi-URL foreign layers -- Add control over validation of URLs in pushed manifests -- Proxy mode: fix socket leak when pull is cancelled -- Tag service: properly handle error responses on HEAD request -- Support for custom authentication URL in proxying registry -- Add configuration option to disable access logging -- Add notification filtering by target media type -- Manifest: `References()` returns all children -- Honor `X-Forwarded-Port` and Forwarded headers -- Reference: Preserve tag and digest in With* functions -- Add policy configuration for 
enforcing repository classes - -#### Client -- Changes the client Tags `All()` method to follow links -- Allow registry clients to connect via HTTP2 -- Better handling of OAuth errors in client - -#### Spec -- Manifest: clarify relationship between urls and foreign layers -- Authorization: add support for repository classes - -#### Manifest -- Override media type returned from `Stat()` for existing manifests -- Add plugin mediatype to distribution manifest - -#### Docs -- Document `TOOMANYREQUESTS` error code -- Document required Let's Encrypt port -- Improve documentation around implementation of OAuth2 -- Improve documentation for configuration - -#### Auth -- Add support for registry type in scope -- Add support for using v2 ping challenges for v1 -- Add leeway to JWT `nbf` and `exp` checking -- htpasswd: dynamically parse htpasswd file -- Fix missing auth headers with PATCH HTTP request when pushing to default port - -#### Dockerfile -- Update to go1.7 -- Reorder Dockerfile steps for better layer caching - -#### Notes - -Documentation has moved to the documentation repository at -`github.com/docker/docker.github.io/tree/master/registry` - -The registry is go 1.7 compliant, and passes newer, more restrictive `lint` and `vet` ing. 
- - ## 2.5.0 (2016-06-14) -#### Storage -- Ensure uploads directory is cleaned after upload is committed +### Storage +- Ensure uploads directory is cleaned after upload is commited - Add ability to cap concurrent operations in filesystem driver - S3: Add 'us-gov-west-1' to the valid region list - Swift: Handle ceph not returning Last-Modified header for HEAD requests @@ -96,13 +23,13 @@ The registry is go 1.7 compliant, and passes newer, more restrictive `lint` and - Update the auth spec scope grammar to reflect the fact that hostnames are optionally supported - Clarify API documentation around catalog fetch behavior -#### API +### API - Support returning HTTP 429 (Too Many Requests) -#### Documentation +### Documentation - Update auth documentation examples to show "expires in" as int -#### Docker Image +### Docker Image - Use Alpine Linux as base image diff --git a/vendor/github.com/docker/distribution/Dockerfile b/vendor/github.com/docker/distribution/Dockerfile index ac8dbca2f..bc3c78577 100644 --- a/vendor/github.com/docker/distribution/Dockerfile +++ b/vendor/github.com/docker/distribution/Dockerfile @@ -1,11 +1,8 @@ -FROM golang:1.8-alpine +FROM golang:1.6-alpine ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution ENV DOCKER_BUILDTAGS include_oss include_gcs -ARG GOOS=linux -ARG GOARCH=amd64 - RUN set -ex \ && apk add --no-cache make git diff --git a/vendor/github.com/docker/distribution/Godeps/Godeps.json b/vendor/github.com/docker/distribution/Godeps/Godeps.json new file mode 100644 index 000000000..92252c697 --- /dev/null +++ b/vendor/github.com/docker/distribution/Godeps/Godeps.json @@ -0,0 +1,458 @@ +{ + "ImportPath": "github.com/docker/distribution", + "GoVersion": "go1.6", + "GodepVersion": "v74", + "Packages": [ + "./..." 
+ ], + "Deps": [ + { + "ImportPath": "github.com/Azure/azure-sdk-for-go/storage", + "Comment": "v1.2-334-g95361a2", + "Rev": "95361a2573b1fa92a00c5fc2707a80308483c6f9" + }, + { + "ImportPath": "github.com/Sirupsen/logrus", + "Comment": "v0.7.3", + "Rev": "55eb11d21d2a31a3cc93838241d04800f52e823d" + }, + { + "ImportPath": "github.com/Sirupsen/logrus/formatters/logstash", + "Comment": "v0.7.3", + "Rev": "55eb11d21d2a31a3cc93838241d04800f52e823d" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/awserr", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/awsutil", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/client", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/client/metadata", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/corehandlers", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/defaults", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/ec2metadata", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/request", + "Comment": "v1.2.4", + "Rev": 
"90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/session", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/signer/v4", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/endpoints", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/restxml", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/waiter", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/cloudfront/sign", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/s3", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + 
{ + "ImportPath": "github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/bugsnag/bugsnag-go", + "Comment": "v1.0.2-5-gb1d1530", + "Rev": "b1d153021fcd90ca3f080db36bec96dc690fb274" + }, + { + "ImportPath": "github.com/bugsnag/bugsnag-go/errors", + "Comment": "v1.0.2-5-gb1d1530", + "Rev": "b1d153021fcd90ca3f080db36bec96dc690fb274" + }, + { + "ImportPath": "github.com/bugsnag/osext", + "Rev": "0dd3f918b21bec95ace9dc86c7e70266cfc5c702" + }, + { + "ImportPath": "github.com/bugsnag/panicwrap", + "Comment": "1.0.0-2-ge2c2850", + "Rev": "e2c28503fcd0675329da73bf48b33404db873782" + }, + { + "ImportPath": "github.com/denverdino/aliyungo/common", + "Rev": "6ffb587da9da6d029d0ce517b85fecc82172d502" + }, + { + "ImportPath": "github.com/denverdino/aliyungo/oss", + "Rev": "6ffb587da9da6d029d0ce517b85fecc82172d502" + }, + { + "ImportPath": "github.com/denverdino/aliyungo/util", + "Rev": "6ffb587da9da6d029d0ce517b85fecc82172d502" + }, + { + "ImportPath": "github.com/docker/goamz/aws", + "Rev": "f0a21f5b2e12f83a505ecf79b633bb2035cf6f85" + }, + { + "ImportPath": "github.com/docker/goamz/s3", + "Rev": "f0a21f5b2e12f83a505ecf79b633bb2035cf6f85" + }, + { + "ImportPath": "github.com/docker/libtrust", + "Rev": "fa567046d9b14f6aa788882a950d69651d230b21" + }, + { + "ImportPath": "github.com/garyburd/redigo/internal", + "Rev": "535138d7bcd717d6531c701ef5933d98b1866257" + }, + { + "ImportPath": "github.com/garyburd/redigo/redis", + "Rev": "535138d7bcd717d6531c701ef5933d98b1866257" + }, + { + "ImportPath": "github.com/golang/protobuf/proto", + "Rev": "8d92cf5fc15a4382f8964b08e1f42a75c0591aa3" + }, + { + "ImportPath": "github.com/gorilla/context", + "Rev": "14f550f51af52180c2eefed15e5fd18d63c0a64a" + }, + { + "ImportPath": "github.com/gorilla/handlers", + "Rev": "60c7bfde3e33c201519a200a4507a158cc03a17b" + }, + { + "ImportPath": "github.com/gorilla/mux", + "Rev": 
"e444e69cbd2e2e3e0749a2f3c717cec491552bbf" + }, + { + "ImportPath": "github.com/inconshreveable/mousetrap", + "Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" + }, + { + "ImportPath": "github.com/mitchellh/mapstructure", + "Rev": "482a9fd5fa83e8c4e7817413b80f3eb8feec03ef" + }, + { + "ImportPath": "github.com/ncw/swift", + "Rev": "ce444d6d47c51d4dda9202cd38f5094dd8e27e86" + }, + { + "ImportPath": "github.com/ncw/swift/swifttest", + "Rev": "ce444d6d47c51d4dda9202cd38f5094dd8e27e86" + }, + { + "ImportPath": "github.com/spf13/cobra", + "Rev": "312092086bed4968099259622145a0c9ae280064" + }, + { + "ImportPath": "github.com/spf13/pflag", + "Rev": "5644820622454e71517561946e3d94b9f9db6842" + }, + { + "ImportPath": "github.com/stevvooe/resumable", + "Rev": "51ad44105773cafcbe91927f70ac68e1bf78f8b4" + }, + { + "ImportPath": "github.com/stevvooe/resumable/sha256", + "Rev": "51ad44105773cafcbe91927f70ac68e1bf78f8b4" + }, + { + "ImportPath": "github.com/stevvooe/resumable/sha512", + "Rev": "51ad44105773cafcbe91927f70ac68e1bf78f8b4" + }, + { + "ImportPath": "github.com/yvasiyarov/go-metrics", + "Rev": "57bccd1ccd43f94bb17fdd8bf3007059b802f85e" + }, + { + "ImportPath": "github.com/yvasiyarov/gorelic", + "Comment": "v0.0.6-8-ga9bba5b", + "Rev": "a9bba5b9ab508a086f9a12b8c51fab68478e2128" + }, + { + "ImportPath": "github.com/yvasiyarov/newrelic_platform_go", + "Rev": "b21fdbd4370f3717f3bbd2bf41c223bc273068e6" + }, + { + "ImportPath": "golang.org/x/crypto/bcrypt", + "Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b" + }, + { + "ImportPath": "golang.org/x/crypto/blowfish", + "Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b" + }, + { + "ImportPath": "golang.org/x/crypto/ocsp", + "Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b" + }, + { + "ImportPath": "golang.org/x/net/context", + "Rev": "4876518f9e71663000c348837735820161a42df7" + }, + { + "ImportPath": "golang.org/x/net/context/ctxhttp", + "Rev": "4876518f9e71663000c348837735820161a42df7" + }, + { + "ImportPath": 
"golang.org/x/net/http2", + "Rev": "4876518f9e71663000c348837735820161a42df7" + }, + { + "ImportPath": "golang.org/x/net/http2/hpack", + "Rev": "4876518f9e71663000c348837735820161a42df7" + }, + { + "ImportPath": "golang.org/x/net/internal/timeseries", + "Rev": "4876518f9e71663000c348837735820161a42df7" + }, + { + "ImportPath": "golang.org/x/net/trace", + "Rev": "4876518f9e71663000c348837735820161a42df7" + }, + { + "ImportPath": "golang.org/x/oauth2", + "Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf" + }, + { + "ImportPath": "golang.org/x/oauth2/google", + "Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf" + }, + { + "ImportPath": "golang.org/x/oauth2/internal", + "Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf" + }, + { + "ImportPath": "golang.org/x/oauth2/jws", + "Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf" + }, + { + "ImportPath": "golang.org/x/oauth2/jwt", + "Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf" + }, + { + "ImportPath": "golang.org/x/time/rate", + "Rev": "a4bde12657593d5e90d0533a3e4fd95e635124cb" + }, + { + "ImportPath": "google.golang.org/api/gensupport", + "Rev": "9bf6e6e569ff057f75d9604a46c52928f17d2b54" + }, + { + "ImportPath": "google.golang.org/api/googleapi", + "Rev": "9bf6e6e569ff057f75d9604a46c52928f17d2b54" + }, + { + "ImportPath": "google.golang.org/api/googleapi/internal/uritemplates", + "Rev": "9bf6e6e569ff057f75d9604a46c52928f17d2b54" + }, + { + "ImportPath": "google.golang.org/api/storage/v1", + "Rev": "9bf6e6e569ff057f75d9604a46c52928f17d2b54" + }, + { + "ImportPath": "google.golang.org/appengine", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/appengine/internal", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/appengine/internal/app_identity", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/appengine/internal/base", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + 
"ImportPath": "google.golang.org/appengine/internal/datastore", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/appengine/internal/log", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/appengine/internal/modules", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/appengine/internal/remote_api", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/cloud", + "Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2" + }, + { + "ImportPath": "google.golang.org/cloud/compute/metadata", + "Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2" + }, + { + "ImportPath": "google.golang.org/cloud/internal", + "Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2" + }, + { + "ImportPath": "google.golang.org/cloud/internal/opts", + "Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2" + }, + { + "ImportPath": "google.golang.org/cloud/storage", + "Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2" + }, + { + "ImportPath": "google.golang.org/grpc", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/codes", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/credentials", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/grpclog", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/internal", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/metadata", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/naming", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/peer", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/transport", 
+ "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "gopkg.in/check.v1", + "Rev": "64131543e7896d5bcc6bd5a76287eb75ea96c673" + }, + { + "ImportPath": "gopkg.in/yaml.v2", + "Rev": "bef53efd0c76e49e6de55ead051f886bea7e9420" + }, + { + "ImportPath": "rsc.io/letsencrypt", + "Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c" + }, + { + "ImportPath": "rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme", + "Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c" + }, + { + "ImportPath": "rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1", + "Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c" + }, + { + "ImportPath": "rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher", + "Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c" + }, + { + "ImportPath": "rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json", + "Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c" + } + ] +} diff --git a/vendor/github.com/docker/distribution/Godeps/Readme b/vendor/github.com/docker/distribution/Godeps/Readme new file mode 100644 index 000000000..4cdaa53d5 --- /dev/null +++ b/vendor/github.com/docker/distribution/Godeps/Readme @@ -0,0 +1,5 @@ +This directory tree is generated automatically by godep. + +Please do not edit. + +See https://github.com/tools/godep for more information. 
diff --git a/vendor/github.com/docker/distribution/Jenkinsfile b/vendor/github.com/docker/distribution/Jenkinsfile new file mode 100644 index 000000000..fa29520b5 --- /dev/null +++ b/vendor/github.com/docker/distribution/Jenkinsfile @@ -0,0 +1,8 @@ +// Only run on Linux atm +wrappedNode(label: 'docker') { + deleteDir() + stage "checkout" + checkout scm + + documentationChecker("docs") +} diff --git a/vendor/github.com/docker/distribution/Makefile b/vendor/github.com/docker/distribution/Makefile index 7c6f9c7a6..a0602d0b2 100644 --- a/vendor/github.com/docker/distribution/Makefile +++ b/vendor/github.com/docker/distribution/Makefile @@ -13,7 +13,7 @@ endif GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)" -.PHONY: all build binaries clean dep-restore dep-save dep-validate fmt lint test test-full vet +.PHONY: clean all fmt vet lint build test binaries .DEFAULT: all all: fmt vet lint build test binaries @@ -27,25 +27,22 @@ version/version.go: # Required for go 1.5 to build GO15VENDOREXPERIMENT := 1 -# Go files -GOFILES=$(shell find . -type f -name '*.go') - # Package list -PKGS=$(shell go list -tags "${DOCKER_BUILDTAGS}" ./... | grep -v ^github.com/docker/distribution/vendor/) +PKGS := $(shell go list -tags "${DOCKER_BUILDTAGS}" ./... 
| grep -v ^github.com/docker/distribution/vendor/) # Resolving binary dependencies for specific targets -GOLINT=$(shell which golint || echo '') -VNDR=$(shell which vndr || echo '') +GOLINT := $(shell which golint || echo '') +GODEP := $(shell which godep || echo '') -${PREFIX}/bin/registry: $(GOFILES) +${PREFIX}/bin/registry: $(wildcard **/*.go) @echo "+ $@" @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry -${PREFIX}/bin/digest: $(GOFILES) +${PREFIX}/bin/digest: $(wildcard **/*.go) @echo "+ $@" @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest -${PREFIX}/bin/registry-api-descriptor-template: $(GOFILES) +${PREFIX}/bin/registry-api-descriptor-template: $(wildcard **/*.go) @echo "+ $@" @go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template @@ -86,14 +83,24 @@ clean: @echo "+ $@" @rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/digest" "${PREFIX}/bin/registry-api-descriptor-template" -dep-validate: +dep-save: + @echo "+ $@" + $(if $(GODEP), , \ + $(error Please install godep: go get github.com/tools/godep)) + @$(GODEP) save $(PKGS) + +dep-restore: + @echo "+ $@" + $(if $(GODEP), , \ + $(error Please install godep: go get github.com/tools/godep)) + @$(GODEP) restore -v + +dep-validate: dep-restore @echo "+ $@" - $(if $(VNDR), , \ - $(error Please install vndr: go get github.com/lk4d4/vndr)) @rm -Rf .vendor.bak @mv vendor .vendor.bak - @$(VNDR) + @rm -Rf Godeps + @$(GODEP) save ./... @test -z "$$(diff -r vendor .vendor.bak 2>&1 | tee /dev/stderr)" || \ - (echo >&2 "+ inconsistent dependencies! what you have in vendor.conf does not match with what you have in vendor" && false) - @rm -Rf vendor - @mv .vendor.bak vendor + (echo >&2 "+ borked dependencies! 
what you have in Godeps/Godeps.json does not match with what you have in vendor" && false) + @rm -Rf .vendor.bak diff --git a/vendor/github.com/docker/distribution/README.md b/vendor/github.com/docker/distribution/README.md index 998878850..99fd59457 100644 --- a/vendor/github.com/docker/distribution/README.md +++ b/vendor/github.com/docker/distribution/README.md @@ -19,7 +19,7 @@ This repository contains the following components: | **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. | | **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. | | **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) | -| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry. | +| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/index.md) related just to the registry. | ### How does this integrate with Docker engine? @@ -68,7 +68,7 @@ others, it is not. For example, users with their own software products may want to maintain a registry for private, company images. Also, you may wish to deploy your own image repository for images used to test or in continuous integration. For these -use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md) +use cases and others, [deploying your own registry instance](docs/deploying.md) may be the better choice. ### Migration to Registry 2.0 @@ -76,7 +76,8 @@ may be the better choice. 
For those who have previously deployed their own registry based on the Registry 1.0 implementation and wish to deploy a Registry 2.0 while retaining images, data migration is required. A tool to assist with migration efforts has been -created. For more information see [docker/migrator](https://github.com/docker/migrator). +created. For more information see [docker/migrator] +(https://github.com/docker/migrator). ## Contribute diff --git a/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md b/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md deleted file mode 100644 index 73eba5a87..000000000 --- a/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md +++ /dev/null @@ -1,44 +0,0 @@ -## Registry Release Checklist - -10. Compile release notes detailing features and since the last release. - - Update the `CHANGELOG.md` file and create a PR to master with the updates. -Once that PR has been approved by maintainers the change may be cherry-picked -to the release branch (new release branches may be forked from this commit). - -20. Update the version file: `https://github.com/docker/distribution/blob/master/version/version.go` - -30. Update the `MAINTAINERS` (if necessary), `AUTHORS` and `.mailmap` files. - -``` -make AUTHORS -``` - -40. Create a signed tag. - - Distribution uses semantic versioning. Tags are of the format -`vx.y.z[-rcn]`. You will need PGP installed and a PGP key which has been added -to your Github account. The comment for the tag should include the release -notes, use previous tags as a guide for formatting consistently. Run -`git tag -s vx.y.z[-rcn]` to create tag and `git -v vx.y.z[-rcn]` to verify tag, -check comment and correct commit hash. - -50. Push the signed tag - -60. Create a new [release](https://github.com/docker/distribution/releases). In the case of a release candidate, tick the `pre-release` checkbox. - -70. 
Update the registry binary in [distribution library image repo](https://github.com/docker/distribution-library-image) by running the update script and opening a pull request. - -80. Update the official image. Add the new version in the [official images repo](https://github.com/docker-library/official-images) by appending a new version to the `registry/registry` file with the git hash pointed to by the signed tag. Update the major version to point to the latest version and the minor version to point to new patch release if necessary. -e.g. to release `2.3.1` - - `2.3.1 (new)` - - `2.3.0 -> 2.3.0` can be removed - - `2 -> 2.3.1` - - `2.3 -> 2.3.1` - -90. Build a new distribution/registry image on [Docker hub](https://hub.docker.com/u/distribution/dashboard) by adding a new automated build with the new tag and re-building the images. - diff --git a/vendor/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go index 01d309029..1f91ae21e 100644 --- a/vendor/github.com/docker/distribution/blobs.go +++ b/vendor/github.com/docker/distribution/blobs.go @@ -8,8 +8,8 @@ import ( "time" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" - "github.com/opencontainers/go-digest" ) var ( @@ -152,7 +152,7 @@ type BlobProvider interface { // BlobServer can serve blobs via http. type BlobServer interface { - // ServeBlob attempts to serve the blob, identified by dgst, via http. The + // ServeBlob attempts to serve the blob, identifed by dgst, via http. The // service may decide to redirect the client elsewhere or serve the data // directly. 
// diff --git a/vendor/github.com/docker/distribution/circle.yml b/vendor/github.com/docker/distribution/circle.yml index ddc76c86c..52348a4bc 100644 --- a/vendor/github.com/docker/distribution/circle.yml +++ b/vendor/github.com/docker/distribution/circle.yml @@ -8,7 +8,7 @@ machine: post: # go - - gvm install go1.8 --prefer-binary --name=stable + - gvm install go1.6 --prefer-binary --name=stable environment: # Convenient shortcuts to "common" locations @@ -34,7 +34,7 @@ dependencies: override: # Install dependencies for every copied clone/go version - - gvm use stable && go get github.com/lk4d4/vndr: + - gvm use stable && go get github.com/tools/godep: pwd: $BASE_STABLE post: @@ -49,15 +49,14 @@ test: # - gvm use old && go version - gvm use stable && go version + # todo(richard): replace with a more robust vendoring solution. Removed due to a fundamental disagreement in godep philosophies. # Ensure validation of dependencies - - git fetch origin: - pwd: $BASE_STABLE - - gvm use stable && if test -n "`git diff --stat=1000 origin/master | grep -E \"^[[:space:]]*vendor\"`"; then make dep-validate; fi: - pwd: $BASE_STABLE + # - gvm use stable && if test -n "`git diff --stat=1000 master | grep -Ei \"vendor|godeps\"`"; then make dep-validate; fi: + # pwd: $BASE_STABLE # First thing: build everything. This will catch compile errors, and it's # also necessary for go vet to work properly (see #807). - - gvm use stable && go install $(go list ./... | grep -v "/vendor/"): + - gvm use stable && godep go install $(go list ./... | grep -v "/vendor/"): pwd: $BASE_STABLE # FMT @@ -74,12 +73,12 @@ test: override: # Test stable, and report - - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... 
| grep -v "/vendor/" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE': + - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE': timeout: 1000 pwd: $BASE_STABLE # Test stable with race - - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | grep -v "registry/handlers" | grep -v "registry/storage/driver" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; go test -race -tags "$DOCKER_BUILDTAGS" -test.short $PACKAGE': + - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | grep -v "registry/handlers" | grep -v "registry/storage/driver" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -race -tags "$DOCKER_BUILDTAGS" -test.short $PACKAGE': timeout: 1000 pwd: $BASE_STABLE post: diff --git a/vendor/github.com/docker/distribution/cmd/digest/main.go b/vendor/github.com/docker/distribution/cmd/digest/main.go index 20f64ddb4..49426a886 100644 --- a/vendor/github.com/docker/distribution/cmd/digest/main.go +++ b/vendor/github.com/docker/distribution/cmd/digest/main.go @@ -7,8 +7,8 @@ import ( "log" "os" + "github.com/docker/distribution/digest" "github.com/docker/distribution/version" - "github.com/opencontainers/go-digest" ) var ( @@ -32,7 +32,7 @@ func init() { func usage() { fmt.Fprintf(os.Stderr, "usage: %s [files...]\n", os.Args[0]) - fmt.Fprint(os.Stderr, ` + fmt.Fprintf(os.Stderr, ` Calculate the digest of one or more input files, emitting the result to standard out. If no files are provided, the digest of stdin will be calculated. 
diff --git a/vendor/github.com/docker/distribution/configuration/configuration.go b/vendor/github.com/docker/distribution/configuration/configuration.go index cdc996b9d..55b9fcba1 100644 --- a/vendor/github.com/docker/distribution/configuration/configuration.go +++ b/vendor/github.com/docker/distribution/configuration/configuration.go @@ -1,7 +1,6 @@ package configuration import ( - "errors" "fmt" "io" "io/ioutil" @@ -133,7 +132,7 @@ type Configuration struct { // HTTP2 configuration options HTTP2 struct { - // Specifies whether the registry should disallow clients attempting + // Specifies wether the registry should disallow clients attempting // to connect via http2. If set to true, only http/1.1 is supported. Disabled bool `yaml:"disabled,omitempty"` } `yaml:"http2,omitempty"` @@ -189,11 +188,8 @@ type Configuration struct { // Validation configures validation options for the registry. Validation struct { - // Enabled enables the other options in this section. This field is - // deprecated in favor of Disabled. + // Enabled enables the other options in this section. Enabled bool `yaml:"enabled,omitempty"` - // Disabled disables the other options in this section. - Disabled bool `yaml:"disabled,omitempty"` // Manifests configures manifest validation. Manifests struct { // URLs configures validation for URLs in pushed manifests. @@ -207,19 +203,6 @@ type Configuration struct { } `yaml:"urls,omitempty"` } `yaml:"manifests,omitempty"` } `yaml:"validation,omitempty"` - - // Policy configures registry policy options. - Policy struct { - // Repository configures policies for repositories - Repository struct { - // Classes is a list of repository classes which the - // registry allows content for. This class is matched - // against the configuration media type inside uploaded - // manifests. When non-empty, the registry will enforce - // the class in authorized resources. 
- Classes []string `yaml:"classes"` - } `yaml:"repository,omitempty"` - } `yaml:"policy,omitempty"` } // LogHook is composed of hook Level and Type. @@ -236,7 +219,7 @@ type LogHook struct { // Levels set which levels of log message will let hook executed. Levels []string `yaml:"levels,omitempty"` - // MailOptions allows user to configure email parameters. + // MailOptions allows user to configurate email parameters. MailOptions MailOptions `yaml:"options,omitempty"` } @@ -330,7 +313,7 @@ type Health struct { type v0_1Configuration Configuration // UnmarshalYAML implements the yaml.Unmarshaler interface -// Unmarshals a string of the form X.Y into a Version, validating that X and Y can represent unsigned integers +// Unmarshals a string of the form X.Y into a Version, validating that X and Y can represent uints func (version *Version) UnmarshalYAML(unmarshal func(interface{}) error) error { var versionString string err := unmarshal(&versionString) @@ -628,7 +611,7 @@ func Parse(rd io.Reader) (*Configuration, error) { v0_1.Loglevel = Loglevel("info") } if v0_1.Storage.Type() == "" { - return nil, errors.New("No storage configuration provided") + return nil, fmt.Errorf("No storage configuration provided") } return (*Configuration)(v0_1), nil } diff --git a/vendor/github.com/docker/distribution/context/doc.go b/vendor/github.com/docker/distribution/context/doc.go index 9b623074e..3b4ab8882 100644 --- a/vendor/github.com/docker/distribution/context/doc.go +++ b/vendor/github.com/docker/distribution/context/doc.go @@ -64,7 +64,7 @@ // Note that this only affects the new context, the previous context, with the // version field, can be used independently. Put another way, the new logger, // added to the request context, is unique to that context and can have -// request scoped variables. +// request scoped varaibles. 
// // HTTP Requests // diff --git a/vendor/github.com/docker/distribution/contrib/compose/README.md b/vendor/github.com/docker/distribution/contrib/compose/README.md index 45050b70d..a9522fd24 100644 --- a/vendor/github.com/docker/distribution/contrib/compose/README.md +++ b/vendor/github.com/docker/distribution/contrib/compose/README.md @@ -123,13 +123,13 @@ to the 1.0 registry. Requests from newer clients will route to the 2.0 registry. 4. Use `curl` to list the image in the registry. - $ curl -v -X GET http://localhost:5000/v2/registry_one/tags/list + $ curl -v -X GET http://localhost:32777/v2/registry1/tags/list * Hostname was NOT found in DNS cache * Trying 127.0.0.1... * Connected to localhost (127.0.0.1) port 32777 (#0) > GET /v2/registry1/tags/list HTTP/1.1 > User-Agent: curl/7.36.0 - > Host: localhost:5000 + > Host: localhost:32777 > Accept: */* > < HTTP/1.1 200 OK @@ -138,7 +138,7 @@ to the 1.0 registry. Requests from newer clients will route to the 2.0 registry. < Date: Tue, 14 Apr 2015 22:34:13 GMT < Content-Length: 39 < - {"name":"registry_one","tags":["latest"]} + {"name":"registry1","tags":["latest"]} * Connection #0 to host localhost left intact This example refers to the specific port assigned to the 2.0 registry. 
You saw diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml b/vendor/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml index 374197acc..4d4f3856f 100644 --- a/vendor/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml @@ -64,7 +64,7 @@ registryv2tokenoauthnotls: - ./tokenserver-oauth/certs/signing.cert:/etc/docker/registry/tokenbundle.pem tokenserveroauth: build: "tokenserver-oauth" - command: "--debug -addr 0.0.0.0:5559 -issuer registry-test -passwd .htpasswd -tlscert tls.cert -tlskey tls.key -key sign.key -realm http://auth.localregistry:5559 -enforce-class" + command: "--debug -addr 0.0.0.0:5559 -issuer registry-test -passwd .htpasswd -tlscert tls.cert -tlskey tls.key -key sign.key -realm http://auth.localregistry:5559" ports: - "5559" malevolent: diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/golem.conf b/vendor/github.com/docker/distribution/contrib/docker-integration/golem.conf index eb1757076..99c8d600c 100644 --- a/vendor/github.com/docker/distribution/contrib/docker-integration/golem.conf +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/golem.conf @@ -1,6 +1,6 @@ [[suite]] dind=true - images=[ "nginx:1.9", "dmcgowan/token-server:simple", "dmcgowan/token-server:oauth", "dmcgowan/malevolent:0.1.0", "dmcgowan/ncat:latest" ] + images=[ "nginx:1.9", "dmcgowan/token-server:simple", "dmcgowan/token-server:oauth", "dmcgowan/malevolent:0.1.0" ] [[suite.pretest]] command="sh ./install_certs.sh /etc/generated_certs.d" diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/helpers.bash b/vendor/github.com/docker/distribution/contrib/docker-integration/helpers.bash index 8760f9cf3..e1813d3e5 100644 --- a/vendor/github.com/docker/distribution/contrib/docker-integration/helpers.bash +++ 
b/vendor/github.com/docker/distribution/contrib/docker-integration/helpers.bash @@ -32,44 +32,18 @@ function basic_auth_version_check() { fi } -email="a@nowhere.com" - -# docker_t_login calls login with email depending on version -function docker_t_login() { - # Only pass email field pre 1.11, no deprecation warning - parse_version "$GOLEM_DIND_VERSION" - v=$version - parse_version "1.11.0" - if [ "$v" -lt "$version" ]; then - run docker_t login -e $email $@ - else - run docker_t login $@ - fi -} - # login issues a login to docker to the provided server # uses user, password, and email variables set outside of function # requies bats function login() { rm -f /root/.docker/config.json - - docker_t_login -u $user -p $password $1 + run docker_t login -u $user -p $password -e $email $1 if [ "$status" -ne 0 ]; then echo $output fi [ "$status" -eq 0 ] - - # Handle different deprecation warnings - parse_version "$GOLEM_DIND_VERSION" - v=$version - parse_version "1.11.0" - if [ "$v" -lt "$version" ]; then - # First line is WARNING about credential save or email deprecation (maybe both) - [ "${lines[2]}" = "Login Succeeded" -o "${lines[1]}" = "Login Succeeded" ] - else - [ "${lines[0]}" = "Login Succeeded" ] - fi - + # First line is WARNING about credential save or email deprecation (maybe both) + [ "${lines[2]}" = "Login Succeeded" -o "${lines[1]}" = "Login Succeeded" ] } function login_oauth() { @@ -118,7 +92,7 @@ function docker_t() { docker exec dockerdaemon docker $@ } -# build creates a new docker image id from another image +# build reates a new docker image id from another image function build() { docker exec -i dockerdaemon docker build --no-cache -t $1 - < $plugindir/config.json <: +// +// An example of a sha256 digest representation follows: +// +// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc +// +// In this case, the string "sha256" is the algorithm and the hex bytes are +// the "digest". 
+// +// Because the Digest type is simply a string, once a valid Digest is +// obtained, comparisons are cheap, quick and simple to express with the +// standard equality operator. +// +// Verification +// +// The main benefit of using the Digest type is simple verification against a +// given digest. The Verifier interface, modeled after the stdlib hash.Hash +// interface, provides a common write sink for digest verification. After +// writing is complete, calling the Verifier.Verified method will indicate +// whether or not the stream of bytes matches the target digest. +// +// Missing Features +// +// In addition to the above, we intend to add the following features to this +// package: +// +// 1. A Digester type that supports write sink digest calculation. +// +// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry. +// +package digest diff --git a/vendor/github.com/docker/distribution/digestset/set.go b/vendor/github.com/docker/distribution/digest/set.go similarity index 90% rename from vendor/github.com/docker/distribution/digestset/set.go rename to vendor/github.com/docker/distribution/digest/set.go index 71327dca7..4b9313c1a 100644 --- a/vendor/github.com/docker/distribution/digestset/set.go +++ b/vendor/github.com/docker/distribution/digest/set.go @@ -1,12 +1,10 @@ -package digestset +package digest import ( "errors" "sort" "strings" "sync" - - digest "github.com/opencontainers/go-digest" ) var ( @@ -46,7 +44,7 @@ func NewSet() *Set { // values or short values. This function does not test equality, // rather whether the second value could match against the first // value. 
-func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool { +func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool { if len(hex) == len(shortHex) { if hex != shortHex { return false @@ -66,7 +64,7 @@ func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool // If no digests could be found ErrDigestNotFound will be returned // with an empty digest value. If multiple matches are found // ErrDigestAmbiguous will be returned with an empty digest value. -func (dst *Set) Lookup(d string) (digest.Digest, error) { +func (dst *Set) Lookup(d string) (Digest, error) { dst.mutex.RLock() defer dst.mutex.RUnlock() if len(dst.entries) == 0 { @@ -74,11 +72,11 @@ func (dst *Set) Lookup(d string) (digest.Digest, error) { } var ( searchFunc func(int) bool - alg digest.Algorithm + alg Algorithm hex string ) - dgst, err := digest.Parse(d) - if err == digest.ErrDigestInvalidFormat { + dgst, err := ParseDigest(d) + if err == ErrDigestInvalidFormat { hex = d searchFunc = func(i int) bool { return dst.entries[i].val >= d @@ -110,7 +108,7 @@ func (dst *Set) Lookup(d string) (digest.Digest, error) { // Add adds the given digest to the set. An error will be returned // if the given digest is invalid. If the digest already exists in the // set, this operation will be a no-op. -func (dst *Set) Add(d digest.Digest) error { +func (dst *Set) Add(d Digest) error { if err := d.Validate(); err != nil { return err } @@ -141,7 +139,7 @@ func (dst *Set) Add(d digest.Digest) error { // Remove removes the given digest from the set. An err will be // returned if the given digest is invalid. If the digest does // not exist in the set, this operation will be a no-op. 
-func (dst *Set) Remove(d digest.Digest) error { +func (dst *Set) Remove(d Digest) error { if err := d.Validate(); err != nil { return err } @@ -169,10 +167,10 @@ func (dst *Set) Remove(d digest.Digest) error { } // All returns all the digests in the set -func (dst *Set) All() []digest.Digest { +func (dst *Set) All() []Digest { dst.mutex.RLock() defer dst.mutex.RUnlock() - retValues := make([]digest.Digest, len(dst.entries)) + retValues := make([]Digest, len(dst.entries)) for i := range dst.entries { retValues[i] = dst.entries[i].digest } @@ -185,10 +183,10 @@ func (dst *Set) All() []digest.Digest { // entire value of digest if uniqueness cannot be achieved without the // full value. This function will attempt to make short codes as short // as possible to be unique. -func ShortCodeTable(dst *Set, length int) map[digest.Digest]string { +func ShortCodeTable(dst *Set, length int) map[Digest]string { dst.mutex.RLock() defer dst.mutex.RUnlock() - m := make(map[digest.Digest]string, len(dst.entries)) + m := make(map[Digest]string, len(dst.entries)) l := length resetIdx := 0 for i := 0; i < len(dst.entries); i++ { @@ -224,9 +222,9 @@ func ShortCodeTable(dst *Set, length int) map[digest.Digest]string { } type digestEntry struct { - alg digest.Algorithm + alg Algorithm val string - digest digest.Digest + digest Digest } type digestEntries []*digestEntry diff --git a/vendor/github.com/docker/distribution/digestset/set_test.go b/vendor/github.com/docker/distribution/digest/set_test.go similarity index 93% rename from vendor/github.com/docker/distribution/digestset/set_test.go rename to vendor/github.com/docker/distribution/digest/set_test.go index 89c5729d0..e9dab8795 100644 --- a/vendor/github.com/docker/distribution/digestset/set_test.go +++ b/vendor/github.com/docker/distribution/digest/set_test.go @@ -1,23 +1,20 @@ -package digestset +package digest import ( "crypto/sha256" - _ "crypto/sha512" "encoding/binary" "math/rand" "testing" - - digest 
"github.com/opencontainers/go-digest" ) -func assertEqualDigests(t *testing.T, d1, d2 digest.Digest) { +func assertEqualDigests(t *testing.T, d1, d2 Digest) { if d1 != d2 { t.Fatalf("Digests do not match:\n\tActual: %s\n\tExpected: %s", d1, d2) } } func TestLookup(t *testing.T) { - digests := []digest.Digest{ + digests := []Digest{ "sha256:1234511111111111111111111111111111111111111111111111111111111111", "sha256:1234111111111111111111111111111111111111111111111111111111111111", "sha256:1234611111111111111111111111111111111111111111111111111111111111", @@ -91,7 +88,7 @@ func TestLookup(t *testing.T) { } func TestAddDuplication(t *testing.T) { - digests := []digest.Digest{ + digests := []Digest{ "sha256:1234111111111111111111111111111111111111111111111111111111111111", "sha256:1234511111111111111111111111111111111111111111111111111111111111", "sha256:1234611111111111111111111111111111111111111111111111111111111111", @@ -113,7 +110,7 @@ func TestAddDuplication(t *testing.T) { t.Fatal("Invalid dset size") } - if err := dset.Add(digest.Digest("sha256:1234511111111111111111111111111111111111111111111111111111111111")); err != nil { + if err := dset.Add(Digest("sha256:1234511111111111111111111111111111111111111111111111111111111111")); err != nil { t.Fatal(err) } @@ -121,7 +118,7 @@ func TestAddDuplication(t *testing.T) { t.Fatal("Duplicate digest insert allowed") } - if err := dset.Add(digest.Digest("sha384:123451111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")); err != nil { + if err := dset.Add(Digest("sha384:123451111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")); err != nil { t.Fatal(err) } @@ -173,7 +170,7 @@ func TestAll(t *testing.T) { } } - all := map[digest.Digest]struct{}{} + all := map[Digest]struct{}{} for _, dgst := range dset.All() { all[dgst] = struct{}{} } @@ -197,7 +194,7 @@ func assertEqualShort(t *testing.T, actual, expected string) { } func TestShortCodeTable(t 
*testing.T) { - digests := []digest.Digest{ + digests := []Digest{ "sha256:1234111111111111111111111111111111111111111111111111111111111111", "sha256:1234511111111111111111111111111111111111111111111111111111111111", "sha256:1234611111111111111111111111111111111111111111111111111111111111", @@ -230,15 +227,15 @@ func TestShortCodeTable(t *testing.T) { assertEqualShort(t, dump[digests[7]], "653") } -func createDigests(count int) ([]digest.Digest, error) { +func createDigests(count int) ([]Digest, error) { r := rand.New(rand.NewSource(25823)) - digests := make([]digest.Digest, count) + digests := make([]Digest, count) for i := range digests { h := sha256.New() if err := binary.Write(h, binary.BigEndian, r.Int63()); err != nil { return nil, err } - digests[i] = digest.NewDigest("sha256", h) + digests[i] = NewDigest("sha256", h) } return digests, nil } diff --git a/vendor/github.com/docker/distribution/digest/verifiers.go b/vendor/github.com/docker/distribution/digest/verifiers.go new file mode 100644 index 000000000..9af3be134 --- /dev/null +++ b/vendor/github.com/docker/distribution/digest/verifiers.go @@ -0,0 +1,44 @@ +package digest + +import ( + "hash" + "io" +) + +// Verifier presents a general verification interface to be used with message +// digests and other byte stream verifications. Users instantiate a Verifier +// from one of the various methods, write the data under test to it then check +// the result with the Verified method. +type Verifier interface { + io.Writer + + // Verified will return true if the content written to Verifier matches + // the digest. + Verified() bool +} + +// NewDigestVerifier returns a verifier that compares the written bytes +// against a passed in digest. 
+func NewDigestVerifier(d Digest) (Verifier, error) { + if err := d.Validate(); err != nil { + return nil, err + } + + return hashVerifier{ + hash: d.Algorithm().Hash(), + digest: d, + }, nil +} + +type hashVerifier struct { + digest Digest + hash hash.Hash +} + +func (hv hashVerifier) Write(p []byte) (n int, err error) { + return hv.hash.Write(p) +} + +func (hv hashVerifier) Verified() bool { + return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash) +} diff --git a/vendor/github.com/docker/distribution/digest/verifiers_test.go b/vendor/github.com/docker/distribution/digest/verifiers_test.go new file mode 100644 index 000000000..c342d6e7c --- /dev/null +++ b/vendor/github.com/docker/distribution/digest/verifiers_test.go @@ -0,0 +1,49 @@ +package digest + +import ( + "bytes" + "crypto/rand" + "io" + "testing" +) + +func TestDigestVerifier(t *testing.T) { + p := make([]byte, 1<<20) + rand.Read(p) + digest := FromBytes(p) + + verifier, err := NewDigestVerifier(digest) + if err != nil { + t.Fatalf("unexpected error getting digest verifier: %s", err) + } + + io.Copy(verifier, bytes.NewReader(p)) + + if !verifier.Verified() { + t.Fatalf("bytes not verified") + } +} + +// TestVerifierUnsupportedDigest ensures that unsupported digest validation is +// flowing through verifier creation. +func TestVerifierUnsupportedDigest(t *testing.T) { + unsupported := Digest("bean:0123456789abcdef") + + _, err := NewDigestVerifier(unsupported) + if err == nil { + t.Fatalf("expected error when creating verifier") + } + + if err != ErrDigestUnsupported { + t.Fatalf("incorrect error for unsupported digest: %v", err) + } +} + +// TODO(stevvooe): Add benchmarks to measure bytes/second throughput for +// DigestVerifier. +// +// The relevant benchmark for comparison can be run with the following +// commands: +// +// go test -bench . 
crypto/sha1 +// diff --git a/vendor/github.com/docker/distribution/docs/Dockerfile b/vendor/github.com/docker/distribution/docs/Dockerfile new file mode 100644 index 000000000..fcc634229 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/Dockerfile @@ -0,0 +1,9 @@ +FROM docs/base:oss +MAINTAINER Docker Docs + +ENV PROJECT=registry + +# To get the git info for this repo +COPY . /src +RUN rm -rf /docs/content/$PROJECT/ +COPY . /docs/content/$PROJECT/ diff --git a/vendor/github.com/docker/distribution/docs/Makefile b/vendor/github.com/docker/distribution/docs/Makefile new file mode 100644 index 000000000..585bc871a --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/Makefile @@ -0,0 +1,38 @@ +.PHONY: all default docs docs-build docs-shell shell test + +# to allow `make DOCSDIR=docs docs-shell` (to create a bind mount in docs) +DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) + +# to allow `make DOCSPORT=9000 docs` +DOCSPORT := 8000 + +# Get the IP ADDRESS +DOCKER_IP=$(shell python -c "import urlparse ; print urlparse.urlparse('$(DOCKER_HOST)').hostname or ''") +HUGO_BASE_URL=$(shell test -z "$(DOCKER_IP)" && echo localhost || echo "$(DOCKER_IP)") +HUGO_BIND_IP=0.0.0.0 + +GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) +GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g") +DOCKER_DOCS_IMAGE := registry-docs$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN)) + +DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE + +# for some docs workarounds (see below in "docs-build" target) +GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null) + +default: docs + +docs: docs-build + $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP) + +docs-draft: docs-build + $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST 
"$(DOCKER_DOCS_IMAGE)" hugo server --buildDrafts="true" --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP) + +docs-shell: docs-build + $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash + +docs-build: + docker build -t "$(DOCKER_DOCS_IMAGE)" . + +test: docs-build + $(DOCKER_RUN_DOCS) "$(DOCKER_DOCS_IMAGE)" diff --git a/vendor/github.com/docker/distribution/docs/README.md b/vendor/github.com/docker/distribution/docs/README.md deleted file mode 100644 index b26dc3754..000000000 --- a/vendor/github.com/docker/distribution/docs/README.md +++ /dev/null @@ -1,16 +0,0 @@ -# The docs have been moved! - -The documentation for Registry has been merged into -[the general documentation repo](https://github.com/docker/docker.github.io). -Commit history has been preserved. - -The docs for Registry are now here: -https://github.com/docker/docker.github.io/tree/master/registry - -> Note: The definitive [./spec directory](spec/) directory and -[configuration.md](configuration.md) file will be maintained in this repository -and be refreshed periodically in -[the general documentation repo](https://github.com/docker/docker.github.io). - -As always, the docs in the general repo remain open-source and we appreciate -your feedback and pull requests! 
diff --git a/vendor/github.com/docker/distribution/docs/architecture.md b/vendor/github.com/docker/distribution/docs/architecture.md index c2aaa9f2d..392517608 100644 --- a/vendor/github.com/docker/distribution/docs/architecture.md +++ b/vendor/github.com/docker/distribution/docs/architecture.md @@ -1,6 +1,8 @@ ---- -published: false ---- + # Architecture diff --git a/vendor/github.com/docker/distribution/docs/compatibility.md b/vendor/github.com/docker/distribution/docs/compatibility.md new file mode 100644 index 000000000..cba7e378d --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/compatibility.md @@ -0,0 +1,84 @@ + + +# Registry Compatibility + +## Synopsis +*If a manifest is pulled by _digest_ from a registry 2.3 with Docker Engine 1.9 +and older, and the manifest was pushed with Docker Engine 1.10, a security check +will cause the Engine to receive a manifest it cannot use and the pull will fail.* + +## Registry Manifest Support + +Historically, the registry has supported a [single manifest type](./spec/manifest-v2-1.md) +known as _Schema 1_. + +With the move toward multiple architecture images the distribution project +introduced two new manifest types: Schema 2 manifests and manifest lists. The +registry 2.3 supports all three manifest types and in order to be compatible +with older Docker engines will, in certain cases, do an on-the-fly +transformation of a manifest before serving the JSON in the response. + +This conversion has some implications for pulling manifests by digest and this +document enumerate these implications. + + +## Content Addressable Storage (CAS) + +Manifests are stored and retrieved in the registry by keying off a digest +representing a hash of the contents. One of the advantages provided by CAS is +security: if the contents are changed, then the digest will no longer match. +This prevents any modification of the manifest by a MITM attack or an untrusted +third party. 
+ +When a manifest is stored by the registry, this digest is returned in the HTTP +response headers and, if events are configured, delivered within the event. The +manifest can either be retrieved by the tag, or this digest. + +For registry versions 2.2.1 and below, the registry will always store and +serve _Schema 1_ manifests. The Docker Engine 1.10 will first +attempt to send a _Schema 2_ manifest, falling back to sending a +Schema 1 type manifest when it detects that the registry does not +support the new version. + + +## Registry v2.3 + +### Manifest Push with Docker 1.9 and Older + +The Docker Engine will construct a _Schema 1_ manifest which the +registry will persist to disk. + +When the manifest is pulled by digest or tag with any docker version, a +_Schema 1_ manifest will be returned. + +### Manifest Push with Docker 1.10 + +The docker engine will construct a _Schema 2_ manifest which the +registry will persist to disk. + +When the manifest is pulled by digest or tag with Docker Engine 1.10, a +_Schema 2_ manifest will be returned. The Docker Engine 1.10 +understands the new manifest format. + +When the manifest is pulled by *tag* with Docker Engine 1.9 and older, the +manifest is converted on-the-fly to _Schema 1_ and sent in the +response. The Docker Engine 1.9 is compatible with this older format. + +*When the manifest is pulled by _digest_ with Docker Engine 1.9 and older, the +same rewriting process will not happen in the registry. If this were to happen +the digest would no longer match the hash of the manifest and would violate the +constraints of CAS.* + +For this reason if a manifest is pulled by _digest_ from a registry 2.3 with Docker +Engine 1.9 and older, and the manifest was pushed with Docker Engine 1.10, a +security check will cause the Engine to receive a manifest it cannot use and the +pull will fail. 
diff --git a/vendor/github.com/docker/distribution/docs/configuration.md b/vendor/github.com/docker/distribution/docs/configuration.md index c7f9023fb..fb3de48e2 100644 --- a/vendor/github.com/docker/distribution/docs/configuration.md +++ b/vendor/github.com/docker/distribution/docs/configuration.md @@ -1,486 +1,497 @@ ---- -title: "Configuring a registry" -description: "Explains how to configure a registry" -keywords: registry, on-prem, images, tags, repository, distribution, configuration ---- + -The Registry configuration is based on a YAML file, detailed below. While it -comes with sane default values out of the box, you should review it exhaustively -before moving your systems to production. +# Registry Configuration Reference + +The Registry configuration is based on a YAML file, detailed below. While it comes with sane default values out of the box, you are heavily encouraged to review it exhaustively before moving your systems to production. ## Override specific configuration options -In a typical setup where you run your Registry from the official image, you can -specify a configuration variable from the environment by passing `-e` arguments -to your `docker run` stanza or from within a Dockerfile using the `ENV` -instruction. +In a typical setup where you run your Registry from the official image, you can specify a configuration variable from the environment by passing `-e` arguments to your `docker run` stanza, or from within a Dockerfile using the `ENV` instruction. To override a configuration option, create an environment variable named -`REGISTRY_variable` where `variable` is the name of the configuration option +`REGISTRY_variable` where *`variable`* is the name of the configuration option and the `_` (underscore) represents indention levels. 
For example, you can configure the `rootdirectory` of the `filesystem` storage backend: -```none -storage: - filesystem: - rootdirectory: /var/lib/registry -``` + storage: + filesystem: + rootdirectory: /var/lib/registry To override this value, set an environment variable like this: -```none -REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere -``` + REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere This variable overrides the `/var/lib/registry` value to the `/somewhere` directory. -> **Note**: Create a base configuration file with environment variables that can -> be configured to tweak individual values. Overriding configuration sections -> with environment variables is not recommended. +>**NOTE**: It is highly recommended to create a base configuration file with which environment variables can be used to tweak individual values. Overriding configuration sections with environment variables is not recommended. ## Overriding the entire configuration file -If the default configuration is not a sound basis for your usage, or if you are -having issues overriding keys from the environment, you can specify an alternate -YAML configuration file by mounting it as a volume in the container. +If the default configuration is not a sound basis for your usage, or if you are having issues overriding keys from the environment, you can specify an alternate YAML configuration file by mounting it as a volume in the container. 
-Typically, create a new configuration file from scratch,named `config.yml`, then -specify it in the `docker run` command: +Typically, create a new configuration file from scratch, and call it `config.yml`, then: -```bash -$ docker run -d -p 5000:5000 --restart=always --name registry \ - -v `pwd`/config.yml:/etc/docker/registry/config.yml \ - registry:2 -``` + docker run -d -p 5000:5000 --restart=always --name registry \ + -v `pwd`/config.yml:/etc/docker/registry/config.yml \ + registry:2 -Use this -[example YAML file](https://github.com/docker/distribution/blob/master/cmd/registry/config-example.yml) -as a starting point. +You can (and probably should) use [this as a starting point](https://github.com/docker/distribution/blob/master/cmd/registry/config-example.yml). ## List of configuration options -These are all configuration options for the registry. Some options in the list -are mutually exclusive. Read the detailed reference information about each -option before finalizing your configuration. +This section lists all the registry configuration options. Some options in +the list are mutually exclusive. So, make sure to read the detailed reference +information about each option that appears later in this page. 
-```none -version: 0.1 -log: - accesslog: - disabled: true - level: debug - formatter: text - fields: - service: registry - environment: staging - hooks: - - type: mail - disabled: true - levels: - - panic - options: - smtp: - addr: mail.example.com:25 - username: mailuser - password: password - insecure: true - from: sender@example.com - to: - - errors@example.com -loglevel: debug # deprecated: use "log" -storage: - filesystem: - rootdirectory: /var/lib/registry - maxthreads: 100 - azure: - accountname: accountname - accountkey: base64encodedaccountkey - container: containername - gcs: - bucket: bucketname - keyfile: /path/to/keyfile - rootdirectory: /gcs/object/name/prefix - chunksize: 5242880 - s3: - accesskey: awsaccesskey - secretkey: awssecretkey - region: us-west-1 - regionendpoint: http://myobjects.local - bucket: bucketname - encrypt: true - keyid: mykeyid - secure: true - v4auth: true - chunksize: 5242880 - multipartcopychunksize: 33554432 - multipartcopymaxconcurrency: 100 - multipartcopythresholdsize: 33554432 - rootdirectory: /s3/object/name/prefix - swift: - username: username - password: password - authurl: https://storage.myprovider.com/auth/v1.0 or https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth - tenant: tenantname - tenantid: tenantid - domain: domain name for Openstack Identity v3 API - domainid: domain id for Openstack Identity v3 API - insecureskipverify: true - region: fr - container: containername - rootdirectory: /swift/object/name/prefix - oss: - accesskeyid: accesskeyid - accesskeysecret: accesskeysecret - region: OSS region name - endpoint: optional endpoints - internal: optional internal endpoint - bucket: OSS bucket - encrypt: optional data encryption setting - secure: optional ssl setting - chunksize: optional size valye - rootdirectory: optional root directory - inmemory: # This driver takes no parameters - delete: - enabled: false - redirect: - disable: false - cache: - blobdescriptor: redis - 
maintenance: - uploadpurging: - enabled: true - age: 168h - interval: 24h - dryrun: false - readonly: - enabled: false -auth: - silly: - realm: silly-realm - service: silly-service - token: - realm: token-realm - service: token-service - issuer: registry-token-issuer - rootcertbundle: /root/certs/bundle - htpasswd: - realm: basic-realm - path: /path/to/htpasswd -middleware: - registry: - - name: ARegistryMiddleware - options: - foo: bar - repository: - - name: ARepositoryMiddleware - options: - foo: bar - storage: - - name: cloudfront - options: - baseurl: https://my.cloudfronted.domain.com/ - privatekey: /path/to/pem - keypairid: cloudfrontkeypairid - duration: 3000s - storage: - - name: redirect - options: - baseurl: https://example.com/ -reporting: - bugsnag: - apikey: bugsnagapikey - releasestage: bugsnagreleasestage - endpoint: bugsnagendpoint - newrelic: - licensekey: newreliclicensekey - name: newrelicname - verbose: true -http: - addr: localhost:5000 - prefix: /my/nested/registry/ - host: https://myregistryaddress.org:5000 - secret: asecretforlocaldevelopment - relativeurls: false - tls: - certificate: /path/to/x509/public - key: /path/to/x509/private - clientcas: - - /path/to/ca.pem - - /path/to/another/ca.pem - letsencrypt: - cachefile: /path/to/cache-file - email: emailused@letsencrypt.com - debug: - addr: localhost:5001 - headers: - X-Content-Type-Options: [nosniff] - http2: - disabled: false -notifications: - endpoints: - - name: alistener - disabled: false - url: https://my.listener.com/event - headers: - timeout: 1s - threshold: 10 - backoff: 1s - ignoredmediatypes: - - application/octet-stream -redis: - addr: localhost:6379 - password: asecret - db: 0 - dialtimeout: 10ms - readtimeout: 10ms - writetimeout: 10ms - pool: - maxidle: 16 - maxactive: 64 - idletimeout: 300s -health: - storagedriver: - enabled: true - interval: 10s - threshold: 3 - file: - - file: /path/to/checked/file - interval: 10s - http: - - uri: http://server.to.check/must/return/200 
+ version: 0.1 + log: + accesslog: + disabled: true + level: debug + formatter: text + fields: + service: registry + environment: staging + hooks: + - type: mail + disabled: true + levels: + - panic + options: + smtp: + addr: mail.example.com:25 + username: mailuser + password: password + insecure: true + from: sender@example.com + to: + - errors@example.com + loglevel: debug # deprecated: use "log" + storage: + filesystem: + rootdirectory: /var/lib/registry + maxthreads: 100 + azure: + accountname: accountname + accountkey: base64encodedaccountkey + container: containername + gcs: + bucket: bucketname + keyfile: /path/to/keyfile + rootdirectory: /gcs/object/name/prefix + chunksize: 5242880 + s3: + accesskey: awsaccesskey + secretkey: awssecretkey + region: us-west-1 + regionendpoint: http://myobjects.local + bucket: bucketname + encrypt: true + keyid: mykeyid + secure: true + v4auth: true + chunksize: 5242880 + multipartcopychunksize: 33554432 + multipartcopymaxconcurrency: 100 + multipartcopythresholdsize: 33554432 + rootdirectory: /s3/object/name/prefix + swift: + username: username + password: password + authurl: https://storage.myprovider.com/auth/v1.0 or https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth + tenant: tenantname + tenantid: tenantid + domain: domain name for Openstack Identity v3 API + domainid: domain id for Openstack Identity v3 API + insecureskipverify: true + region: fr + container: containername + rootdirectory: /swift/object/name/prefix + oss: + accesskeyid: accesskeyid + accesskeysecret: accesskeysecret + region: OSS region name + endpoint: optional endpoints + internal: optional internal endpoint + bucket: OSS bucket + encrypt: optional data encryption setting + secure: optional ssl setting + chunksize: optional size valye + rootdirectory: optional root directory + inmemory: # This driver takes no parameters + delete: + enabled: false + redirect: + disable: false + cache: + blobdescriptor: redis + maintenance: + 
uploadpurging: + enabled: true + age: 168h + interval: 24h + dryrun: false + readonly: + enabled: false + auth: + silly: + realm: silly-realm + service: silly-service + token: + realm: token-realm + service: token-service + issuer: registry-token-issuer + rootcertbundle: /root/certs/bundle + htpasswd: + realm: basic-realm + path: /path/to/htpasswd + middleware: + registry: + - name: ARegistryMiddleware + options: + foo: bar + repository: + - name: ARepositoryMiddleware + options: + foo: bar + storage: + - name: cloudfront + options: + baseurl: https://my.cloudfronted.domain.com/ + privatekey: /path/to/pem + keypairid: cloudfrontkeypairid + duration: 3000s + storage: + - name: redirect + options: + baseurl: https://example.com/ + reporting: + bugsnag: + apikey: bugsnagapikey + releasestage: bugsnagreleasestage + endpoint: bugsnagendpoint + newrelic: + licensekey: newreliclicensekey + name: newrelicname + verbose: true + http: + addr: localhost:5000 + prefix: /my/nested/registry/ + host: https://myregistryaddress.org:5000 + secret: asecretforlocaldevelopment + relativeurls: false + tls: + certificate: /path/to/x509/public + key: /path/to/x509/private + clientcas: + - /path/to/ca.pem + - /path/to/another/ca.pem + letsencrypt: + cachefile: /path/to/cache-file + email: emailused@letsencrypt.com + debug: + addr: localhost:5001 headers: - Authorization: [Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==] - statuscode: 200 - timeout: 3s - interval: 10s - threshold: 3 - tcp: - - addr: redis-server.domain.com:6379 - timeout: 3s - interval: 10s - threshold: 3 -proxy: - remoteurl: https://registry-1.docker.io - username: [username] - password: [password] -compatibility: - schema1: - signingkeyfile: /etc/registry/key.json -validation: - manifests: - urls: - allow: - - ^https?://([^/]+\.)*example\.com/ - deny: - - ^https?://www\.example\.com/ -``` + X-Content-Type-Options: [nosniff] + http2: + disabled: false + notifications: + endpoints: + - name: alistener + disabled: false + url: 
https://my.listener.com/event + headers: + timeout: 500 + threshold: 5 + backoff: 1000 + ignoredmediatypes: + - application/octet-stream + redis: + addr: localhost:6379 + password: asecret + db: 0 + dialtimeout: 10ms + readtimeout: 10ms + writetimeout: 10ms + pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s + health: + storagedriver: + enabled: true + interval: 10s + threshold: 3 + file: + - file: /path/to/checked/file + interval: 10s + http: + - uri: http://server.to.check/must/return/200 + headers: + Authorization: [Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==] + statuscode: 200 + timeout: 3s + interval: 10s + threshold: 3 + tcp: + - addr: redis-server.domain.com:6379 + timeout: 3s + interval: 10s + threshold: 3 + proxy: + remoteurl: https://registry-1.docker.io + username: [username] + password: [password] + compatibility: + schema1: + signingkeyfile: /etc/registry/key.json + validation: + enabled: true + manifests: + urls: + allow: + - ^https?://([^/]+\.)*example\.com/ + deny: + - ^https?://www\.example\.com/ In some instances a configuration option is **optional** but it contains child -options marked as **required**. In these cases, you can omit the parent with +options marked as **required**. This indicates that you can omit the parent with all its children. However, if the parent is included, you must also include all the children marked **required**. -## `version` +## version -```none -version: 0.1 -``` + version: 0.1 The `version` option is **required**. It specifies the configuration's version. It is expected to remain a top-level field, to allow for a consistent version check before parsing the remainder of the configuration file. -## `log` +## log The `log` subsection configures the behavior of the logging system. The logging system outputs everything to stdout. You can adjust the granularity and format with this configuration section. 
-```none -log: - accesslog: - disabled: true - level: debug - formatter: text - fields: - service: registry - environment: staging -``` + log: + accesslog: + disabled: true + level: debug + formatter: text + fields: + service: registry + environment: staging -| Parameter | Required | Description | -|-------------|----------|-------------| -| `level` | no | Sets the sensitivity of logging output. Permitted values are `error`, `warn`, `info`, and `debug`. The default is `info`. | -| `formatter` | no | This selects the format of logging output. The format primarily affects how keyed attributes for a log line are encoded. Options are `text`, `json`, and `logstash`. The default is `text`. | -| `fields` | no | A map of field names to values. These are added to every log line for the context. This is useful for identifying log messages source after being mixed in other systems. | + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ level + + no + + Sets the sensitivity of logging output. Permitted values are + error, warn, info and + debug. The default is info. +
+ formatter + + no + + This selects the format of logging output. The format primarily affects how keyed + attributes for a log line are encoded. Options are text, json or + logstash. The default is text. +
+ fields + + no + + A map of field names to values. These are added to every log line for + the context. This is useful for identifying log messages source after + being mixed in other systems. +
-### `accesslog` +### accesslog -```none -accesslog: - disabled: true -``` + accesslog: + disabled: true Within `log`, `accesslog` configures the behavior of the access logging system. By default, the access logging system outputs to stdout in [Combined Log Format](https://httpd.apache.org/docs/2.4/logs.html#combined). Access logging can be disabled by setting the boolean flag `disabled` to `true`. -## `hooks` +## hooks -```none -hooks: - - type: mail - levels: - - panic - options: - smtp: - addr: smtp.sendhost.com:25 - username: sendername - password: password - insecure: true - from: name@sendhost.com - to: - - name@receivehost.com -``` + hooks: + - type: mail + levels: + - panic + options: + smtp: + addr: smtp.sendhost.com:25 + username: sendername + password: password + insecure: true + from: name@sendhost.com + to: + - name@receivehost.com The `hooks` subsection configures the logging hooks' behavior. This subsection includes a sequence handler which you can use for sending mail, for example. Refer to `loglevel` to configure the level of messages printed. -## `loglevel` +## loglevel > **DEPRECATED:** Please use [log](#log) instead. -```none -loglevel: debug -``` + loglevel: debug Permitted values are `error`, `warn`, `info` and `debug`. The default is `info`. 
-## `storage` +## storage -```none -storage: - filesystem: - rootdirectory: /var/lib/registry - azure: - accountname: accountname - accountkey: base64encodedaccountkey - container: containername - gcs: - bucket: bucketname - keyfile: /path/to/keyfile - rootdirectory: /gcs/object/name/prefix - s3: - accesskey: awsaccesskey - secretkey: awssecretkey - region: us-west-1 - regionendpoint: http://myobjects.local - bucket: bucketname - encrypt: true - keyid: mykeyid - secure: true - v4auth: true - chunksize: 5242880 - multipartcopychunksize: 33554432 - multipartcopymaxconcurrency: 100 - multipartcopythresholdsize: 33554432 - rootdirectory: /s3/object/name/prefix - swift: - username: username - password: password - authurl: https://storage.myprovider.com/auth/v1.0 or https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth - tenant: tenantname - tenantid: tenantid - domain: domain name for Openstack Identity v3 API - domainid: domain id for Openstack Identity v3 API - insecureskipverify: true - region: fr - container: containername - rootdirectory: /swift/object/name/prefix - oss: - accesskeyid: accesskeyid - accesskeysecret: accesskeysecret - region: OSS region name - endpoint: optional endpoints - internal: optional internal endpoint - bucket: OSS bucket - encrypt: optional data encryption setting - secure: optional ssl setting - chunksize: optional size valye - rootdirectory: optional root directory - inmemory: - delete: - enabled: false - cache: - blobdescriptor: inmemory - maintenance: - uploadpurging: - enabled: true - age: 168h - interval: 24h - dryrun: false - readonly: - enabled: false - redirect: - disable: false -``` + storage: + filesystem: + rootdirectory: /var/lib/registry + azure: + accountname: accountname + accountkey: base64encodedaccountkey + container: containername + gcs: + bucket: bucketname + keyfile: /path/to/keyfile + rootdirectory: /gcs/object/name/prefix + s3: + accesskey: awsaccesskey + secretkey: awssecretkey + region: 
us-west-1 + regionendpoint: http://myobjects.local + bucket: bucketname + encrypt: true + keyid: mykeyid + secure: true + v4auth: true + chunksize: 5242880 + multipartcopychunksize: 33554432 + multipartcopymaxconcurrency: 100 + multipartcopythresholdsize: 33554432 + rootdirectory: /s3/object/name/prefix + swift: + username: username + password: password + authurl: https://storage.myprovider.com/auth/v1.0 or https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth + tenant: tenantname + tenantid: tenantid + domain: domain name for Openstack Identity v3 API + domainid: domain id for Openstack Identity v3 API + insecureskipverify: true + region: fr + container: containername + rootdirectory: /swift/object/name/prefix + oss: + accesskeyid: accesskeyid + accesskeysecret: accesskeysecret + region: OSS region name + endpoint: optional endpoints + internal: optional internal endpoint + bucket: OSS bucket + encrypt: optional data encryption setting + secure: optional ssl setting + chunksize: optional size value + rootdirectory: optional root directory + inmemory: + delete: + enabled: false + cache: + blobdescriptor: inmemory + maintenance: + uploadpurging: + enabled: true + age: 168h + interval: 24h + dryrun: false + redirect: + disable: false -The `storage` option is **required** and defines which storage backend is in -use. You must configure exactly one backend. If you configure more, the registry -returns an error. You can choose any of these backend storage drivers: +The storage option is **required** and defines which storage backend is in use. +You must configure one backend; if you configure more, the registry returns an error. 
You can choose any of these backend storage drivers: -| Storage driver | Description | -|---------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `filesystem` | Uses the local disk to store registry files. It is ideal for development and may be appropriate for some small-scale production applications. See the [driver's reference documentation](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/filesystem.md). | -| `azure` | Uses Microsoft Azure Blob Storage. See the [driver's reference documentation](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/azure.md). | -| `gcs` | Uses Google Cloud Storage. See the [driver's reference documentation](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/gcs.md). | -| `s3` | Uses Amazon Simple Storage Service (S3) and compatible Storage Services. See the [driver's reference documentation](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/s3.md). | -| `swift` | Uses Openstack Swift object storage. See the [driver's reference documentation](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/swift.md). | -| `oss` | Uses Aliyun OSS for object storage. See the [driver's reference documentation](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/oss.md). | +| Storage driver | Description +| ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `filesystem` | Uses the local disk to store registry files. 
It is ideal for development and may be appropriate for some small-scale production applications. See the [driver's reference documentation](storage-drivers/filesystem.md). | +| `azure` | Uses Microsoft's Azure Blob Storage. See the [driver's reference documentation](storage-drivers/azure.md). | +| `gcs` | Uses Google Cloud Storage. See the [driver's reference documentation](storage-drivers/gcs.md). | +| `s3` | Uses Amazon's Simple Storage Service (S3) and compatible Storage Services. See the [driver's reference documentation](storage-drivers/s3.md). | +| `swift` | Uses Openstack Swift object storage. See the [driver's reference documentation](storage-drivers/swift.md). | +| `oss` | Uses Aliyun OSS for object storage. See the [driver's reference documentation](storage-drivers/oss.md). | -For testing only, you can use the [`inmemory` storage -driver](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/inmemory.md). -If you would like to run a registry from volatile memory, use the -[`filesystem` driver](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/filesystem.md) -on a ramdisk. +For purely test purposes, you can use the [`inmemory` storage +driver](storage-drivers/inmemory.md). If you would like to run a registry from +volatile memory, use the [`filesystem` driver](storage-drivers/filesystem.md) on +a ramdisk. -If you are deploying a registry on Windows, a Windows volume mounted from the -host is not recommended. Instead, you can use a S3 or Azure backing -data-store. If you do use a Windows volume, the length of the `PATH` to -the mount point must be within the `MAX_PATH` limits (typically 255 characters), -or this error will occur: +If you are deploying a registry on Windows, be aware that a Windows volume +mounted from the host is not recommended. Instead, you can use an S3 or Azure +backing data-store. 
If you do use a Windows volume, you must ensure that the +`PATH` to the mount point is within Windows' `MAX_PATH` limits (typically 255 +characters). Failure to do so can result in the following error message: -```none -mkdir /XXX protocol error and your registry will not function properly. -``` + mkdir /XXX protocol error and your registry will not function properly. -### `maintenance` +### Maintenance -Currently, upload purging and read-only mode are the only `maintenance` -functions available. +Currently upload purging and read-only mode are the only maintenance functions available. +These and future maintenance functions which are related to storage can be configured under +the maintenance section. -### `uploadpurging` +### Upload Purging -Upload purging is a background process that periodically removes orphaned files -from the upload directories of the registry. Upload purging is enabled by -default. To configure upload directory purging, the following parameters must -be set. +Upload purging is a background process that periodically removes orphaned files from the upload +directories of the registry. Upload purging is enabled by default. To +configure upload directory purging, the following parameters +must be set. -| Parameter | Required | Description | -|------------|----------|----------------------------------------------------------------------------------------------------| -| `enabled` | yes | Set to `true` to enable upload purging. Defaults to `true`. | -| `age` | yes | Upload directories which are older than this age will be deleted.Defaults to `168h` (1 week). | -| `interval` | yes | The interval between upload directory purging. Defaults to `24h`. | -| `dryrun` | yes | Set `dryrun` to `true` to obtain a summary of what directories will be deleted. Defaults to `false`.| +| Parameter | Required | Description + --------- | -------- | ----------- +`enabled` | yes | Set to true to enable upload purging. Default=true. 
| +`age` | yes | Upload directories which are older than this age will be deleted. Default=168h (1 week) +`interval` | yes | The interval between upload directory purging. Default=24h. +`dryrun` | yes | dryrun can be set to true to obtain a summary of what directories will be deleted. Default=false. -> **Note**: `age` and `interval` are strings containing a number with optional -fraction and a unit suffix. Some examples: `45m`, `2h10m`, `168h`. +Note: `age` and `interval` are strings containing a number with optional fraction and a unit suffix: e.g. 45m, 2h10m, 168h (1 week). -### `readonly` +### Read-only mode If the `readonly` section under `maintenance` has `enabled` set to `true`, clients will not be allowed to write to the registry. This mode is useful to @@ -490,153 +501,239 @@ restarted with readonly's `enabled` set to true. After the garbage collection pass finishes, the registry may be restarted again, this time with `readonly` removed from the configuration (or set to false). -### `delete` +### delete -Use the `delete` structure to enable the deletion of image blobs and manifests +Use the `delete` subsection to enable the deletion of image blobs and manifests by digest. It defaults to false, but it can be enabled by writing the following on the configuration file: -```none -delete: - enabled: true -``` + delete: + enabled: true -### `cache` +### cache -Use the `cache` structure to enable caching of data accessed in the storage +Use the `cache` subsection to enable caching of data accessed in the storage backend. Currently, the only available cache provides fast access to layer -metadata, which uses the `blobdescriptor` field if configured. +metadata. This, if configured, uses the `blobdescriptor` field. -You can set `blobdescriptor` field to `redis` or `inmemory`. If set to `redis`,a -Redis pool caches layer metadata. If set to `inmemory`, an in-memory map caches -layer metadata. +You can set `blobdescriptor` field to `redis` or `inmemory`. 
The `redis` value uses +a Redis pool to cache layer metadata. The `inmemory` value uses an in memory +map. -> **NOTE**: Formerly, `blobdescriptor` was known as `layerinfo`. While these -> are equivalent, `layerinfo` has been deprecated. +>**NOTE**: Formerly, `blobdescriptor` was known as `layerinfo`. While these +>are equivalent, `layerinfo` has been deprecated, in favor of +>`blobdescriptor`. -### `redirect` +### redirect The `redirect` subsection provides configuration for managing redirects from content backends. For backends that support it, redirecting is enabled by -default. In certain deployment scenarios, you may decide to route all data -through the Registry, rather than redirecting to the backend. This may be more -efficient when using a backend that is not co-located or when a registry -instance is aggressively caching. +default. Certain deployment scenarios may prefer to route all data through the +Registry, rather than redirecting to the backend. This may be more efficient +when using a backend that is not co-located or when a registry instance is +doing aggressive caching. -To disable redirects, add a single flag `disable`, set to `true` +Redirects can be disabled by adding a single flag `disable`, set to `true` under the `redirect` section: -```none -redirect: - disable: true -``` + redirect: + disable: true -## `auth` -```none -auth: - silly: - realm: silly-realm - service: silly-service - token: - realm: token-realm - service: token-service - issuer: registry-token-issuer - rootcertbundle: /root/certs/bundle - htpasswd: - realm: basic-realm - path: /path/to/htpasswd -``` +## auth -The `auth` option is **optional**. 
Possible auth providers include: + auth: + silly: + realm: silly-realm + service: silly-service + token: + realm: token-realm + service: token-service + issuer: registry-token-issuer + rootcertbundle: /root/certs/bundle + htpasswd: + realm: basic-realm + path: /path/to/htpasswd -- [`silly`](#silly) -- [`token`](#token) -- [`htpasswd`](#htpasswd) +The `auth` option is **optional**. There are +currently 3 possible auth providers, `silly`, `token` and `htpasswd`. You can configure only +one `auth` provider. -You can configure only one authentication provider. +### silly -### `silly` - -The `silly` authentication provider is only appropriate for development. It simply checks -for the existence of the `Authorization` header in the HTTP request. It does not -check the header's value. If the header does not exist, the `silly` auth -responds with a challenge response, echoing back the realm, service, and scope -for which access was denied. +The `silly` auth is only for development purposes. It simply checks for the +existence of the `Authorization` header in the HTTP request. It has no regard for +the header's value. If the header does not exist, the `silly` auth responds with a +challenge response, echoing back the realm, service, and scope that access was +denied for. The following values are used to configure the response: -| Parameter | Required | Description | -|-----------|----------|-------------------------------------------------------| -| `realm` | yes | The realm in which the registry server authenticates. | -| `service` | yes | The service being authenticated. | - -### `token` - -Token-based authentication allows you to decouple the authentication system from -the registry. It is an established authentication paradigm with a high degree of -security. - -| Parameter | Required | Description | -|-----------|----------|-------------------------------------------------------| -| `realm` | yes | The realm in which the registry server authenticates. 
| -| `service` | yes | The service being authenticated. | -| `issuer` | yes | The name of the token issuer. The issuer inserts this into the token so it must match the value configured for the issuer. | -| `rootcertbundle` | yes | The absolute path to the root certificate bundle. This bundle contains the public part of the certificates used to sign authentication tokens. | + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ realm + + yes + + The realm in which the registry server authenticates. +
+ service + + yes + + The service being authenticated. +
-For more information about Token based authentication configuration, see the -[specification](spec/auth/token.md). -### `htpasswd` +### token -The _htpasswd_ authentication backed allows you to configure basic -authentication using an -[Apache htpasswd file](https://httpd.apache.org/docs/2.4/programs/htpasswd.html). -The only supported password format is -[`bcrypt`](http://en.wikipedia.org/wiki/Bcrypt). Entries with other hash types -are ignored. The `htpasswd` file is loaded once, at startup. If the file is -invalid, the registry will display an error and will not start. +Token based authentication allows the authentication system to be decoupled from +the registry. It is a well established authentication paradigm with a high +degree of security. -> **Warning**: Only use the `htpasswd` authentication scheme with TLS -> configured, since basic authentication sends passwords as part of the HTTP + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ realm + + yes + + The realm in which the registry server authenticates. +
+ service + + yes + + The service being authenticated. +
+ issuer + + yes + +The name of the token issuer. The issuer inserts this into +the token so it must match the value configured for the issuer. +
+ rootcertbundle + + yes + +The absolute path to the root certificate bundle. This bundle contains the +public part of the certificates that is used to sign authentication tokens. +
 + +For more information about Token based authentication configuration, see the [specification](spec/auth/token.md). + +### htpasswd + +The _htpasswd_ authentication backend allows one to configure basic auth using an +[Apache htpasswd +file](https://httpd.apache.org/docs/2.4/programs/htpasswd.html). Only +[`bcrypt`](http://en.wikipedia.org/wiki/Bcrypt) format passwords are supported. +Entries with other hash types will be ignored. The htpasswd file is loaded once, +at startup. If the file is invalid, the registry will display an error and will +not start. + +> __WARNING:__ This authentication scheme should only be used with TLS +> configured, since basic authentication sends passwords as part of the http +> header. 
ParameterRequiredDescription
+ realm + + yes + + The realm in which the registry server authenticates. +
+ path + + yes + + Path to htpasswd file to load at startup. +
-## `middleware` +## middleware -The `middleware` structure is **optional**. Use this option to inject middleware at -named hook points. Each middleware must implement the same interface as the -object it is wrapping. For instance, a registry middleware must implement the -`distribution.Namespace` interface, while a repository middleware must implement -`distribution.Repository`, and a storage middleware must implement +The `middleware` option is **optional**. Use this option to inject middleware at +named hook points. All middleware must implement the same interface as the +object they're wrapping. This means a registry middleware must implement the +`distribution.Namespace` interface, repository middleware must implement +`distribution.Repository`, and storage middleware must implement `driver.StorageDriver`. -This is an example configuration of the `cloudfront` middleware, a storage -middleware: +An example configuration of the `cloudfront` middleware, a storage middleware: -```none -middleware: - registry: - - name: ARegistryMiddleware - options: - foo: bar - repository: - - name: ARepositoryMiddleware - options: - foo: bar - storage: - - name: cloudfront - options: - baseurl: https://my.cloudfronted.domain.com/ - privatekey: /path/to/pem - keypairid: cloudfrontkeypairid - duration: 3000s -``` + middleware: + registry: + - name: ARegistryMiddleware + options: + foo: bar + repository: + - name: ARepositoryMiddleware + options: + foo: bar + storage: + - name: cloudfront + options: + baseurl: https://my.cloudfronted.domain.com/ + privatekey: /path/to/pem + keypairid: cloudfrontkeypairid + duration: 3000s Each middleware entry has `name` and `options` entries. The `name` must correspond to the name under which the middleware registers itself. The @@ -646,134 +743,382 @@ it supports any interesting structures desired, leaving it up to the middleware initialization function to best determine how to handle the specific interpretation of the options. 
-### `cloudfront` +### cloudfront + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ baseurl + + yes + + SCHEME://HOST[/PATH] at which Cloudfront is served. +
+ privatekey + + yes + + Private Key for Cloudfront provided by AWS. +
+ keypairid + + yes + + Key pair ID provided by AWS. +
+ duration + + no + + Specify a `duration` by providing an integer and a unit. Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`. For example, `3000s` is a valid duration; there should be no space between the integer and unit. If you do not specify a `duration` or specify an integer without a time unit, this defaults to 20 minutes. +
-| Parameter | Required | Description | -|-----------|----------|-------------------------------------------------------| -| `baseurl` | yes | The `SCHEME://HOST[/PATH]` at which Cloudfront is served. | -| `privatekey` | yes | The private key for Cloudfront, provided by AWS. | -| `keypairid` | yes | The key pair ID provided by AWS. | -| `duration` | no | An integer and unit for the duration of the Cloudfront session. Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, or `h`. For example, `3000s` is valid, but `3000 s` is not. If you do not specify a `duration` or you specify an integer without a time unit, the duration defaults to `20m` (20 minutes).| +### redirect -### `redirect` +In place of the `cloudfront` storage middleware, the `redirect` +storage middleware can be used to specify a custom URL to a location +of a proxy for the layer stored by the S3 storage driver. -You can use the `redirect` storage middleware to specify a custom URL to a -location of a proxy for the layer stored by the S3 storage driver. +| Parameter | Required | Description | +| --- | --- | --- | +| baseurl | yes | `SCHEME://HOST` at which layers are served. Can also contain port. For example, `https://example.com:5443`. | -| Parameter | Required | Description | -|-----------|----------|-------------------------------------------------------------------------------------------------------------| -| `baseurl` | yes | `SCHEME://HOST` at which layers are served. Can also contain port. For example, `https://example.com:5443`. 
| +## reporting -## `reporting` - -``` -reporting: - bugsnag: - apikey: bugsnagapikey - releasestage: bugsnagreleasestage - endpoint: bugsnagendpoint - newrelic: - licensekey: newreliclicensekey - name: newrelicname - verbose: true -``` + reporting: + bugsnag: + apikey: bugsnagapikey + releasestage: bugsnagreleasestage + endpoint: bugsnagendpoint + newrelic: + licensekey: newreliclicensekey + name: newrelicname + verbose: true The `reporting` option is **optional** and configures error and metrics -reporting tools. At the moment only two services are supported: +reporting tools. At the moment only two services are supported, [New +Relic](http://newrelic.com/) and [Bugsnag](http://bugsnag.com), a valid +configuration may contain both. -- [Bugsnag](#bugsnag) -- [New Relic](#new-relic) +### bugsnag -A valid configuration may contain both. - -### `bugsnag` - -| Parameter | Required | Description | -|-----------|----------|-------------------------------------------------------| -| `apikey` | yes | The API Key provided by Bugsnag. | -| `releasestage` | no | Tracks where the registry is deployed, using a string like `production`, `staging`, or `development`.| -| `endpoint`| no | The enterprise Bugsnag endpoint. | - -### `newrelic` - -| Parameter | Required | Description | -|-----------|----------|-------------------------------------------------------| -| `licensekey` | yes | License key provided by New Relic. | -| `name` | no | New Relic application name. | -| `verbose`| no | Set to `true` to enable New Relic debugging output on `stdout`. 
| - -## `http` - -```none -http: - addr: localhost:5000 - net: tcp - prefix: /my/nested/registry/ - host: https://myregistryaddress.org:5000 - secret: asecretforlocaldevelopment - relativeurls: false - tls: - certificate: /path/to/x509/public - key: /path/to/x509/private - clientcas: - - /path/to/ca.pem - - /path/to/another/ca.pem - letsencrypt: - cachefile: /path/to/cache-file - email: emailused@letsencrypt.com - debug: - addr: localhost:5001 - headers: - X-Content-Type-Options: [nosniff] - http2: - disabled: false -``` - -The `http` option details the configuration for the HTTP server that hosts the -registry. - -| Parameter | Required | Description | -|-----------|----------|-------------------------------------------------------| -| `addr` | yes | The address for which the server should accept connections. The form depends on a network type (see the `net` option). Use `HOST:PORT` for TCP and `FILE` for a UNIX socket. | -| `net` | no | The network used to create a listening socket. Known networks are `unix` and `tcp`. | -| `prefix` | no | If the server does not run at the root path, set this to the value of the prefix. The root path is the section before `v2`. It requires both preceding and trailing slashes, such as in the example `/path/`. | -| `host` | no | A fully-qualified URL for an externally-reachable address for the registry. If present, it is used when creating generated URLs. Otherwise, these URLs are derived from client requests. | -| `secret` | no | A random piece of data used to sign state that may be stored with the client to protect against tampering. For production environments you should generate a random piece of data using a cryptographically secure random generator. If you omit the secret, the registry will automatically generate a secret when it starts. 
**If you are building a cluster of registries behind a load balancer, you MUST ensure the secret is the same for all registries.**| -| `relativeurls`| no | If `true`, the registry returns relative URLs in Location headers. The client is responsible for resolving the correct URL. **This option is not compatible with Docker 1.7 and earlier.**| + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ apikey + + yes + + API Key provided by Bugsnag +
+ releasestage + + no + + Tracks where the registry is deployed, for example, + production, staging, or + development. +
+ endpoint + + no + + Specify the enterprise Bugsnag endpoint. +
-### `tls` +### newrelic -The `tls` structure within `http` is **optional**. Use this to configure TLS -for the server. If you already have a web server running on -the same host as the registry, you may prefer to configure TLS on that web server + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ licensekey + + yes + + License key provided by New Relic. +
+ name + + no + + New Relic application name. +
+ verbose + + no + + Enable New Relic debugging output on stdout. +
+ +## http + + http: + addr: localhost:5000 + net: tcp + prefix: /my/nested/registry/ + host: https://myregistryaddress.org:5000 + secret: asecretforlocaldevelopment + relativeurls: false + tls: + certificate: /path/to/x509/public + key: /path/to/x509/private + clientcas: + - /path/to/ca.pem + - /path/to/another/ca.pem + letsencrypt: + cachefile: /path/to/cache-file + email: emailused@letsencrypt.com + debug: + addr: localhost:5001 + headers: + X-Content-Type-Options: [nosniff] + http2: + disabled: false + +The `http` option details the configuration for the HTTP server that hosts the registry. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ addr + + yes + + The address for which the server should accept connections. The form depends on a network type (see net option): + HOST:PORT for tcp and FILE for a unix socket. +
+ net + + no + + The network which is used to create a listening socket. Known networks are unix and tcp. + The default empty value means tcp. +
+ prefix + + no + +If the server does not run at the root path use this value to specify the +prefix. The root path is the section before v2. It +should have both preceding and trailing slashes, for example /path/. +
+ host + + no + +This parameter specifies an externally-reachable address for the registry, as a +fully qualified URL. If present, it is used when creating generated URLs. +Otherwise, these URLs are derived from client requests. +
+ secret + + yes + +A random piece of data. This is used to sign state that may be stored with the +client to protect against tampering. For production environments you should generate a +random piece of data using a cryptographically secure random generator. This +configuration parameter may be omitted, in which case the registry will automatically +generate a secret at launch. +

+WARNING: If you are building a cluster of registries behind a load balancer, you MUST +ensure the secret is the same for all registries. +

+ relativeurls + + no + + Specifies that the registry should return relative URLs in Location headers. + The client is responsible for resolving the correct URL. This option is not + compatible with Docker 1.7 and earlier. +
+ + +### tls + +The `tls` struct within `http` is **optional**. Use this to configure TLS +for the server. If you already have a server such as Nginx or Apache running on +the same host as the registry, you may prefer to configure TLS termination there and proxy connections to the registry server. -| Parameter | Required | Description | -|-----------|----------|-------------------------------------------------------| -| `certificate` | yes | Absolute path to the x509 certificate file. | -| `key` | yes | Absolute path to the x509 private key file. | -| `clientcas` | no | An array of absolute paths to x509 CA files. | + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ certificate + + yes + + Absolute path to x509 cert file +
+ key + + yes + + Absolute path to x509 private key file. +
+ clientcas + + no + + An array of absolute paths to an x509 CA file +
-### `letsencrypt` +### letsencrypt -The `letsencrypt` structure within `tls` is **optional**. Use this to configure -TLS certificates provided by -[Let's Encrypt](https://letsencrypt.org/how-it-works/). +The `letsencrypt` struct within `tls` is **optional**. Use this to configure TLS +certificates provided by [Let's Encrypt](https://letsencrypt.org/how-it-works/). ->**NOTE**: When using Let's Encrypt, ensure that the outward-facing address is -> accessible on port `443`. The registry defaults to listening on port `5000`. -> If you run the registry as a container, consider adding the flag `-p 443:5000` -> to the `docker run` command or using a similar setting in a cloud -> configuration. +>**NOTE**: When using Let's Encrypt ensure that the outward facing address is +> accessible on port `443`. The registry defaults to listening on `5000`, if +> run as a container consider adding the flag `-p 443:5000` to the `docker run` +> command or similar setting in cloud configuration. -| Parameter | Required | Description | -|-----------|----------|-------------------------------------------------------| -| `cachefile` | yes | Absolute path to a file where the Let's Encrypt agent can cache data. | -| `email` | yes | The email address used to register with Let's Encrypt. | + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ cachefile + + yes + + Absolute path to a file for the Let's Encrypt agent to cache data +
+ email + + yes + + Email used to register with Let's Encrypt. +
-### `debug` +### debug The `debug` option is **optional** . Use it to configure a debug server that can be helpful in diagnosing problems. The debug endpoint can be used for @@ -781,10 +1126,11 @@ monitoring registry metrics and health, as well as profiling. Sensitive information may be available via the debug endpoint. Please be certain that access to the debug endpoint is locked down in a production environment. -The `debug` section takes a single required `addr` parameter, which specifies -the `HOST:PORT` on which the debug server should accept connections. +The `debug` section takes a single, required `addr` parameter. This parameter +specifies the `HOST:PORT` on which the debug server should accept connections. -### `headers` + +### headers The `headers` option is **optional** . Use it to specify headers that the HTTP server should include in responses. This can be used for security headers such @@ -796,325 +1142,846 @@ header's payload values. Including `X-Content-Type-Options: [nosniff]` is recommended, so that browsers will not interpret content as HTML if they are directed to load a page from the -registry. This header is included in the example configuration file. +registry. This header is included in the example configuration files. -### `http2` +### http2 -The `http2` structure within `http` is **optional**. Use this to control http2 +The `http2` struct within `http` is **optional**. Use this to control http2 settings for the registry. -| Parameter | Required | Description | -|-----------|----------|-------------------------------------------------------| -| `disabled` | no | If `true`, then `http2` support is disabled. | + + + + + + + + + + + +
ParameterRequiredDescription
+ disabled + + no + + A boolean that determines if http2 support should be disabled +
-## `notifications` +## notifications -```none -notifications: - endpoints: - - name: alistener - disabled: false - url: https://my.listener.com/event - headers: - timeout: 1s - threshold: 10 - backoff: 1s - ignoredmediatypes: - - application/octet-stream -``` + notifications: + endpoints: + - name: alistener + disabled: false + url: https://my.listener.com/event + headers: + timeout: 500 + threshold: 5 + backoff: 1000 + ignoredmediatypes: + - application/octet-stream The notifications option is **optional** and currently may contain a single option, `endpoints`. -### `endpoints` +### endpoints -The `endpoints` structure contains a list of named services (URLs) that can -accept event notifications. +Endpoints is a list of named services (URLs) that can accept event notifications. -| Parameter | Required | Description | -|-----------|----------|-------------------------------------------------------| -| `name` | yes | A human-readable name for the service. | -| `disabled` | no | If `true`, notifications are disabled for the service.| -| `url` | yes | The URL to which events should be published. | -| `headers` | yes | A list of static headers to add to each request. Each header's name is a key beneath `headers`, and each value is a list of payloads for that header name. Values must always be lists. | -| `timeout` | yes | A value for the HTTP timeout. A positive integer and an optional suffix indicating the unit of time, which may be `ns`, `us`, `ms`, `s`, `m`, or `h`. If you omit the unit of time, `ns` is used. | -| `threshold` | yes | An integer specifying how long to wait before backing off a failure. | -| `backoff` | yes | How long the system backs off before retrying after a failure. A positive integer and an optional suffix indicating the unit of time, which may be `ns`, `us`, `ms`, `s`, `m`, or `h`. If you omit the unit of time, `ns` is used. | -| `ignoredmediatypes`|no| A list of target media types to ignore. 
Events with these target media types are not published to the endpoint. | - -## `redis` - -```none -redis: - addr: localhost:6379 - password: asecret - db: 0 - dialtimeout: 10ms - readtimeout: 10ms - writetimeout: 10ms - pool: - maxidle: 16 - maxactive: 64 - idletimeout: 300s -``` - -Declare parameters for constructing the `redis` connections. Registry instances -may use the Redis instance for several applications. Currently, it caches -information about immutable blobs. Most of the `redis` options control -how the registry connects to the `redis` instance. You can control the pool's -behavior with the [pool](#pool) subsection. - -You should configure Redis with the **allkeys-lru** eviction policy, because the -registry does not set an expiration value on keys. - -| Parameter | Required | Description | -|-----------|----------|-------------------------------------------------------| -| `addr` | yes | The address (host and port) of the Redis instance. | -| `password`| no | A password used to authenticate to the Redis instance.| -| `db` | no | The name of the database to use for each connection. | -| `dialtimeout` | no | The timeout for connecting to the Redis instance. | -| `readtimeout` | no | The timeout for reading from the Redis instance. | -| `writetimeout` | no | The timeout for writing to the Redis instance. | - -### `pool` - -```none -pool: - maxidle: 16 - maxactive: 64 - idletimeout: 300s -``` - -Use these settings to configure the behavior of the Redis connection pool. - -| Parameter | Required | Description | -|-----------|----------|-------------------------------------------------------| -| `maxidle` | no | The maximum number of idle connections in the pool. | -| `maxactive`| no | The maximum number of connections which can be open before blocking a connection request. | -| `idletimeout`| no | How long to wait before closing inactive connections. 
| - -## `health` - -```none -health: - storagedriver: - enabled: true - interval: 10s - threshold: 3 - file: - - file: /path/to/checked/file - interval: 10s - http: - - uri: http://server.to.check/must/return/200 - headers: - Authorization: [Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==] - statuscode: 200 - timeout: 3s - interval: 10s - threshold: 3 - tcp: - - addr: redis-server.domain.com:6379 - timeout: 3s - interval: 10s - threshold: 3 -``` - -The health option is **optional**, and contains preferences for a periodic -health check on the storage driver's backend storage, as well as optional -periodic checks on local files, HTTP URIs, and/or TCP servers. The results of -the health checks are available at the `/debug/health` endpoint on the debug -HTTP server if the debug HTTP server is enabled (see http section). - -### `storagedriver` - -The `storagedriver` structure contains options for a health check on the -configured storage driver's backend storage. The health check is only active -when `enabled` is set to `true`. - -| Parameter | Required | Description | -|-----------|----------|-------------------------------------------------------| -| `enabled` | yes | Set to `true` to enable storage driver health checks or `false` to disable them. | -| `interval`| no | How long to wait between repetitions of the storage driver health check. A positive integer and an optional suffix indicating the unit of time. The suffix is one of `ns`, `us`, `ms`, `s`, `m`, or `h`. Defaults to `10s` if the value is omitted. If you specify a value but omit the suffix, the value is interpreted as a number of nanoseconds. | -| `threshold`| no | A positive integer which represents the number of times the check must fail before the state is marked as unhealthy. If not specified, a single failure marks the state as unhealthy. | - -### `file` - -The `file` structure includes a list of paths to be periodically checked for the\ -existence of a file. 
If a file exists at the given path, the health check will -fail. You can use this mechanism to bring a registry out of rotation by creating -a file. - -| Parameter | Required | Description | -|-----------|----------|-------------------------------------------------------| -| `file` | yes | The path to check for existence of a file. | -| `interval`| no | How long to wait before repeating the check. A positive integer and an optional suffix indicating the unit of time. The suffix is one of `ns`, `us`, `ms`, `s`, `m`, or `h`. Defaults to `10s` if the value is omitted. If you specify a value but omit the suffix, the value is interpreted as a number of nanoseconds. | - -### `http` - -The `http` structure includes a list of HTTP URIs to periodically check with -`HEAD` requests. If a `HEAD` request does not complete or returns an unexpected -status code, the health check will fail. - -| Parameter | Required | Description | -|-----------|----------|-------------------------------------------------------| -| `uri` | yes | The URI to check. | -| `headers` | no | Static headers to add to each request. Each header's name is a key beneath `headers`, and each value is a list of payloads for that header name. Values must always be lists. | -| `statuscode` | no | The expected status code from the HTTP URI. Defaults to `200`. | -| `timeout` | no | How long to wait before timing out the HTTP request. A positive integer and an optional suffix indicating the unit of time. The suffix is one of `ns`, `us`, `ms`, `s`, `m`, or `h`. If you specify a value but omit the suffix, the value is interpreted as a number of nanoseconds. | -| `interval`| no | How long to wait before repeating the check. A positive integer and an optional suffix indicating the unit of time. The suffix is one of `ns`, `us`, `ms`, `s`, `m`, or `h`. Defaults to `10s` if the value is omitted. If you specify a value but omit the suffix, the value is interpreted as a number of nanoseconds. 
| -| `threshold`| no | The number of times the check must fail before the state is marked as unhealthy. If this field is not specified, a single failure marks the state as unhealthy. | - -### `tcp` - -The `tcp` structure includes a list of TCP addresses to periodically check using -TCP connection attempts. Addresses must include port numbers. If a connection -attempt fails, the health check will fail. - -| Parameter | Required | Description | -|-----------|----------|-------------------------------------------------------| -| `addr` | yes | The TCP address and port to connect to. | -| `timeout` | no | How long to wait before timing out the TCP connection. A positive integer and an optional suffix indicating the unit of time. The suffix is one of `ns`, `us`, `ms`, `s`, `m`, or `h`. If you specify a value but omit the suffix, the value is interpreted as a number of nanoseconds. | -| `interval`| no | How long to wait between repetitions of the check. A positive integer and an optional suffix indicating the unit of time. The suffix is one of `ns`, `us`, `ms`, `s`, `m`, or `h`. Defaults to `10s` if the value is omitted. If you specify a value but omit the suffix, the value is interpreted as a number of nanoseconds. | -| `threshold`| no | The number of times the check must fail before the state is marked as unhealthy. If this field is not specified, a single failure marks the state as unhealthy. | + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ name + + yes + +A human readable name for the service. +
+ disabled + + no + +A boolean to enable/disable notifications for a service. +
+ url + + yes + +The URL to which events should be published. +
+ headers + + yes + + Static headers to add to each request. Each header's name should be a key + underneath headers, and each value is a list of payloads for that + header name. Note that values must always be lists. +
+ timeout + + yes + + An HTTP timeout value. This field takes a positive integer and an optional + suffix indicating the unit of time. Possible units are: +
    +
  • ns (nanoseconds)
  • +
  • us (microseconds)
  • +
  • ms (milliseconds)
  • +
  • s (seconds)
  • +
  • m (minutes)
  • +
  • h (hours)
  • +
+ If you omit the suffix, the system interprets the value as nanoseconds. +
+ threshold + + yes + + An integer specifying how long to wait before backing off a failure. +
+ backoff + + yes + + How long the system backs off before retrying. This field takes a positive + integer and an optional suffix indicating the unit of time. Possible units + are: +
    +
  • ns (nanoseconds)
  • +
  • us (microseconds)
  • +
  • ms (milliseconds)
  • +
  • s (seconds)
  • +
  • m (minutes)
  • +
  • h (hours)
  • +
+ If you omit the suffix, the system interprets the value as nanoseconds. +
+ ignoredmediatypes + + no + + List of target media types to ignore. An event whose target media type + is present in this list will not be published to the endpoint. +
-## `proxy` +## redis -``` -proxy: - remoteurl: https://registry-1.docker.io - username: [username] - password: [password] -``` + redis: + addr: localhost:6379 + password: asecret + db: 0 + dialtimeout: 10ms + readtimeout: 10ms + writetimeout: 10ms + pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s -The `proxy` structure allows a registry to be configured as a pull-through cache -to Docker Hub. See -[mirror](https://github.com/docker/docker.github.io/tree/master/registry/recipes/mirror.md) -for more information. Pushing to a registry configured as a pull-through cache -is unsupported. +Declare parameters for constructing the redis connections. Registry instances +may use the Redis instance for several applications. The current purpose is +caching information about immutable blobs. Most of the options below control +how the registry connects to redis. You can control the pool's behavior +with the [pool](#pool) subsection. -| Parameter | Required | Description | -|-----------|----------|-------------------------------------------------------| -| `remoteurl`| yes | The URL for the repository on Docker Hub. | -| `username` | no | The username registered with Docker Hub which has access to the repository. | -| `password` | no | The password used to authenticate to Docker Hub using the username specified in `username`. | +It's advisable to configure Redis itself with the **allkeys-lru** eviction policy +as the registry does not set an expire value on keys. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ addr + + yes + + Address (host and port) of redis instance. +
+ password + + no + + A password used to authenticate to the redis instance. +
+ db + + no + + Selects the db for each connection. +
+ dialtimeout + + no + + Timeout for connecting to a redis instance. +
+ readtimeout + + no + + Timeout for reading from redis connections. +
+ writetimeout + + no + + Timeout for writing to redis connections. +
-To enable pulling private repositories (e.g. `batman/robin`) specify the -username (such as `batman`) and the password for that username. +### pool -> **Note**: These private repositories are stored in the proxy cache's storage. -> Take appropriate measures to protect access to the proxy cache. + pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s -## `compatibility` +Configure the behavior of the Redis connection pool. -```none -compatibility: - schema1: - signingkeyfile: /etc/registry/key.json -``` + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ maxidle + + no + + Sets the maximum number of idle connections. +
+ maxactive + + no + + Sets the maximum number of connections that should + be opened before blocking a connection request. +
+ idletimeout + + no + + Sets the amount of time to wait before closing + inactive connections. +
-Use the `compatibility` structure to configure handling of older and deprecated -features. Each subsection defines such a feature with configurable behavior. +## health -### `schema1` + health: + storagedriver: + enabled: true + interval: 10s + threshold: 3 + file: + - file: /path/to/checked/file + interval: 10s + http: + - uri: http://server.to.check/must/return/200 + headers: + Authorization: [Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==] + statuscode: 200 + timeout: 3s + interval: 10s + threshold: 3 + tcp: + - addr: redis-server.domain.com:6379 + timeout: 3s + interval: 10s + threshold: 3 -| Parameter | Required | Description | -|-----------|----------|-------------------------------------------------------| -| `signingkeyfile` | no | The signing private key used to add signatures to `schema1` manifests. If no signing key is provided, a new ECDSA key is generated when the registry starts. | +The health option is **optional**. It may contain preferences for a periodic +health check on the storage driver's backend storage, and optional periodic +checks on local files, HTTP URIs, and/or TCP servers. The results of the health +checks are available at /debug/health on the debug HTTP server if the debug +HTTP server is enabled (see http section). -## `validation` +### storagedriver -```none -validation: - manifests: - urls: - allow: - - ^https?://([^/]+\.)*example\.com/ - deny: - - ^https?://www\.example\.com/ -``` +storagedriver contains options for a health check on the configured storage +driver's backend storage. enabled must be set to true for this health check to +be active. -### `disabled` + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ enabled + + yes + +"true" to enable the storage driver health check or "false" to disable it. +
+ interval + + no + + The length of time to wait between repetitions of the check. This field + takes a positive integer and an optional suffix indicating the unit of + time. Possible units are: +
    +
  • ns (nanoseconds)
  • +
  • us (microseconds)
  • +
  • ms (milliseconds)
  • +
  • s (seconds)
  • +
  • m (minutes)
  • +
  • h (hours)
  • +
+ If you omit the suffix, the system interprets the value as nanoseconds. + The default value is 10 seconds if this field is omitted. +
+ threshold + + no + + An integer specifying the number of times the check must fail before the + check triggers an unhealthy state. If this field is not specified, a + single failure will trigger an unhealthy state. +
-The `disabled` flag disables the other options in the `validation` -section. They are enabled by default. This option deprecates the `enabled` flag. +### file -### `manifests` +file is a list of paths to be periodically checked for the existence of a file. +If a file exists at the given path, the health check will fail. This can be +used as a way of bringing a registry out of rotation by creating a file. -Use the `manifests` subsection to configure validation of manifests. If -`disabled` is `false`, the validation allows nothing. + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ file + + yes + +The path to check for the existence of a file. +
+ interval + + no + + The length of time to wait between repetitions of the check. This field + takes a positive integer and an optional suffix indicating the unit of + time. Possible units are: +
    +
  • ns (nanoseconds)
  • +
  • us (microseconds)
  • +
  • ms (milliseconds)
  • +
  • s (seconds)
  • +
  • m (minutes)
  • +
  • h (hours)
  • +
+ If you omit the suffix, the system interprets the value as nanoseconds. + The default value is 10 seconds if this field is omitted. +
-#### `urls` +### http -The `allow` and `deny` options are each a list of +http is a list of HTTP URIs to be periodically checked with HEAD requests. If +a HEAD request doesn't complete or returns an unexpected status code, the +health check will fail. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ uri + + yes + +The URI to check. +
+ headers + + no + + Static headers to add to each request. Each header's name should be a key + underneath headers, and each value is a list of payloads for that + header name. Note that values must always be lists. +
+ statuscode + + no + +Expected status code from the HTTP URI. Defaults to 200. +
+ timeout + + no + + The length of time to wait before timing out the HTTP request. This field + takes a positive integer and an optional suffix indicating the unit of + time. Possible units are: +
    +
  • ns (nanoseconds)
  • +
  • us (microseconds)
  • +
  • ms (milliseconds)
  • +
  • s (seconds)
  • +
  • m (minutes)
  • +
  • h (hours)
  • +
+ If you omit the suffix, the system interprets the value as nanoseconds. +
+ interval + + no + + The length of time to wait between repetitions of the check. This field + takes a positive integer and an optional suffix indicating the unit of + time. Possible units are: +
    +
  • ns (nanoseconds)
  • +
  • us (microseconds)
  • +
  • ms (milliseconds)
  • +
  • s (seconds)
  • +
  • m (minutes)
  • +
  • h (hours)
  • +
+ If you omit the suffix, the system interprets the value as nanoseconds. + The default value is 10 seconds if this field is omitted. +
+ threshold + + no + + An integer specifying the number of times the check must fail before the + check triggers an unhealthy state. If this field is not specified, a + single failure will trigger an unhealthy state. +
+ +### tcp + +tcp is a list of TCP addresses to be periodically checked with connection +attempts. The addresses must include port numbers. If a connection attempt +fails, the health check will fail. + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ addr + + yes + +The TCP address to connect to, including a port number. +
+ timeout + + no + + The length of time to wait before timing out the TCP connection. This + field takes a positive integer and an optional suffix indicating the unit + of time. Possible units are: +
    +
  • ns (nanoseconds)
  • +
  • us (microseconds)
  • +
  • ms (milliseconds)
  • +
  • s (seconds)
  • +
  • m (minutes)
  • +
  • h (hours)
  • +
+ If you omit the suffix, the system interprets the value as nanoseconds. +
+ interval + + no + + The length of time to wait between repetitions of the check. This field + takes a positive integer and an optional suffix indicating the unit of + time. Possible units are: +
    +
  • ns (nanoseconds)
  • +
  • us (microseconds)
  • +
  • ms (milliseconds)
  • +
  • s (seconds)
  • +
  • m (minutes)
  • +
  • h (hours)
  • +
+ If you omit the suffix, the system interprets the value as nanoseconds. + The default value is 10 seconds if this field is omitted. +
+ threshold + + no + + An integer specifying the number of times the check must fail before the + check triggers an unhealthy state. If this field is not specified, a + single failure will trigger an unhealthy state. +
+ +## Proxy + + proxy: + remoteurl: https://registry-1.docker.io + username: [username] + password: [password] + +Proxy enables a registry to be configured as a pull through cache to the official Docker Hub. See [mirror](recipes/mirror.md) for more information. Pushing to a registry configured as a pull through cache is currently unsupported. + + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ remoteurl + + yes + + The URL of the official Docker Hub +
+ username + + no + + The username of the Docker Hub account +
+ password + + no + + The password for the official Docker Hub account +
+ +To enable pulling private repositories (e.g. `batman/robin`) a username and password for user `batman` must be specified. Note: These private repositories will be stored in the proxy cache's storage and relevant measures should be taken to protect access to this. + +## Compatibility + + compatibility: + schema1: + signingkeyfile: /etc/registry/key.json + +Configure handling of older and deprecated features. Each subsection +defines such a feature with configurable behavior. + +### Schema1 + + + + + + + + + + + + +
ParameterRequiredDescription
+ signingkeyfile + + no + + The signing private key used for adding signatures to schema1 manifests. + If no signing key is provided, a new ECDSA key will be generated on + startup. +
+ +## Validation + + validation: + enabled: true + manifests: + urls: + allow: + - ^https?://([^/]+\.)*example\.com/ + deny: + - ^https?://www\.example\.com/ + +### Enabled + +Use the `enabled` flag to enable the other options in the `validation` +section. They are disabled by default. + +### Manifests + +Use the `manifest` subsection to configure manifest validation. + +#### URLs + +The `allow` and `deny` options are both lists of [regular expressions](https://godoc.org/regexp/syntax) that restrict the URLs in pushed manifests. -If `allow` is unset, pushing a manifest containing URLs fails. +If `allow` is unset, pushing a manifest containing URLs will fail. -If `allow` is set, pushing a manifest succeeds only if all URLs match -one of the `allow` regular expressions **and** one of the following holds: - -1. `deny` is unset. -2. `deny` is set but no URLs within the manifest match any of the `deny` regular - expressions. +If `allow` is set, pushing a manifest will succeed only if all URLs within match +one of the `allow` regular expressions and one of the following holds: +1. `deny` is unset. +2. `deny` is set but no URLs within the manifest match any of the `deny` regular expressions. ## Example: Development configuration -You can use this simple example for local development: +The following is a simple example you can use for local development: -```none -version: 0.1 -log: - level: debug -storage: - filesystem: - rootdirectory: /var/lib/registry -http: - addr: localhost:5000 - secret: asecretforlocaldevelopment - debug: - addr: localhost:5001 -``` + version: 0.1 + log: + level: debug + storage: + filesystem: + rootdirectory: /var/lib/registry + http: + addr: localhost:5000 + secret: asecretforlocaldevelopment + debug: + addr: localhost:5001 -This example configures the registry instance to run on port `5000`, binding to -`localhost`, with the `debug` server enabled. Registry data is stored in the -`/var/lib/registry` directory. 
Logging is set to `debug` mode, which is the most +The above configures the registry instance to run on port `5000`, binding to +`localhost`, with the `debug` server enabled. Registry data storage is in the +`/var/lib/registry` directory. Logging is in `debug` mode, which is the most verbose. -See -[config-example.yml](https://github.com/docker/distribution/blob/master/cmd/registry/config-example.yml) -for another simple configuration. Both examples are generally useful for local -development. +A similar simple configuration is available at +[config-example.yml](https://github.com/docker/distribution/blob/master/cmd/registry/config-example.yml). +Both are generally useful for local development. ## Example: Middleware configuration -This example configures [Amazon Cloudfront](http://aws.amazon.com/cloudfront/) -as the storage middleware in a registry. Middleware allows the registry to serve -layers via a content delivery network (CDN). This reduces requests to the -storage layer. +This example illustrates how to configure storage middleware in a registry. +Middleware allows the registry to serve layers via a content delivery network +(CDN). This is useful for reducing requests to the storage layer. -Cloudfront requires the S3 storage driver. +The registry supports [Amazon +Cloudfront](http://aws.amazon.com/cloudfront/). You can only use Cloudfront in +conjunction with the S3 storage driver. -This is the configuration expressed in YAML: + + + + + + + + + + + + + + + + + +
ParameterDescription
nameThe storage middleware name. Currently cloudfront is an accepted value.
disabledSet to false to easily disable the middleware.
options: + A set of key/value options to configure the middleware. +
    +
  • baseurl: The Cloudfront base URL.
  • +
  • privatekey: The location of your AWS private key on the filesystem.
  • +
  • keypairid: The ID of your Cloudfront keypair.
  • +
  • duration: The duration in minutes for which the URL is valid. Default is 20.
  • +
+
-```none -middleware: - storage: - - name: cloudfront - disabled: false - options: - baseurl: http://d111111abcdef8.cloudfront.net - privatekey: /path/to/asecret.pem - keypairid: asecret - duration: 60s -``` +The following example illustrates these values: -See the configuration reference for [Cloudfront](#cloudfront) for more -information about configuration options. + middleware: + storage: + - name: cloudfront + disabled: false + options: + baseurl: http://d111111abcdef8.cloudfront.net + privatekey: /path/to/asecret.pem + keypairid: asecret + duration: 60 -> **Note**: Cloudfront keys exist separately from other AWS keys. See -> [the documentation on AWS credentials](http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html) -> for more information. + +>**Note**: Cloudfront keys exist separately to other AWS keys. See +>[the documentation on AWS credentials](http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html) +>for more information. diff --git a/vendor/github.com/docker/distribution/docs/deploying.md b/vendor/github.com/docker/distribution/docs/deploying.md new file mode 100644 index 000000000..2e8ce69e2 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/deploying.md @@ -0,0 +1,237 @@ + + +# Deploying a registry server + +You need to [install Docker version 1.6.0 or newer](/engine/installation/index.md). + +## Running on localhost + +Start your registry: + + docker run -d -p 5000:5000 --restart=always --name registry registry:2 + +You can now use it with docker. + +Get any image from the hub and tag it to point to your registry: + + docker pull ubuntu && docker tag ubuntu localhost:5000/ubuntu + +... then push it to your registry: + + docker push localhost:5000/ubuntu + +... 
then pull it back from your registry: + + docker pull localhost:5000/ubuntu + +To stop your registry, you would: + + docker stop registry && docker rm -v registry + +## Storage + +By default, your registry data is persisted as a [docker volume](/engine/tutorials/dockervolumes.md) on the host filesystem. Properly understanding volumes is essential if you want to stick with a local filesystem storage. + +Specifically, you might want to point your volume location to a specific place in order to more easily access your registry data. To do so you can: + + docker run -d -p 5000:5000 --restart=always --name registry \ + -v `pwd`/data:/var/lib/registry \ + registry:2 + +### Alternatives + +You should usually consider using [another storage backend](./storage-drivers/index.md) instead of the local filesystem. Use the [storage configuration options](./configuration.md#storage) to configure an alternate storage backend. + +Using one of these will allow you to more easily scale your registry, and leverage your storage redundancy and availability features. + +## Running a domain registry + +While running on `localhost` has its uses, most people want their registry to be more widely available. To do so, the Docker engine requires you to secure it using TLS, which is conceptually very similar to configuring your web server with SSL. + +### Get a certificate + +Assuming that you own the domain `myregistrydomain.com`, and that its DNS record points to the host where you are running your registry, you first need to get a certificate from a CA. + +Create a `certs` directory: + + mkdir -p certs + +Then move and/or rename your crt file to: `certs/domain.crt`, and your key file to: `certs/domain.key`. 
+ +Make sure you stopped your registry from the previous steps, then start your registry again with TLS enabled: + + docker run -d -p 5000:5000 --restart=always --name registry \ + -v `pwd`/certs:/certs \ + -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \ + -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \ + registry:2 + +You should now be able to access your registry from another docker host: + + docker pull ubuntu + docker tag ubuntu myregistrydomain.com:5000/ubuntu + docker push myregistrydomain.com:5000/ubuntu + docker pull myregistrydomain.com:5000/ubuntu + +#### Gotcha + +A certificate issuer may supply you with an *intermediate* certificate. In this case, you must combine your certificate with the intermediate's to form a *certificate bundle*. You can do this using the `cat` command: + + cat domain.crt intermediate-certificates.pem > certs/domain.crt + +### Let's Encrypt + +The registry supports using Let's Encrypt to automatically obtain a browser-trusted certificate. For more +information on Let's Encrypt, see [https://letsencrypt.org/how-it-works/](https://letsencrypt.org/how-it-works/) and the relevant section of the [registry configuration](configuration.md#letsencrypt). + +### Alternatives + +While rarely advisable, you may want to use self-signed certificates instead, or use your registry in an insecure fashion. You will find instructions [here](insecure.md). + +## Load Balancing Considerations + +One may want to use a load balancer to distribute load, terminate TLS or +provide high availability. While a full load balancing setup is outside the +scope of this document, there are a few considerations that can make the process +smoother. + +The most important aspect is that a load balanced cluster of registries must +share the same resources. 
For the current version of the registry, this means +the following must be the same: + + - Storage Driver + - HTTP Secret + - Redis Cache (if configured) + +If any of these are different, the registry will have trouble serving requests. +As an example, if you're using the filesystem driver, all registry instances +must have access to the same filesystem root, which means they should be on +the same machine. For other drivers, such as s3 or azure, they should be +accessing the same resource, and will likely share an identical configuration. +The _HTTP Secret_ coordinates uploads, so also must be the same across +instances. Configuring different redis instances will work (at the time +of writing), but will not be optimal if the instances are not shared, causing +more requests to be directed to the backend. + +#### Important/Required HTTP-Headers +Getting the headers correct is very important. For all responses to any +request under the "/v2/" url space, the `Docker-Distribution-API-Version` +header should be set to the value "registry/2.0", even for a 4xx response. +This header allows the docker engine to quickly resolve authentication realms +and fallback to version 1 registries, if necessary. Confirming this is set up +correctly can help avoid problems with fallback. + +In the same train of thought, you must make sure you are properly sending the +`X-Forwarded-Proto`, `X-Forwarded-For` and `Host` headers to their "client-side" +values. Failure to do so usually makes the registry issue redirects to internal +hostnames or downgrade from https to http. + +A properly secured registry should return 401 when the "/v2/" endpoint is hit +without credentials. The response should include a `WWW-Authenticate` +challenge, providing guidance on how to authenticate, such as with basic auth +or a token service. If the load balancer has health checks, it is recommended +to configure it to consider a 401 response as healthy and any other as down. 
+This will secure your registry by ensuring that configuration problems with +authentication don't accidentally expose an unprotected registry. If you're +using a less sophisticated load balancer, such as Amazon's Elastic Load +Balancer, that doesn't allow one to change the healthy response code, health +checks can be directed at "/", which will always return a `200 OK` response. + +## Restricting access + +Except for registries running on secure local networks, registries should always implement access restrictions. + +### Native basic auth + +The simplest way to achieve access restriction is through basic authentication (this is very similar to other web servers' basic authentication mechanism). + +> **Warning**: You **cannot** use authentication with an insecure registry. You have to [configure TLS first](#running-a-domain-registry) for this to work. + +First create a password file with one entry for the user "testuser", with password "testpassword": + + mkdir auth + docker run --entrypoint htpasswd registry:2 -Bbn testuser testpassword > auth/htpasswd + +Make sure you stopped your registry from the previous step, then start it again: + + docker run -d -p 5000:5000 --restart=always --name registry \ + -v `pwd`/auth:/auth \ + -e "REGISTRY_AUTH=htpasswd" \ + -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" \ + -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd \ + -v `pwd`/certs:/certs \ + -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \ + -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \ + registry:2 + +You should now be able to: + + docker login myregistrydomain.com:5000 + +And then push and pull images as an authenticated user. + +#### Gotcha + +Seeing X509 errors is usually a sign you are trying to use self-signed certificates, and failed to [configure your docker daemon properly](insecure.md). + +### Alternatives + +1. You may want to leverage more advanced basic auth implementations through a proxy design, in front of the registry. 
You will find examples of such patterns in the [recipes list](recipes/index.md). + +2. Alternatively, the Registry also supports delegated authentication, redirecting users to a specific, trusted token server. That approach requires significantly more investment, and only makes sense if you want to fully configure ACLs and more control over the Registry integration into your global authorization and authentication systems. + +You will find [background information here](spec/auth/token.md), and [configuration information here](configuration.md#auth). + +Beware that you will have to implement your own authentication service for this to work, or leverage a third-party implementation. + +## Managing with Compose + +As your registry configuration grows more complex, dealing with it can quickly become tedious. + +It's highly recommended to use [Docker Compose](/compose/index.md) to facilitate operating your registry. + +Here is a simple `docker-compose.yml` example that condenses everything explained so far: + +``` +registry: + restart: always + image: registry:2 + ports: + - 5000:5000 + environment: + REGISTRY_HTTP_TLS_CERTIFICATE: /certs/domain.crt + REGISTRY_HTTP_TLS_KEY: /certs/domain.key + REGISTRY_AUTH: htpasswd + REGISTRY_AUTH_HTPASSWD_PATH: /auth/htpasswd + REGISTRY_AUTH_HTPASSWD_REALM: Registry Realm + volumes: + - /path/data:/var/lib/registry + - /path/certs:/certs + - /path/auth:/auth +``` + +> **Warning**: replace `/path` by whatever directory that holds your `certs` and `auth` folder from above. 
+ +You can then start your registry with a simple + + docker-compose up -d + +## Next + +You will find more specific and advanced informations in the following sections: + + - [Configuration reference](configuration.md) + - [Working with notifications](notifications.md) + - [Advanced "recipes"](recipes/index.md) + - [Registry API](spec/api.md) + - [Storage driver model](storage-drivers/index.md) + - [Token authentication](spec/auth/token.md) diff --git a/vendor/github.com/docker/distribution/docs/deprecated.md b/vendor/github.com/docker/distribution/docs/deprecated.md new file mode 100644 index 000000000..73bde497f --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/deprecated.md @@ -0,0 +1,27 @@ + + +# Docker Registry Deprecation + +This document details functionality or components which are deprecated within +the registry. + +### v2.5.0 + +The signature store has been removed from the registry. Since `v2.4.0` it has +been possible to configure the registry to generate manifest signatures rather +than load them from storage. In this version of the registry this becomes +the default behavior. Signatures which are attached to manifests on put are +not stored in the registry. This does not alter the functional behavior of +the registry. + +Old signatures blobs can be removed from the registry storage by running the +garbage-collect subcommand. diff --git a/vendor/github.com/docker/distribution/docs/garbage-collection.md b/vendor/github.com/docker/distribution/docs/garbage-collection.md new file mode 100644 index 000000000..2d03e7872 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/garbage-collection.md @@ -0,0 +1,137 @@ + + +# Garbage Collection + +As of v2.4.0 a garbage collector command is included within the registry binary. +This document describes what this command does and how and why it should be used. + +## What is Garbage Collection? 
+ +From [wikipedia](https://en.wikipedia.org/wiki/Garbage_collection_(computer_science)): + +"In computer science, garbage collection (GC) is a form of automatic memory management. The +garbage collector, or just collector, attempts to reclaim garbage, or memory occupied by +objects that are no longer in use by the program." + +In the context of the Docker registry, garbage collection is the process of +removing blobs from the filesystem which are no longer referenced by a +manifest. Blobs can include both layers and manifests. + + +## Why Garbage Collection? + +Registry data can occupy considerable amounts of disk space and freeing up +this disk space is an oft-requested feature. Additionally for reasons of security it +can be desirable to ensure that certain layers no longer exist on the filesystem. + + +## Garbage Collection in the Registry + +Filesystem layers are stored by their content address in the Registry. This +has many advantages, one of which is that data is stored once and referred to by manifests. +See [here](compatibility.md#content-addressable-storage-cas) for more details. + +Layers are therefore shared amongst manifests; each manifest maintains a reference +to the layer. As long as a layer is referenced by one manifest, it cannot be garbage +collected. + +Manifests and layers can be `deleted` with the registry API (refer to the API +documentation [here](spec/api.md#deleting-a-layer) and +[here](spec/api.md#deleting-an-image) for details). This API removes references +to the target and makes them eligible for garbage collection. It also makes them +unable to be read via the API. + +If a layer is deleted it will be removed from the filesystem when garbage collection +is run. If a manifest is deleted the layers to which it refers will be removed from +the filesystem if no other manifests refer to them. + + +### Example + +In this example manifest A references two layers: `a` and `b`. Manifest `B` references +layers `a` and `c`. 
In this state, nothing is eligible for garbage collection: + +``` +A -----> a <----- B + \--> b | + c <--/ +``` + +Manifest B is deleted via the API: + +``` +A -----> a B + \--> b + c +``` + +In this state layer `c` no longer has a reference and is eligible for garbage +collection. Layer `a` had one reference removed but will not be garbage +collected as it is still referenced by manifest `A`. The blob representing +manifest `B` will also be eligible for garbage collection. + +After garbage collection has been run manifest `A` and its blobs remain. + +``` +A -----> a + \--> b +``` + + +## How Garbage Collection works + +Garbage collection runs in two phases. First, in the 'mark' phase, the process +scans all the manifests in the registry. From these manifests, it constructs a +set of content address digests. This set is the 'mark set' and denotes the set +of blobs to *not* delete. Secondly, in the 'sweep' phase, the process scans all +the blobs and if a blob's content address digest is not in the mark set, the +process will delete it. + + +> **NOTE** You should ensure that the registry is in read-only mode or not running at +> all. If you were to upload an image while garbage collection is running, there is the +> risk that the image's layers will be mistakenly deleted, leading to a corrupted image. + +This type of garbage collection is known as stop-the-world garbage collection. In future +registry versions the intention is that garbage collection will be an automated background +action and this manual process will no longer apply. + + + +# Running garbage collection + +Garbage collection can be run as follows: + +`bin/registry garbage-collect [--dry-run] /path/to/config.yml` + +The garbage-collect command accepts a `--dry-run` parameter, which will print the progress +of the mark and sweep phases without removing any data. Running with a log level of `info` +will give a clear indication of what will and will not be deleted. 
+ +_Sample output from a dry run garbage collection with registry log level set to `info`_ + +``` +hello-world +hello-world: marking manifest sha256:fea8895f450959fa676bcc1df0611ea93823a735a01205fd8622846041d0c7cf +hello-world: marking blob sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb +hello-world: marking blob sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4 +hello-world: marking configuration sha256:690ed74de00f99a7d00a98a5ad855ac4febd66412be132438f9b8dbd300a937d +ubuntu + +4 blobs marked, 5 blobs eligible for deletion +blob eligible for deletion: sha256:28e09fddaacbfc8a13f82871d9d66141a6ed9ca526cb9ed295ef545ab4559b81 +blob eligible for deletion: sha256:7e15ce58ccb2181a8fced7709e9893206f0937cc9543bc0c8178ea1cf4d7e7b5 +blob eligible for deletion: sha256:87192bdbe00f8f2a62527f36bb4c7c7f4eaf9307e4b87e8334fb6abec1765bcb +blob eligible for deletion: sha256:b549a9959a664038fc35c155a95742cf12297672ca0ae35735ec027d55bf4e97 +blob eligible for deletion: sha256:f251d679a7c61455f06d793e43c06786d7766c88b8c24edf242b2c08e3c3f599 +``` + diff --git a/vendor/github.com/docker/distribution/docs/glossary.md b/vendor/github.com/docker/distribution/docs/glossary.md new file mode 100644 index 000000000..8159b5202 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/glossary.md @@ -0,0 +1,70 @@ + + +# Glossary + +This page contains definitions for distribution related terms. + +
+

Blob

+
+
A blob is any kind of content that is stored by a Registry under a content-addressable identifier (a "digest").
+

+ Layers are a good example of "blobs". +

+
+ +

Image

+
+
An image is a named set of immutable data from which a Docker container can be created.
+

+ An image is represented by a json file called a manifest, and is conceptually a set of layers. + + Image names indicate the location where they can be pulled from and pushed to, as they usually start with a registry domain name and port. + +

+
+ +

Layer

+
+
A layer is a tar archive bundling partial content from a filesystem.
+

+ Layers from an image are usually extracted in order on top of each other to make up a root filesystem from which containers run out. +

+
+ +

Manifest

+
A manifest is the JSON representation of an image.
+ +

Namespace

+
A namespace is a collection of repositories with a common name prefix.
+

+ The namespace with an empty prefix is considered the Global Namespace. +

+
+ +

Registry

+
A registry is a service that let you store and deliver images.
+
+ +

Repository

+
+
A repository is a set of data containing all versions of a given image.
+
+ +

Scope

+
A scope is the portion of a namespace onto which a given authorization token is granted.
+ +

Tag

+
A tag is conceptually a "version" of a named image.
+

+ Example: `docker pull myimage:latest` instructs docker to pull the image "myimage" in version "latest". +

+ +
+ + +
diff --git a/vendor/github.com/docker/distribution/docs/help.md b/vendor/github.com/docker/distribution/docs/help.md new file mode 100644 index 000000000..77ec378f7 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/help.md @@ -0,0 +1,24 @@ + + +# Getting help + +If you need help, or just want to chat, you can reach us: + +- on irc: `#docker-distribution` on freenode +- on the [mailing list](https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution) (mail at ) + +If you want to report a bug: + +- be sure to first read about [how to contribute](https://github.com/docker/distribution/blob/master/CONTRIBUTING.md) +- you can then do so on the [GitHub project bugtracker](https://github.com/docker/distribution/issues) + +You can also find out more about the Docker's project [Getting Help resources](/opensource/get-help.md). diff --git a/vendor/github.com/docker/distribution/docs/images/notifications.gliffy b/vendor/github.com/docker/distribution/docs/images/notifications.gliffy new file mode 100644 index 000000000..5ecf4c3ae --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/images/notifications.gliffy @@ -0,0 +1 @@ 
+{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":737,"height":630,"nodeIndex":171,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":true,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":290,"y":83},"max":{"x":736.5,"y":630}},"objects":[{"x":699.0,"y":246.0,"rotation":0.0,"id":166,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":29,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-30.0,-12.0],[-30.0,59.5],[33.0,59.5],[33.0,131.0]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":632.0,"y":243.0,"rotation":0.0,"id":165,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":28,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-25.0,-11.0],[-25.0,64.5],[-88.0,64.5],[-88.0,140.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[]},{"x":512.0,"y":203.0,"rotation":0.0,"id":161,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath
":[[-19.0,-3.0],[79.12746812182615,-3.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":589.9999999999999,"y":167.5,"rotation":0.0,"id":143,"width":101.11111111111111,"height":65.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.rectangle","order":2,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":0.722222222222222,"y":0.0,"rotation":0.0,"id":144,"width":99.66666666666663,"height":16.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Broadcaster

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":290.0,"y":105.0,"rotation":0.0,"id":160,"width":210.0,"height":190.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":26,"lockAspectRatio":false,"lockShape":false,"children":[{"x":12.92581625076238,"y":17.018834253729665,"rotation":0.0,"id":155,"width":189.57418374923762,"height":151.48116574627034,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":25,"lockAspectRatio":false,"lockShape":false,"children":[{"x":97.57418374923762,"y":58.481165746270335,"rotation":90.0,"id":151,"width":149.0,"height":37.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_bottom","order":21,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":153,"magnitude":1},{"id":154,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":8.0,"rotation":0.0,"id":152,"width":149.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":151,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":8.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":151,"magnitude":1},{"id":154,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":151,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":153,"width":149.0,"height":29.0,"uid":null,"or
der":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":151,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":10,"paddingBottom":8,"paddingLeft":10,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Listener

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":67.5,"y":1.0,"rotation":0.0,"id":154,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":152,"px":0.5,"py":0.0,"xOffset":-7.0,"yOffset":-7.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_bottom","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":10.074195639419855,"y":17.481165746270335,"rotation":0.0,"id":150,"width":120.0,"height":119.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":20,"lockAspectRatio":false,"lockShape":false,"children":[{"x":1.0,"y":80.5,"rotation":0.0,"id":133,"width":117.0,"height":38.5,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_bottom","order":16,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":135,"magnitude":1},{"id":136,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":8.0,"rotation":0.0,"id":134,"width":117.0,"height":30.5,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":133,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":8.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":133,"magnitude":1},{"id":136,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin
":false,"widthInfo":[{"id":133,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":135,"width":117.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":133,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":10,"paddingBottom":8,"paddingLeft":10,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

handler

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":51.5,"y":1.0,"rotation":0.0,"id":136,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":134,"px":0.5,"py":0.0,"xOffset":-7.0,"yOffset":-7.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_bottom","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":39.0,"rotation":0.0,"id":129,"width":120.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":12,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":131,"magnitude":1},{"id":132,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":130,"width":120.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":129,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":129,"magnitude":1},{"id":132,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":129,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"stroke
Color":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":131,"width":120.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":129,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":53.0,"y":31.0,"rotation":0.0,"id":132,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":130,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":125,"width":120.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":8,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":127,"magnitude":1},{"id":128,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":126,"width":120.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":125,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":125,"magnitude":1},{"id":128,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":125,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColo
r":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":127,"width":120.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":125,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

request

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":53.0,"y":31.0,"rotation":0.0,"id":128,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":126,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.5154455517800614,"y":0.5154455517799761,"rotation":90.39513704250749,"id":145,"width":150.0,"height":150.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_bottom","order":4,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":147,"magnitude":1},{"id":148,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":8.0,"rotation":0.0,"id":146,"width":150.0,"height":142.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":145,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":8.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":145,"magnitude":1},{"id":148,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":145,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.contai
ners_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":147,"width":150.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":145,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":10,"paddingBottom":8,"paddingLeft":10,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":68.0,"y":0.9999999999999432,"rotation":0.0,"id":148,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":146,"px":0.5,"py":0.0,"xOffset":-7.0,"yOffset":-7.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_bottom","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":156,"width":210.0,"height":190.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":159,"width":206.0,"height":16.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Registry instance

","tid":null,"valign":"middle","vposition":"above","hposition":"none"}}}]}]},{"x":473.0,"y":525.0,"rotation":0.0,"id":115,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":69,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":68,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":109,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[2.0,4.5],[2.0,11.533649282003012],[2.0,18.567298564006137],[2.0,25.60094784600915]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":665.0,"y":530.0,"rotation":0.0,"id":114,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":68,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":100,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":112,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-2.0,-0.5],[-2.0,6.533649282003012],[-2.0,13.567298564006137],[-2.0,20.60094784600915]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":598.0,"y":550.0,"rotation":0.0,"id":112,"width":120.0,"height":80.0,"uid":"com.gliffy.shape.network.network_v3.home.cloud","order":66,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cloud.network_v3","strokeWidth":2.0,"s
trokeColor":"#000000","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":113,"width":116.00000000000001,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Remote

Endpoint_N

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":420.0,"y":550.0,"rotation":0.0,"id":109,"width":120.0,"height":80.0,"uid":"com.gliffy.shape.network.network_v3.home.cloud","order":64,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cloud.network_v3","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":111,"width":116.00000000000001,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Remote

Endpoint_1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":540.0,"y":438.5,"rotation":0.0,"id":104,"width":50.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":63,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

. . .

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[]},{"x":410.0,"y":379.5,"rotation":0.0,"id":103,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":62,"lockAspectRatio":false,"lockShape":false,"children":[{"x":15.0,"y":20.0,"rotation":0.0,"id":84,"width":100.0,"height":117.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":45,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":78.0,"rotation":0.0,"id":80,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":41,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":82,"magnitude":1},{"id":83,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":81,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":80,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":80,"magnitude":1},{"id":83,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":80,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":82,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[
{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":80,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

http

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":83,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":81,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":39.0,"rotation":0.0,"id":76,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":37,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":78,"magnitude":1},{"id":79,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":77,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":76,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":76,"magnitude":1},{"id":79,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":76,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBB
BBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":78,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":76,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

retry

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":79,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":77,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":72,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":33,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":74,"magnitude":1},{"id":75,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":73,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":72,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":72,"magnitude":1},{"id":75,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":72,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBB
BB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":74,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":72,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

queue

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":75,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":73,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":68,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.sitemap.sitemap_v1.default.download","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.download.sitemap_v1","strokeWidth":2.0,"strokeColor":"#666666","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":71,"width":126.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Endpoint_1

","tid":null,"valign":"middle","vposition":"above","hposition":"none"}}}]}]},{"x":598.0,"y":379.5,"rotation":0.0,"id":102,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":61,"lockAspectRatio":false,"lockShape":false,"children":[{"x":15.0,"y":20.0,"rotation":0.0,"id":87,"width":100.0,"height":117.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":60,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":78.0,"rotation":0.0,"id":88,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":56,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":90,"magnitude":1},{"id":91,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":89,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":88,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":88,"magnitude":1},{"id":91,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":88,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":90,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":
"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":88,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

http

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":91,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":89,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":39.0,"rotation":0.0,"id":92,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":52,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":94,"magnitude":1},{"id":95,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":93,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":92,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":92,"magnitude":1},{"id":95,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":92,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBB
BBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":94,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":92,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

retry

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":95,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":93,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":96,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":48,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":98,"magnitude":1},{"id":99,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":97,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":96,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":96,"magnitude":1},{"id":99,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":96,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBB
BB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":98,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":96,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

queue

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":99,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":97,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":100,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.sitemap.sitemap_v1.default.download","order":46,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.download.sitemap_v1","strokeWidth":2.0,"strokeColor":"#666666","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":101,"width":126.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Endpoint_N

","tid":null,"valign":"middle","vposition":"above","hposition":"none"}}}]}]}],"shapeStyles":{"com.gliffy.shape.sitemap.sitemap_v1.default":{"fill":"#ffffff","stroke":"#666666","strokeWidth":2},"com.gliffy.shape.network.network_v3.home":{"fill":"#000000"},"com.gliffy.shape.network.network_v3.business":{"fill":"#003366"},"com.gliffy.shape.basic.basic_v1.default":{"fill":"#FFFFFF","stroke":"#434343","strokeWidth":2}},"lineStyles":{"global":{"endArrow":1}},"textStyles":{"global":{"size":"14px"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.sitemap.sitemap_v2","com.gliffy.libraries.sitemap.sitemap_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.table.table_v2.default","com.gliffy.libraries.ui.ui_v3.navigation","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.ui.ui_v3.icon_symbols","com.gliffy.libraries.ui.ui_v2.forms_components","com.gliffy.libraries.ui.ui_v2.content","com.gliffy.libraries.ui.ui_v2.miscellaneous","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack","com.gliffy.libraries.uml.uml_v2.class","com.gliffy.libraries.uml.uml_v2.sequence","com.gliffy.libraries.uml.uml_v2.activity","com.gliffy.libraries.uml.uml_v2.state_machine","com.gliffy.libraries.uml.uml_v2.deployment","com.gliffy.libraries.uml.uml_v2.use_case","com.gliffy.libraries.erd.erd_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.uml.uml_v2.component","com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.images"]},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/vendor/github.com/docker/distribution/docs/images/notifications.png b/vendor/github.com/docker/distribution/docs/images/notifications.png new file mode 100644 index 
0000000000000000000000000000000000000000..09de8d2376d6f986374fceeb1e26389d3ab604df GIT binary patch literal 37836 zcmeEuWmHvN6fJQn>F!3lk#1=P6p@k;xP%DO4boh?q$LFv6_D>&l_)yJI41Ny!V`a_St)_x#pbfggsSP!oEjw4*>xITSZy^83F?0DgpwMGCC^w z%gYy6gWx}iPS2DcB77g9+C)H*K~RyGeeRC9lYx<~HFDGGo46)xRj+0{oGm+?tLmE? zf#*Rld7p;Hll&n~W)9vWeH-JEKoAx=gM484n#RxU1l0GIE;pB#;+7e8gPSv6gT;SD zy|RuBU4H9%6`hxjPt7;L8eGHt%!6PMWxv}G-7@5ng#=n30!HlJ$B!VJwT%A1110pQ z36jt7`%wS=^V5VW)PKKhVuY~v(0oO=2>f?0)*c&VCK)97X8|9H$UNpbq)hhqPzD@T zA`;Nue)iE3!%LOe1E%f{=@7&Dr~kXUdk})c6eDr7&A;m-D5So*8>b&c;-O#8PF%RL z(7)^Z<@5>NT}=jwhF(VFR9S(S^56AkG%P6pJADjZHmlc1h7|07*9W(U{eQYWsuf~L znMMD***Z5Rvpna`pU;YnB)=Ngxwf5~;c9<3U`rEv^KQIQpM_GPW%1_f=*jub<=!W) zeC=m7j;lefN@z0PXw)mIuOkL%VpMJi{)}!REc5VmclwilmF>?Dav`Bl>Rfj}s%MBD zUL6hQJ#7of&hkDCGjJYOD7Bk!42b8mp%k`FfW}B_H(vg(Bl{kf(Gt~h&`HbVwAQD> zX`HYpLq}IN>zXEU&_<}iEx~j02|2Tf1si+aUky^H+Z<2mz9J@>;1Id=i+$taK>ryE49hccH5B1Qq9)4{O1yG zyT9&BdL9pL{HlDzk*gGg)9G-1GLg@ULoN3FsN&7EP<#QWUa7pg=ql0qZq4Ax2*PTDZ+fN z-lJJn*Ox^-os~+GF5S# zy0rA)E2nIy^#T7E6|Ye$ZKiT;S<>wvHWBPKff10p?^aA8kwnSNH@-!_{IiG4X;A$% z=I_PC57fr1!^hZ|f1Cy-g^k1P7r_1Eq&xpqd99?|7U-0Ohe{!AOZDnkxs`EeG_`-- zUZXfw(f#NC+u!TdV`8K(He|q`k;i$fY4jU@^VW6Jc&WDTO_i~D*A!v%1!fW!dX=}<_ENST5%Y@ZPfdTYLN@4DO@HAPIq0yqu z`|YGJAx3Xcv`!*QU$h?iXZc*H#Aqm1V^fJdjd|&PpwQnrSpq#FJ3rgXFM42PuUH?7 zYt`Ie%Z9!V1v5jD;+=dE`@BKl*QYTp@$A^aFOC|g4|eRa|!XvnyLZhdl$oUdK@lixp11=&NM z25%rGj!_BLYboz>x-+#EDWQLK>c7|;Vtr#Gd>(?9A5wIl!Ut7E5SFqRK&_7*FRYq- zt$vVpw;NE9GW9wl7FWPF>#dkD;-D0=8B$QgB;mxvTECh9d!6+ujzQNC1$q+HzNMEx zyS2J042vo;#%gWRt-ucSc>Orm#Ygwa^epspBAXr{3Z(Y za;3O5M(z4!p{%5~ykt0)K`#n2KN>5Owf5+C1)Iz*VZq9#tL%90KSs60=}G3H#W4;0 z@T-s#qr{fXIfVvuWNW-IzjeXbO#93F`g(5&tqtoI4q3ip}1_aT# z4o$RJhu-_mX!+EurJkz+iw@}_&Ad0iDy-Wn*q#m;2SQgy2%O2Ked@VmTN!GW)>Z@` z-q7EM9pw~1OL=n95LVe@<6={Cr&hd$^P~00Q51qLNI&y+crvCdY@fAJ%xN#ank@bR z60d{nB8A;uJ}URPIKLjUMAVlHTB+yv9bq7;a443d~3D7XNzdWZ$p0wVi}r| zEr$l%lwjH770gw0ZA4s(~7MV8n~Qwzxze@piV17%!Pj<_rdYDFPbe`valznss#wP<(me#TW(ampW~T{=Zj}~GJZAZ`}bPxwcngC$Q#d(>8p9o 
zdy~fOj;fOc_h?8gk8N@l^dyz8Wow4Em2l_DBKc)jO3tv;`NaI=Frh{stHe|F9%Z5{ z!neuS4-+C$aUoXgy}>o_bGa+l7dh5MW}#?Dz)@QAj7)N)Y*0$Spyv5kqlVg3S5h9M z0=Y@DahSWD4c}F`QN^rGU&+$17Z2{|>lBUC@XXLxo*8eDaunfbZN5?L^#5Rr&VQAo zll856=l~g4B74vricTG4{y{j?4qFzQQl}En96&sRXC{ykO+4uyw7r`AG6@kGwW}{x zz=iC`mwK6mzXvhC8$)#!=&E)$_KbceZ)Rq)L!vu}GCkJFse3y6@fBI@M@(RK5#d?3 zcUig{e_!k3S5q$pIoGp%ecW}pv9@1eHaWi{7I)FZOmq&6 zO*ZmWS@-7q6R8y0C&B9Zi}&xMsCyM46x}I)9Hq7-JoG3wSf-Mdxh+A^>Rh+Ci@~H@PXz`P zC_`~!HT1f}P6fIND6(9AGE_&0hmh6*JfA!)*g>3-?X#Sb5XwUHi($ zdWV@T(cL}OMTZ$oQ`|O2~3MgF$|%&^aJcIXp@y zmy7kw!YIz$S^ou{yPKZUa0CU>{BQBWcX4Z906@?z>lhD~JM_d322=1&o#YM`Hw}Wp z_|9s+3wkLW4{vr%t$_~VN*OSP>Le%kQaO|6wlXzIr5*n@jVK|%4_w)pw zmvbCbCQ|SL(m^5hPUblDesoW=q$ExFgdq4n%Nf!{es`mM`*Z2tVj-c>y*WILnFQyn&nAul>S=fZHnvOHm(gC(`FLR{vw zu0B;^t@CV3R&T%g%cINFUBYnW#izoyB~7m&iYoO&j`4U*p3H~@${dJ;VTZLY+e9)E zBwX)mQ&KTaUa*nGr%b?#BD%j9rPF^qS|8Lg=}18H>}w|v-QSri8xhJ0xUd`fjO}&U zu7x!Vo@|>_j*X}4j$yG&ha5rxNSOk}ddEr2_~wjV)&BYVRo|^mF%Difcvgf<|FoI- zZYZIULrXyCPsO|-sxh!wYdjp!WUR=*OJ$hOCpV6_Lij{tSWs5zyosoKPy&p58AEbi%$xj;NX~MOhU7z4>kdPq(g_fnsr&8s+rF_E#N*K!q zzKOd-J!QX~^x(uEhPd0>=q%nZPo7*{yxEw?jN)z`5_Ab7WfGW|IfUvJzPCZeGAWf2tW_$n&#M2bGJLSm@kQMK*FP_8Hx!k~ z5v&m|PaTOkg{C^zLyCDGtObwMOl#MB@AI?A5Smu7PRDCM0j;8#15epIB zapbew$*$T!TbC|;p*2ZEyxDp-sCjgYW!Q2p+qJ50tn3w5iB4W`k#Owm6)6noP-4W& z2odamVqG>r<#p$xk$NyCXAIU@0Pa>wPP*ht&pS}2wJ9|vV>S$>1%#~!pB|WG^AzEA z_4bYwq==n9YD~fXH=vi{r5-3Wq>xX!VIPID%N!!VE+2m$7`FhUXjece6;l&I9-Goy)% zy09QfnFEXhO7Q8dfj=;4MI+b(HRo$-=3Ap*MYSDClHoC={s{;Q*J6$jLfk9sYE_g96|zLHKike&XX<&X=(NwnyH-yMWO_z_f=hkt(jHH6nRw7yp* zIF!P6a|YjQx7eHY{<{9TylPS zu+5kY^{d90*E7A;*ZNX(O#tF{-<>OW@Rdrd^=>!9@7YuaCBQSw6MtAQAH0FUY(N zLu)Z~>KAkz)ljNI3*-Zv06YyQd2o|>-;iwf;0gK5tN%DD8%N&DOe< zLOXG$e)#^lw*&5yzQOBH(wphZZt=bP+@g~D-?iH-sg{{>phCU3Q)W3hP8r~Nvega# z^!40o;(Nw}S3}unNW*A%+v*pH*3^oI-=4EY;hTE56YHR_*=M~y)oXSD0iYt!do@v~ zClsI2?B?qHa7^bVhUY+o=gCid;aTT+iL3oq{F)YffygjQ*ZNAwRv7lryeG6)^No_~ z-4`d@=1Xm1Nc16oxp`5%8}!0fKjmMB=ze&JLN53^;WXBI^vj3Evyi)dMhO-T_b9Tq 
zP}Qqp1}cFdhn@ts3yiFCPQU_;=@9>#cMi^U*6{MA^cV6`jHY5cBSUuEp`2~-a`OPi z>?$bzHS-jsD8BAPxyjD~%Fn`RvAbJz+^WH~l z=liX=tB1Wj6ZWS@RhbJQLc)VbB;mrAb#i_-z|PMaJYC3zZPN>k8eX7~O6sFCk=U++ zk|L5^z?qfO6q{b~GoS+4TcDlMiJ(gE(tILG{Cm;Vhs+dI^{=QII-j0`(E4GM&WIDw zYx7g`aYh?>ykleIp@YYQf4&rHo;TOyTA0gD%?y4Eb2rL>2T~us}k9iKi zVU)H*G`rx5ap6kr``u%wqEGKI)~!=Z6FLORa0|_;>GOm^#cQq2>8c`_+!b#%A{no- z-|0JXmn|kxtXiFUtdyGfzP6*5e8stc0iYe^AOqCZ%%br44wkkV2m@1#_nMa2z3fPb zZoOXT~X%HiYK%d z)jz{mYB^(SPAs_&s=yA%pP3?S$x!m+Z&v?)ppb2xKBj-(Gc_Y)R$`ye`=MWtms12j z=oJJoW0Kn4WCI=HukZ4L{|sYOrsMB1(Z_dDenA6-yBKY(GKJfqAL|8HW{u7|C*SRM z@l(RESD(ryZnXh<=3`9L&rgp>lF2k5h5EgvTna+E7!^>M$|LTy0dpjx1cDgV<2+~% ztGL=|@Y()k?3CuO9sAXX{E_^~kbP|~vT#PzMHcRXG_lT-JK*xGtY z&lnF62iXtm7Y#>whwGB>(-6wV$}B_32rAY~$4EjBH3TG-pt1Oj=>=JzpM# zItjfg_s^&-*0NmfNhG9WxhTclL9&>mNqlM?ZOzD~I4vFVVDQNc%QshlIJ^2cf5Pcc+Fz8deBOvlly2 zsgi_iTFF$0m6{&_>}!gFCk+I{DqF*^TKP$hk@WiVuvQzT0YzNU5nRCo*7h37fv>^WwJm_H_@Q*~}{$`-82H zmPmXv`&S?uwaCo{xAz|1Wr0-srhYm8)`p?y^u=WFJB8PDNjY`#MtD#}nMpElLGw!h zJiUr9cq1b+d4fDnw#I6EW-`_8m1dys!cP$^3GtTn&X63lH&&tR)7=lsYY&E|_y zmz9c5)VFN>d*hYL2ZeEGb8uo8B*1w1kL@vw*Q!VDT-hSRw zzy_KFHi;df!g^x-$iA0kxgyx;aT2G726wm}NG>ZyzSq_oMjMLoh2=oFU&sp8C6!b@ z5>cJGe_Emhs2OdmzZcs@Qrs$wdYz#Nis-!Tu~wS3&Q{IKR)DMelEALTJ~Cx;0e&tj z)0CVOIai{G5UlhneA>2U&U9I>|^I*3ktga9-^z0}BT z!aCm^)X#Sjbkn<9wW7J~plGNw?Hxac59lkw{^}Od@n>U#sc=56r&w7uNFE*@?_&Wn z_(&m6tQxg5Tc=a}mJJ)e4?YI^@hD97F+y+RnCsAuCn-8c9he`7P%lSIUA)41X<15; zB80I*hWPD8w3bK&r8Hvb;b*n9mFqwAG3Y^*uy}Pb6+rPZNM9evr@uX&@@8TWz{3T* z_uYLN4fQ0dR&z=J8mIN1vx8-5Qkh7WGm;)* zF__pKbpQMj2Ko{gS=(3U22 z4QBT&0wJU*{tJ(R8K`v_LERQFhb#G9%z1+jE?VF-DsJ=`VBOdG_FSc%z^?39jZ@w| zy1Btius$?z$#an8&HGaL*kk|9IK`Yc0!WkRl(LVe5v(h>Qc}I~^6y3Ry(@s*vVC+4 zU)%=_cG-Xv!`o2tphneH{%5-0tn9XUK7gK^JzD}Sexe+Xs64aUdnSNSbO@9wH{b7a z$2Y~F(kX@>+%1R1JI&XBSbe0g7^9@mEvqQo)%n z*45KdQ!EtoM{E5Y52Uc_T5)Z$`l3)E*yjPfxf0cSO~v9r599q=vrsXmnYJbJD2&2>bM$L`w0t(st_6@c z9M2X*Fg=?ND4a%ky-s)b)sSri-gWEOTm5_)i=gN)b+&+<@w$Z~@|#w^7z|aDuo_=T 
zxF(cL*tXi_vE}UV$r3rGI{Oh@5+5;bWyJ(-q;ySJ1Yz!l>L$}hk^nzi);1?VN!6aA ztH{JEMN@s1>rJU@56%H0Q0xho8|PX}vEb4fAd*~kDJ7-)xS+2nOe5yVBd?MCprH^f z&pABI6x{=2SH2cr30R9|U&;qbhrY*`V+$Rv=zO-$PieawQ3;&RjiWyJ>@2td*=)NO zv7kp?J{J42!N+Eb`Om^Jb`)+H#(y1mx?o?K%a{?inbUbwxSPLukLI6eUz@C0gF;Z9 z315e*+Ln%1_seEd^WMfJG-eE+sp_Y3y|=pPrVs*t8D)XCV$tdOdKPIPOANKRHXPe4 z(Ri3mP6H(4@`3cq_>3YVw3xmfU|sGF*~J_{EdSYN;OnDZuF>@_p2Tz-n>^&3--@bw zGp)VOq&7(E&>Y<_Z+tHT*R}*g%HU7DQeD%e(7T7qY2O+s>dDww?aut#e)$*-WiyI+ zi?RW~!2JgZJI)^rat@k*)L4B1Y3h;3W9D50?2$S*yF#Ta+oyTz;brI8rq_r<`~IvC z=}LI_@JE8XAD35PXk#MimCsCJ~)TK_-eO7JO(SGmiH`J1L?! zsrdpLEf2-^LAswH4`nedvvsOWC=FdVhoYFmrODGqX|^?*KH?-0M^}2y?>^12ItA)F zzMV877&Oij4BZCX>8U>5jBhWk@zW{^+t9>S>)-Ufm&TcK znJ@%GT!HEwh#h_vwNl1vnven`r-dltIc4%_!5t5on;b^9H8&z^w3t9hL8j3H(6_L3 zmm)owuA8ErfC#C+_1?6hR&%+eO$w>3LWb8^k~sT^%9U#&ia+A^XHjDe%Y`0}qS5WZ zicBW^W)#v_?&(T2k+PR^jd*B>uV-h_3W9?X(?d!W$B+Oa{Y==pOUzg@|(OwL}_!Pv9b#Sx8CjA~(}D2yoXJwvRoRj89~mAX47Bf*34KaQ4$p zGPfZ`F%9+T5js?IqG-`FtxBEk-t{Uzg!?uDR}U*gTUD)S^Pl2YM#C%!W~*)kMYX zIpt2!O7O~~+jHi-PLYw=)=7--m7LdEN~I@IN1Z=OD=ypHV0@PV@rQ!v*XR2Mi<$hM zJ1OFwM<+=`+e>PZH-E*fm8ExC@b<(OKuVwz5Ab8x5WD-B|r}wsl~vB9mk(zleA0d&$9M_ z^?$|7HxWTwIDy5@FzK8Nid(UlD-~Dw<7rtSkC}=47TE& zkyz+J6voQKKfp4=56mmM#Lv=&`^J;j3|*?ErG}%9i#cE z(O&1*58uBDOZFVRkj2FiT33#X^3Ka%o*(FIzfIol(x<5|FNJz|A3rJ%MU%w!F3#8_ zw@f#wzkl_H)GWTUWv(|8lNyUV^1)a9rX}uU)WSu*-E(@OJ^|cRRZ;&@GQdR*%cF|yiQ0-xdH2)$yZYo(r6u!6NziiMR zPVDj=&1e5PI+o|u_{C5P`$7%b z^*Z`T197FkNa7xwm26S04@D?fT2ela)$C?$C3%v~$|n?~{K<19Fpjru5l3jg`L*4# z8GWp70Y)Ir*I9X<+VVZanEs&RE!rioF)f|23Ye_hWmyPMAC7-yG^O0cc>PzLwA?@U z0yBA*6nUuRe)E$3f>8_e;91VF(&28nbn@G`Vt-3L72B&n8l}nbDJYg8O#}3!5NYNK zIbt3Pd3$F)ANR(ZskLWe%H!7duDRYYG*T2%nnwed%@TzbtdF*T9!Exzi6_3-3x2aC zARGeps$x;%gQ!m0gDxgmo|y5M;&w;E%I;je=l52ir=1h(M7eLQ-=Cyw&)OTT^9CUGuHWE=`Q zKv{q(ic$q6)9zI~6=Jxf;|y7k90bsMM7WgdM5hI8a|A9wRan~9`~#?j**hgo)es=} zGzHZoO1e2u8INGm6ULrrC$7g&byabo@54+++MWjXQEw}Jbt)l>sr5Ee3Q zyU!`UmP6n6YTJDUAB4vVVb_~L<)wp1P~POIMa%k2%A9C>vc?Wm9>xMti{j7l8fQ}2 
zlKt+8LOP6A5-1_&QA{g#$3#8qMUUV&IUrlhICw?wIRdBm1}x<^O7QV}U<#%de<8<1 zR!b~kXJiq-^^8SW`#!|s*Bso{O`Msc`5P>{>e1@zY!<&@jxZaVhq5?~eW54~Q16)s zts%Ii-jU7v%XHenUxLrs7r-Z}I$1I(0m1hompC4h6Sh>l(10 z8YXQPB6zFtGJ(C;orV@wlY^|;U-k*92Gffly6AzJ_dM?hOKMvv8=Ksd zBC8c>b?e~Dw7W7Wt|hWsL?Vijk^47F>}Y~px|~x zgX$dD-BL{cpKRZ-m}#$~vw(IYI%fm5MV5Rmaq~{qj%ChygXbF37eFlVC-H`g=wm4H zm&uSB9~1dK6y~_arv<9*PMHq?YG!&$h4?a@AI3`GLk4Se(~z5ON8Eoeo|)Lj5H9bf zl~UC<^leknv(VsR1!)K>PJ|jZV}9n8O{}utWjN+9COyN=$r8TkQb@IduIHJU3;&^ zSPqs``An_i3hN1AdZyuL`odpO+J5M5Lz&E#6G^X*HE&9fqOF%BtewtFDcga^rik@! zcC-ak8aWb2CNhGrBb&v_QAa^!O~L89w%HqkVbmiA_1L zc@L2#p_vVQnvL<{)E0Smr#0+*i63Tu-!3e`CT}s5I-+);HJ;D?7}+$+!|QU1XL;b3 z23Jl5K~pP$J2-?F+DwQAc&>C4)~@j86KOm%&dOHn&Eg}1wXKq0w07IgmBGVBt4@+~ zs&M&}1^o;t+0)itHsoRSHr~AHQMl`|BbrA*ywEm;YWmc>;=1m{1|6lCX5Vtsc|fzv zVBWI##0vPM*n-kkz}s@5|(7T zPJFJ@2BR9sPZtk(Tw)l>8sJMuW(H(3huoe>rK4ZceJ1Zk*ZNkN{yb}6etI^Ey|oNT z|FL$Xbe+ohTkaAA$>8lSL?L;p$-crvoqXN`rSfq%-!BpLmDN>@(>5c(VNf-02E(~% z0ytM}TK{uxdS@dTQeuTd;zjOn5hMC$%wFc9?urY*N3`~tEN~vW2?I#);b( zZHDWQTy!DDP6_ioC`B!J21_vyWP22=cHZ}pr;lg3%zp9VTa5sRKhwgBDEk-vFl@># z?6CR6!7I3m=cftr<^cSw!00wnq-sZCKkMnU=_ms-E@9WE#4`*E><*oV!7%t3h-sR-1_RTybo=*1T}Du!8Vpb znv!X%k~Ba5W=ycDXCW7CZcW~Y9rks?hKr?l^$n7{rE94Ls2(ZGK{ea0*lf>4*GOzW zP7R+c5j8qu4H_}$FBX=-bEs|^aj=R9vNDdAP{bo7CcHGiRH?9CiutLjGRu4=mZfl> z_P)zsrREU@FDf1hJOh@(rorj~+X(84ERig9=$#=17y<^^#3LyqXnZa< zSthC)+P10b``m+G0lhLnf#fzA%OKq_H)WQ#o`g_W7MoiDyD3$)D8JZ8RhMrm4Vv3+;%EgIgB8 zwgJhKod3P{Q59qR(8Wr3{3p1rch+sb8?bzzfP7byvnYC}reWcM5t(dk)cl^VdIBtr z%o7GqQ8GkIF*IC1{@&DmSN8^SVs%IkH&Og|wVasSZ%)AOjVxO3ylt@Pr#L^Iw)+HJ z$c~_d?u9F)DLk+t(@cuiQMmr+41BI@0~wS8>vr`AuyFn>1Fzi*xFFnX=r(&l)OYt+ zML<9PXVp{&%XA0WJ(oUZ_jk7{B^@I~VoVce-*o1|V~PF>ED5urY}o)Bu1q~Dpq+8P zY&1ZjXjmdt7m>YAnd9$?;^l=GG0uB)hP!yD3K1k2G((-jK^F;{>KejWIzNDW_O-At z@1TU$Po~YG4}s3RzqHz~K$V;TrzSaQ7IB5KH|TRo)o()q(*oD6e__u-qKNyLq@!y_ zCbFN3X9v%Kv0fKK!u6T?0pz;46-OY%5ttLLs}nhNUhh$Mb{MCKC&|9pz*vR~uZ=~o z_9hS3UbmEenw{K^0Cpw+8Kwm`je&7?zsVNkbxJJpKfBX9Kvw&_4re>(*KlEtxl#Dg 
zdb=sI`ro|2WX=wN;~)aZNiGyHC(OXZF!JZYaz*IWtEU!Z(lNzm(1v1Fuv5bb7t$S< z`rhIAFPa1X@1_`Vp2=V`9(gt44YXhdiSY0|VSoOGE`_I~UO=j-@nQ>6mR;-H$JU=I zd0+=s+&k`yV_a>;l_uhp%KfKy3IXko`aA9Mc#fCNTFIwqYH_$gU=BF#_v|FzCMQQ< zG;+(=v3!TYqvK~^f$O2RByes1B;bhIXwt+U%Qr=!BfV18trc5%Bg&8bnGV`NW9B}9 z=%uY%0VvvMWPzdv$DvUkV7sjX{xTjgpqn$?u>6t*%%crg-+kX<1=3A4R~sBSsnQE3 zsnx`RW(|oEIUZ;*hSC2B_i_o>gXgF=jz`R6a~hq2Ray@Wn|Gp zsF-C-hLbRwYRQ5y#&O{+>1c)E_1nxb4d?ZDFHC;JkARsd6eRBu_A~_`X8^fjUkHq~ zYCEKc@%FHb9-Ncvn+-|g?HX;G2C-!BJMCKZ-I5If^1joTQoZMQJKYX0JEdWZ^^ru( zb@a^;DZp+jPCc4tKqHKIeK-9t+nNQDzNB zohlfvjcl>>fq5c9()@=G(-9n;5x~Fr=s)RjK@di?n(#|j5gD4MF=OT+C7L_d)CR^T zA=5OGgXL2l3*C6%IzjVFtqVl2pufr%}gcUGVkW)SFY z$HgD3!BYs+f-F#LaJBE1$=@Evrh)mY(c9y^a~b)e0RSVZ%hpG5cXC^xGOb%Ql#RlF z3i>ACrX%A|ypQh=YgC#va}2&4h!&z8b!5*J5G0A=BY8j8WhmbOK9;6!h{XM#HEdbX ziLh9iYGfx$(@k5c8_f46ui7THP-28_ZH;;4qoD!KCP;&^b-=FLZvlh}v+2q=>|L69 z>L8XD$CM^>`+!YLDu5LG+@A^uB4oUBpRg!fah=>Q`8|+lGY5KmBV40BY<|7~`{se| z%;ZTW(t(nciRv#tfsT0iK#lQ%uSt#3%8&4qW4T-S1j;O6 z8Hia4aQ!=G4M0$+K^JlU@9Y1M{haD~P#U8;+@ZvHz)SG&p1WM(c2FZ27BCi}cfe&M` z^#pgnBWR(yN;*k6)X!6=Zngw=$_o`XepGF+&4eKE3oFMp0g6@-2DG;a?I~BpR1GU& zDPr0_0#2&txl-gYB@I^uhFrLk`UVNC|7Qrm?b6SNM@If?uD)ip=;ceehQ>$bcrTze z_!EG`3BzD#uh970HJ&SCW?1J$K7@<1aiD3ax8CDu1@KjP0i z2?+9V%QxXM1#q||I1wEl9euK%_#UwLXWsW^CNMk^ZvX*LU|m;4W8oPx*Rs5PybRa($9wbA6L`wq@9w*l=%n(Sn}Jpc{x`oKWW1aMO+9dtoNfS( z+Bvo<{omGb^@E`^R9gf4hVL>^YyEqBrZx4vjj#`THixxgJ_~9 zr>>mcIp{HoubT6GyxtACLv=W4;Wl$(r=BoUUVM&94vgo1f6-G=o{}xK4{@(8UUB!_ zdf;yE#@+n@KLV1mT<8-dy7@ zb(xq7-FpNG*`WFI0Z$~735x)WLTXxwJmESh#bka$h62ez;W0o9MI7}*M5bymzMsK| z%7lo9VZ4oyfz`0NNBN&W6u8KVR@+kfApg>DHN5Zep0*;Wt8Q~< z7$|mF1|sR<;RIe66NA8@Q_TPDUrbU?2k6GZ0_(rd$M0~zUO-~pe|FL(ke|Z@pl}}x z{9_*sk~?uh7LNC1MlD07w!RU#i2{i_U~T)|E>Q&_0-jb{efLq=}`i!L(_Ww(Lnt5 z<)6-s46q4R;J6EvkPaawUh-$aLP2g0xdeQG6Ue{ zV${6d=Ptl`3frw+YPFkNcS>OXnH@zN~ zv&HLLtCX^vlPGKhtqgi_gQ^wi5i2mLd6vX&_^l-!Q;_QK7(3~!;>gDPeslIf!DNz_ zeu*P}wZFd(>f|wg$+LyPL!j(G1_r)kb?=R#51@fo%0<7_EG*rzht0Fm_wP-q)d;iX 
zUm)#%=>kR?{aYud>=QE7d0_o0$}D>JDIDHE>PaIEf%|)QfT=l&=Os57Pz~I*ytN1C z27qzv-cPyo>W%vS7EFro#nPCh+zFs1Qc*FIjNTmhdiy8`*YW&q-fVw)IOEr1Gh3_0 z%YYC#kAUSIS~9}z@PtNF|%q49dN5AN_w5X3Zst&t(gU*h59;smGnKpla+{L z30$h1NO3m9Il*vI;AgH9F1e`VN4S)2@&a!i^d4mTH@sc*t3gevvF>kR6i)-f80NpWa1lD81 zFL4DuzQdS#3}Y#HAMbcf1Lv8nT*7b}wUAB9Y3QO)_xsu>Gjw* ztHEmEUwH>IV@Z*IfdFQ5tk%9w`C$umW~BY->gWgp#t?*f1V5`l;YM|i#XDT+gCM3`PR(jR$} ztkkpm(>IWFV8X)Y=?P0zGSFQll{*DIT-SJIG=84T_W0%lB3qaVckKqlHBg=Y7XA>G z>>8qF>^dlSJNv|H`6VZ%KpI^J!uQ8I5?`T@0~$?ADu&JsB8mQ`&vDsJ9R3!EZ z!fy2biECYh(}!e%AIKf=OptU9p=a5UN0@ zKYan9Q;Sn4SxzT|3Y6D7g9Fw&QS)p>&q$rKsU?Tgdk592R@aR9S~o$ogZdnl59}$J zyJPvus0w7zt%)L@3~{${6G3lWiU-OeWk7j|VWS4Euv^4~Pfh8`V3oTa)oD&^XQ7PI zLp84pgn~CZx6IRcub2_V{cUWvbp;SLI^t0+j=zG)=?L2Hw;nz$!0aa0Y}rzPWDN}@ z5|pJg;RPbVN=kaBLYeCqhM75~YC!k3voa-Sa}Kt)foaNgpNdi)sf!Iw_hCNWOVDuf zqTVv=ZRHo!NA)-1kwMYX_{REAaLns>MT~rdR|6zhTe|kW)$#p!f1!{ED-}&(Np}*M zo$3#rLKZo$wwm%42Qh#rvn0|$F`CS$=_hXe({ zR__p|J()L^KC5X&uQUU;)t?Lk8rFLgl(2JXqgt>rT(tnZRfOMV=9S0lRVc_S4d*8& zjqr}dkdg1NzbfbSEM7(hQ90x(t}~u958?k-2Xr+mlra%JPvF9K@aSATGsa z{52awvr+zOuixb^2@hs>GrV4|YhRjLsnD4tO~T5-TO4Yil;B&0+Ld=J%0_cD#*}5r z)PlPey__a`tc(;*$ol)Cf~+f*C}SU}@+;lK9Z@;ntHgtz#S~0pSL(7qDQsOn-?i2n zc#$?=@1<*U7ofUlLX@+0k*c4jx@l6HDAG+{*Y?y-J{*x7AW&8UQ@w6;WZJCKq=$rO z63T~#GNo4;+3y9Jc@4(DHzw#LkJQNI*O}XgO6hLXWj9EFNgyVW!G~m90j}NgEde85 z#ueWW9iV^7x_j+EV`b>!KQ-9ULbuO^3nAd+?+mB-(?mw@NIE+IhX6EVBDRkFhf8e% zXm%#F^!#`H6ujiak>2>qViCMR z4{5ng_hFXr4IBI|8ldUP;&Gg}Ry631d<^o_N_L<-xw2AwNkd+s}T2P?oKP}lEn>mt%9k#*F$qPLma8^%~KN47ov$jg9Zq;_SPm4&Oxi2vTDXh z8b)X+v+-nUgHpW8-x6AoNJX!A8P+u)9CaBOC!BxwGqEPqWJw*=I9+WjHuoCzp1t_c zpqU^rubzVZuh4k!gQe+>5d2*+iWeB5Ge{AjUU&Lg2{T7AW#$l2-XUm@~2Qy#$o=#bQa^3yq2)Eez!!ZcV!> z4{cdkwh@T1aCSql&b82Hu3itkx#R0Ru%!+^2dkfJd%RGSeirnh!5zy0c(8y@H0=+W z53JsS*M10WxdG47)`XcGpo9X1At*?Qse;x?k)V|tZn_tqbI*MaoYypOk2PF`Z_d?_ zZ1Ie}+ycLeJ3LF6UF^}7R7F5K2S#)ikmWuBzZ<->u3HTlQ)Hrt17U1h-=0rm7p2V< zn*`qD_nd%BHR;}m-Tb~c-cvxhYfPIAlFJ6|Io7!3uE#yNqAPd}7=FC-z)wW+2PDA- zRXL>Z@C%~kTgN$`{SqvXoXm%SiThNo)b$ 
zoJt!1o-O0nfF?VJ^dWK3iB*LTR;9vthC5r&Z+r6s8s?_}MzMicHw<8>**qlck3jAd zWo!~rv_nRfo8nDjv>p8tXDmV~H|{P;>)FhEYM&A#gqo89Z114p@X~Gi_wJMjMrUdu z_JI*nea*)QBh;#ooQZ%GFWg%afp%a~yUta62|GYB zo82DE&&v@9-DDYF9k<C9kFJkW4khCZ1xY>b@Eg?w+kce~IBsSP&yfVS3KOgfXf;ai}02E!6UoxC_1O8`h9#k2e zWQ{&R$5VR-o)f7ks32N9%VYg0A(L~n*<0%AcWtLJgsh+`8w(%~(l_+^8;d}Te(4l- zZqj#|LmH`I1Qu5WD>g~!2e-)N3FsG!hi}%>`Gl{5PuB{!4v04a&O2+MsYWNRqu7np zt?uP7l_oYMn?3b7XK9-7(#y!yfDnvqQmjCF8!SBl%hCmGi@;>hQzxmo56@2hl8^-O9x3w*>-PT2bhf)P%3?ud8GVCW6g2$Pzp zwzIsJR!n}2k_BHr)B79HfvKE*Vew?D&ZRZ|F8+e{m~cmUd(3V74by@l2snG=*iN@9 z3sWo<5E-J{i=5oIRD_=!ASxR4h5N|wbctwS09n@!_WDrmPKC_uS!Bht!ZmT|H}SkB zMXbBxFfX?r#x?CF_KGwE29JI<8)6^xF(G|8dhTJ!Y8Ds(uqp}|Z!wkCbPtST>sS8i zn_5aphov4Jzql!7 zleXDQEmHeY4l?)J^rL|_1XG0O!`6MlxgS`v9oP^EX^|^uADOJ;Io(1v*-$LPwD`1) z1f+V1gXYn3{1L=nwmnw7kx#h7xH%w2m9>knWdGwJ*zlI$TUdKip@Ru;6%izj1e~K# z42JWq*t(?hpITHYgY~7b7yggl-ZCo7u8SI_ySq!e zTe>?%kOt}QF6j_yY3W9~kq`;#?vf4xK~h>c`+nZ%eaH9bjB&m{$Kf9?Zmzob-fOM7 z=A8TI*1l&LDm$THNp0HF`s|>Pwo;(&Dc+uL^PqbTt^_8o=(xku9#SY;*qarI5zDXR zY!xW?sDnFmPtG`h$ZRstQW%2-#m`CA-$Nz7UIMP74G<-U#8z{#} z5N7A zasbzb(HWcuUD{U?!6#pXBeM8EG-P=Ydu!t|f^ggw4zs~KjH?SMMyvq!fmm1Zd;|%T zUK*<&O^C!Mly^8p&t(Z>9pabs@J8^ubN2Lf9e#W*;N%v`bB6O^NM|~E&1}AEk`!gs{fV)_hE0Y8~q`VqdN8yh$xV5LjN^Ta%oT# z3Ap4-u=?HnW&?`zWPr*K^Gu2qZ3A;XSP*?vAfQnLDCFMPL1-P|vgJW|IL?WoQ$SA@ z9qLf|2V|2d;lzcc_M{_x_6y~w;IB2AwX1AUiJPQ!O*SX6W2lmSo*TXnDl>n5TGCYG za4y}4g@G74k@+9j3l+DU_91}ClmIFOWB2$QTJ>xJji>9@Z;Bx8*aHX(A(DWxG_rYb z?>Z=cGXV?(fq_T}zawr-YDz$zmJFPEm?FHut&gmZdj{_TSqyxQoQ?#VNDimN?jFpv zvdc^#V;NYG@>@Ltd}pgSX8hkkp9e4}g)1d3f8ASAAaiTdfoSl^N7!|0$TmJ$QeCX# z!W}!Novh)$p#eGW9IX46But0*>4Kpt#upF>h2Y}gWqt5BsH4bUJH&H|qfkTCKNefP z5uv_TXx_7l!Gy?lU;T@;L7P9$&xBn>As>J)q3eAI(?y<^oRQZ+dW8%Tkna-!wo?Wd zCi!RzyOBObx28)QIatbxrz{O)Oa~U+xK9O){zViSsF+xsectpe!2ck%>)iv2Hk3I& zAHh3G8$>js{Fr9Wve?B7bPAC#Q~d`2(CQ_66MXt7v92JAIXi{;$0vQlx;7)>gjTf4xeAFAnUzh~Ut|93KJ%1V2f=fq-U7lzf&2#?gAZ;>w?rBLemol@?uw@~ z|8MU%1r%qRy@TDuSe(MhrY~7Pjo~Z-F`!lVoT8O`a?WSc7DyC%EB>D-qByaa58{x% 
zRp(L&eIwl1!w1zI3;vhOneC>d*SUB1f293?x zk3{>jy$2q+rOfCkJDeVxle5tMlP6TI>lmqYrMA zzIv<|$`}v@G5&YGDy`K&gMx-n>03OO#OV@iij+D!<)KEcp}gJzF}9S3R~f}W-@8f`h}DJ%sVd{#DoSuJ zgD8E~NQc$Bnax zBl4rw`R#W*%T8-SETr#OqaauI1aD})f*Np@j>6i`p+Z2E_!UDBy5;T?PE=hGrxQqn z;Hc&QFn~e4-9yGXgN1JKcAm$PT{igB^TQ`t0`Tm334x=*?CF8A3uxrRQxb{z>lex( zLy9blOHAb3IH=W*R=e1amp+wZ8KenV!C2pbwaNvcj~z^TfFAfB@K{U()`cSglKrCz zU~)*!jA1DbWW7gX@9sTB2T+k9E~Sv=6BNF=9qGQs#+h+TVBng&4}aIYPT`TvvAqw4 zg1ZB{db8I(Y^`KMY5C{vr-3pfFHFd1}@4(eV$^4l+{++Uwm!u;I~ z!#t`af2Uz%#clo1!hggxvR;n3-x~%8$v+r!JU$u%NnEJ$`QzPXiSOn9%0mRyPHGe| zzzoGOrSzQBcwHoij=%Nz7ywBWn(XG4oH|bUdwS~tr_i@99FUdpzzWP^lB7r~E0}zh z*Gr^m|r!C zLG>{Q@4n3i<5duGw%z+CgDb{z6XV-}1F&;p-P|Yprq~?=`J@KRzcCLMM$Tbcc)iIs zS;A#J!U23k4MER=MiHX!`zo0)opXU4Ulr0)2{EAr!gVo5>P7_^cyI<(-QwAS`NhNH z@umP(s07i1)_~*s!4UxC(sOj(7S>b^JaWg6*YUAifuyB44gcxk>aeJ9sf%5N2c%O= z>pYr!U@W^>S5VLg1K>g~7U?HeuODbmL?qOop|UiL%l?WN{yk;PMG*E55I|?T-ubry z%oSqiD#YQKYl|T-fr{+&9*mhN=Drb>|BSQSa0!sXy>Qe2{F*LDd0>j)>#nzy2IR5W zfxz!65Comvu1`gbWeI$mBhWAho`9Ajj;J(3^D+3~MX1B+L<`)9hDH%$0}R}5PVZXd zuWlJoF}cd%Zr46L)^vGAv6%n2w=f4l9b05}X!;*zL*)UoT44}2I6;bXI{W-7 zV1oMPLVF!EzwPSm>g7rrs{x)i-uHkh>`l`%vrV`f45+|wW~FkbzDR@Yn!F}@;0O{O z)S9dg#VuhD1e^AZxb%34)9o3TOwcTK5UzZuf0-CF;ov$dHTi?s;a zx5HP5vk<~>KT0F^?ngRh6ge2c3i;Kx{ieoHuYYOG*N;6+FNGS^xjLn9A!y*GBb~K7 z6j!jxo$0L*=##<|fAvFh0-aHm>X$Eu7%Ab%2w_QZO#H1$-$y|a`t?gk2fF1#v6mCM z0jH5t0^l@~jQo$&$Q7iN8+$_}E9icl*)WOpuOx%TkB<$0*&2W3m_QLchk{6r;Oi74 z>CyL%Nl@%sxPZ@-cHvX5*LPZWT&BOdL-v9oL0j8$_)imnAv$;47uv^GeAbxiL5LCWP z1{abc()#_6v!{YoSf&tf)5CW&0mCUsLmmMQP2JY5XqLA=kj2l?f0Zt?%&ws7H z-$U*eL*Yv5U+T^wIGx)6)x!hATXY{KoHb4)hOh0oeyJaJ_I- zzutisi8+evZ0UG0C}cnCyi~t9+EvWy6hXH}C_dh>U#P+kNPE0Wo=3e?1ZJ_VE+lpN zKc@k5a0q}g|0!$>pG`RE7IOwhAxcEqW#7_RE@k9R8h)E8DqmpG)l28FP?hO%=vKq) zxS|#BbVG=3&#&;TmnFX2Lx6^yIPOc6&mi9adLazcr51x0v?e8On)4pW)JT4y^ow%98l zws~|^w-AZdejyu7l%_R`*k6F+m|AZxp1iz(C5Q&>y`>d;BEx6j&_?$r!_yf6Y-i<;CcSGUt73?f2%pB?mBdSoV^zj#aqNo6l5~K z{hwNMMTHqBh!!YB{dj+7yEkjlYdAq}){+t2tn_AP)eUmK_ha7kqdc=t4XddC(>TuV 
zuPPP7t3cFnLi4F3oZrOqw~Y}@#^GVTj(?jAk(ND=g{g2E)KaW-1N9eb&?iJ+IA=Hm zxU>+dj+{_-(BBEtjze}&F@+)87w#wdH1J^#bQwn9g+(Fl$Dj1tx5`fvuEGnpAQ7^6 zzGo3qd6ggb6UY)OdLQq8ny2FIe*#9^{b;!67sj;RUdR3VOk(PId)vg*@)+24Z-?5w zzDhWKdl#%#VaOzv7)Gw6VEM`r)o+Y|ag@tPY_~KJqe&i>1BWv-_KXCx>-hTB4}plb zefdbyDn4n`i2?)?YmMdj9k@M7=Mn_{fv`KXGzF>?I2v|~1(A!yhtqU!%Tcn#1RQ~) zAgQ<19e!x5%0wLSxsDV{;eP?2A2W>NI0(BS`cGXSW^x~fuuAFkSr|kx3|O($Ae7DH z-7>Ty*O5PTJTZ+t)qKky6J-|yjFmUkp+sA-$wyAQun1_0m}0jQgkEdnyCIwg_`Uf; z2(nxjm@JL=yl&B9bw5J|dQDNX(?PF5 zDJstMy&!Kmn*UXOJZoQByf9+ar?98QzD-|&!&gldeH9EYBUVvw(Xp>cw+ODR?}8E0 zaUGunj&*&nl1xABfkbaYq=d^UI`lhmsF#QOyM2;WE5`GnaY0ayKehK?!ezohCB zco)MVwm2>eaZG0yXUnAP_8Rlbwm~ia$7jljE;xqBYrOa6SPvp4E8sz-0Vt&Bo{R&_ z5??@|Lg2Fs{*rVrU(mXUJ=dHru+h+Hu$yPTC18Eun>%wLyNW)sgRlu4DAH!tgpDH9 z;d$_Mnndq)J#h-8%s$T$_tk98Q8w7iIyK72RIiOYVcw%gwyxGcfW}u)^2x$WCy6gw z-;MhYTN}v{SIfVitejl8*n=MoX0;ajp>+^RaE2J6ue2dH{cGwZ#1L-S53g0nG^#wu zU)ToX8?fKmmjc3m{`nbKDLF++({K#(<(ya&KBe_{H{{vVM)g#N8DacicfURE5#d{Y z&lit=B?Ld{r-(h^;EIrHRr!v>r^CzBs=h72p$*f5ZjmklG~g*{z(HmhssHIbGTsocW_Jb zs4Hx%t`{dY78jDlAK_jz1Q*yf#?giQ?2qZ0O7hpI*NKZ;v?4A_kvb=hCZANMN}EC` zFE8Xo7@i+CNFns9H7Yn~f1&O7FRIjITyBTv7vQJ+N?G!yNApjf=IA;_ga(ux40ttU z>)J9{!*My8A`3YLoYi|MdMpcL}Tw!@1oJdHc5~R>z9+9IN#~g ztc5-$zWj@M!}!ory}DaVXoLh+ATU27&W>XuY$fc~;9Fk?bhZQdFQMJ3`&|Cqo^O1| zvydTZUt9pplQDRwE&o%Kdw7fj3ac{@Er*NAz?UQTwa=4&Xh&#S$?IK+v#9F}S>4zh zpJW7bLOGL=S1$1{$S}?qV9D(Ku^)2Ab;9wMFPF$%3FbgIf#Mk{Bx|K_`xs*Rbyh=Tw(e|H3D@HIr+mpsv6>+EPIkNSwkTNG~wt z`j{DhCP4bMCwkF+ZI1ce6y|z_DT4P7I977lU_PS@>!l@%1~AL1&9fa(HiU~B;MlfC z^&zMee38W;ydb~IcWIHL~w19G;(Z zB5+3uPv}QC8BdK1TAX5rW1I=GZx(Lj7x6l#l=&8MOp)r3Sf9e4%C3?e5 zsf*#gvKuJ}Tlx~(!?)*~_{4ry2}N!rucB_F?>BM=^;`}mJjp+Lbx_1?{)rDdAuv3S z$i}m(i&g?jk|z(ogQ5~HB?wc!D-5fy0c_DPG6{b+wX-z{GEHRF9uww|m{0#PZTpqC7Rja0PFTZY-_euEeW(qp|eS@ z^6PhqyKHGd24Nr~b%J`rcH1G7)wn7>*n>c{M{q^B=5^wT?`lg@;BoBa)p4Hgc#=^_ zc9*r2qzo4?bj0{>6qg^V2d35{_Cws$tLmv+jCWY+^*c9YUX81+(@0N+TZY9(9J~b{ zz1OcYHeERbn#j-XR^E$dyf20AXpfkXU!AXQr~V>W_{5j(Y5e>mKA-Nusx@jYbSvx$ 
z<+z723DGk|da346Z%~ufl;UzaDpa&D4&rB2Y1eYRMohtVSgBp3eCt|1 z=_k?^@>MRIesf?_^Qtf}<)8Wjc-s}bZf5S@QiKR`D*kmf5O*Om$22BHk)dZ5@I&v; zBXa6Nk|K;TF_a8SGvc5?!fpB$QHtfS;nZXQiqVm!bD-3Bddyzv6T;Ibl5O}2{9)83 zz8C6!V*ph~WONJSR~n2jGWVz*J^gMow~p6sm-{2D&R>ZMegZ6xSk%wsM{ReL*_eKx zS8N4(b7gsp=9E{Mf{SqW3JQaA`k{W1BH3_5IqPJp3BQTdQKIJ>7cy>bzMMI)kdV9$ z5d}H$rinL8oxR_*aElS?TfiH2Ftq(%Ps3scfE||_-duCQ!dY2KXLFvl@5G5U?830{_j~8;;3( z0tt9k$xRnBYQn!LIdgM;6ezt^duNA8{*2c!@&Np8gzQBG2y^x^LI>0tCyCa7nd??H z>#WhQ6KfMos8l?MKG7Uq;X2%e!TPJwwulzHrqguwZVdmE2f@0zln9PwAi$Q5akrEqb(%@t0*k+@GH}JiV7K zgO&vAZYDB$czy!!qRGcHP2+X1k@4}8I8y#8TCWBqK*!j&CwOD7v%(YgeV`{ z=<)q?x|lv_Tc)&fw)Ns5N&G44_tzVzidWPz`hN@YWo9hX->P}u4UU3}O6ukHsj_vp zFTLpXEv^=*d5+d95v%uk3h2%neRW8C>G}M$6;YF12T4COhO#y%_1L(My*Moe)~Nae z+iOnR@im=m^YQcX%Cq~PDs8;Yl?}?0ur3tSM6Rp^a#Ln%!Zm0#h0j3$K*6)elaky^ z4s-_y=xW$e_e7Mxe8%|}r^vlyH_2{h4m+z*Cmw);u0*nRojF^_*b{%vZImjq(W3fv zcX)Cp?^z_?PODZjS|;@Vhl?m^dSr<`ID|Acy!~{~ZE0;iDwE!qAP|bM#+?ZE?lIUg zIy@}P{fI6C`i|d*5%3IBdL*|yx51gJ0U1`AB} z-;w|Ds#04HB0xz=L#WHy#}t2jMYG)&-+)_kYq0k|^KiH@nYlLO(v;$xGcU6U#(@%L z+`@6L=+(V_?er*zdWiu7`bT6{m(W%3-4OxstjUKDr!QX-v|8P8p9~(4O~b7B1p$3R zKCp3B5Zs(QnyXe8ST5CXK6;&`_1SDNN^laC?$`@o=#Lm^gRpq*y$~j^pD-cLvO_X3 z7BpR+zqI}Sr0UoJ0DESL@Q?pGQH)_#d;nC>3a zeu9f|P9ipzBCZxdv|E$W0SrB6Z#BIW-IJvN>=OSsWrpqdZw{-Vk85Mk1yo>|sMoRxL6LkT@Ub_Jr7sCCq=$k$OGun>ttpgjJ@RIMaU&N127YHdG;s>akTo z!>u8DE0(T)D@6pB%O9hwvL`-mSDOODTEgth91=Nh!$+DWCDXyH(!SG$HmHNV77=p& zWZdq=>849za>cZ_ylO6-?ZMb8KMXO&k2y1|n*IJT`)x)1L^hff^}9}7xJ)5@n-A2) zYV@|~q(Uh%DtyPDqip$*ai)|$|Qka*)UIs6Vwj0Pr8PfrhDQhO&&DY;W9_w1;io1`q5*vQUh z6Lm0_c6JpD8<7T`Xf{<$MU;J!?FMD+21}ezR^A!i3$fn_=5zNE?GkoIm@6U*FfMMV z(w8o0hBby5xFvtuv@HC+XE$Hz9y6IMIHu^-+4k-M!vmY+&Nsy%m|WPS7zll0TYm0M zL)*$fD=eZ%#vfOML;2%@;d#$#(liW8vPVFqpWk>bpc|)}qOdrRo=guHnl+io|fs&@$ z%@yNofdN)mf5lo3kk#Lg+*eObW+>2pr#4@61IE{mYKS+gV0QQf>3(r)Y!h#)@*UES z%Q*f6#6ODZ$6XcQ9cBk#FDW89RHr*c%bK zHk7?p{j{YXnc2UKs3hY6_NbA)c{xx88s7*~27ye)DpwVTcUXOcwyUk?+lvWWnU`L- z0i&3A^gJ(kkmc4HgHxm%VoWsE`c4n2#?N(^rcKR$uslW;QE;^w8pB}xd?n29Rp4bP 
zq+PqVSW={u`l%Q?dp?HKeZQ(WWm|sQ&%MXbNo#s5Bcz8aEDVrIc9of`ZXP85)DSz0VIMm{neAY~|$As3rJwOzCM>Z=6(M z4`Aeck_N5+A`pc#t<&v?=UCSl#y%+EnyM3)$R=K@AoS^6=E3_=ZMx5Cw8>VOTD8At z$ej5Y9!s%bSb~f|Y^*BD%C*6YM=(DcT_!6+b#2SF)achEMTqSm5J%E^922&>$Gj<0 zF4=7E3^VPIaV&iz@LJiG+H<6TKKlY+5f((4m9q?{ULb!nrd`jYiGKrU8opKD^`0te zP$NMxH2IRpLs2G8@OSu5wv^c+Z7Wp#hr^^XhfT&fF4KlG&O6;BDoy3Fm(RPQ4L-_; z<`gcWqUltH7O)XIItx^nZqW2Nc7t0{xXacSY!m#2yv;j>0c1xhF@IQ&Dk$4XD(2JD9HroE{a@77M9=1J+j5eI6~vXuTlYuY$9!a z?iOQ$N=K-;Eyfm&h`Bcl!w5%C)&RN>WYqKV=MB=UTpVvtzF3ICc#hB&$)~F3YkGi!?<*7GpElB7b9b6e9v=0EU67fmmAsr!d6>B1}pkZF_I|R zxy$geq`)GDBc1jkquBqPppKSix@<2idALx-Sa~!)o#X8U|AVc*@PH&^&nliksehp9 zQw}_5>2FhpFKi2uJ>~~AH1#N>WUW~8_HHA4d>4XwKhJ91t_>PQUe`dyk60!q#XXFP zMq8VYv9H5IwkUGbU#VHM(xkxJUcK8WdG0G~W9#>j9{8?&XsIjVbiF-GbAlwkD`)%O zQtee)w%}=@VG5kN8$|s{T=jB&=`BViRs>WbIhY3ZH;6|uE42y=I&PbSwwK_ z5RfvG99ifnpK$&TW;i>VGI}{o7Ky!c%HInRO2$lE9z8=wyX8d?34!MH?;x!8H>OWz z$q)%i-~159#vkNvD{m)#j zGiCeXK;TvX&-eu3F|V)wa&0cJPA!NbNItDEjb|INw+ucJmX1v=Hwh#=f}Hg5Hwg%J z;Rp{%=GMhEMjgk==3aC)ZVHxy48GK@i8@uf!>Lm*uYl~-miT#ZH-P=FPgXZRdw&~Si$+)n1t#yu0{bc*?s^AR{@Ct%W~ocRZR z6noJar`AqC;4P9PZaZ;*cQv?+7GF~@Y|XyoaM6W-BO)_ zdY+8qEn8UUNur=niNs>%+XSnLPpW{og~4sgx#^nUxMinXzd3S%L;W(`(PO?MRUwJf ze}Yx79kIRHegOqzbwazP5*Ka0$9C{2Wqfu1SKu5}`}6*JDQ`-SfPzlqfm*#q-fh?|iC^1w!3}oe)xV_-*o0oGa}A>{ zDtX*Z#jWxNT~N?}R_uyBPl=XeQwJqe)}NWD;qy0ev!0d=fv7YaqC{ zQquA~;h?sMX6y8{Q~Fn11lq4hzqXz{kBi~yu2a_TArm)f3<_cGGKV+`=La3wVSN28 z{^}dw3&QMKl;3th6&1rN;6`) zw77{rZEaRkzC|TYi7_)zicZcghK6-67>Fa$GYPTr;2fR$bv9!S>ugY~}^#gmeB{js0^&N-4 zPHSPW08l8kcS@6;*gAZ9`Mjypy!FSSZ|O}<{L&ff4;*XK->z(@pL0hUiX6+p{>Jg8 zowtQU6rMMtZs7+x-WcgySOVLacd@S7K9RxoM}YxyxBY}hDie`|A{Xz;bhjLc%06t# z@SDFbFpWlH;;Flp%<8-7Jp<-{!`{uim9=m=IYa!zGy@|zDkgcZO{#lYd(j_x7UPRV zVa9ik-tg6J@CKuxPaxy_tEpiafz2UAB3IKR(TL63hfp+dnm%q}xkmNbZ>&2=6E|QG zbEizwQEY0|etkk{#UW9WZL~PV85tc-&1o7)$ddrhE3i7efA#368r>uuk<*M$RQ zW6H#9Y{>b?m-EgOO9R2IyKr3fUG4r$uoL#Q2)j?s@%A6E8^=srj_|Plx*ECg%3~W$ 
zJzt;E3;AjIpKqy>b*P-d#&7{ha`5fJmp(_I8A=+IEv%LGy3zOpXHx9@mGBS0`!o*Mt!C&uPqMxw>_Yyj{N=$^n@6KGrwQ>voQ%rB24?c4G>!w zGrHej5zlq{qWU^21mrk=F$Hi>i0dJad7t=2eclp>kwXT<>s}Xn^}yU4=2M5eGni_0 zObQxlvYn;<=SoLXcUmUl0F}~D{Ei4YF5Adv>hct>aDPrW68x@@PNFnr-hYkVJ#n8D z{}!_wL7hx&sPYTRNU>dUW$&T^N^Jp?l21^0D@Vg{kOpb?U#Ky~gS z^l^ev;PcH1Won~K?G^}$*z4^m}jPelcpr)!DgBZFR0d7hFW@m`46_-XU zzE|esN8Nz$xV@VnY+wawZbIQLIm$(TjsrRS;|&KC6t0t^jHFiPA65o`V*@@y9~LHI zr{xRQdL!$;x*eb#$cs`Yxx0ZT9avuTp}XbMsU%1^ck8aQxkB;WMozfqdrd>iAJ9+? z+h#tPupSvA0`w3TWd3XS&2BOWU>iNfn3NY$7L|6wd85The$QA1v-8Tr+Q(H-67;EV;iTfb@DM1XrF8D;bx z)PdAI)?#vFAD|C@Aj7aebQBn(5GTOyR4uLFIT4|V)wZT(W#LC2 z%f36QmJSU5#7dLRxq48u=1-#mz00Fu#?v4b9l4q1oQ8TaH&8axy43tD0w*Y4T~)f9 zs}!H5JsRwE5<_?jEr&|hy2iLHk?8 z3nnbd-cr!s`SsEcitBH3aydLbdeaf(xcfSIEpu04@ynHjg4b3X&R&^#~vA2Y)*X`D1J-kHrfnFtUXP_ahryx#jm1rOP=i zfdwzcdpQz2_`@=%Ip?w+z?0EjnV$#mZN{dN!tm)BLtw*U*D%$1eyy6A>qP6?b+tu@ zF}R;%*NuP5!EWS0!_1_S71>A5(0#RaLzc4eQ9gn$6E|ttEBkr!akC9i3;kFfJ>LK? zh()_PXdSe~N+@5@s={;`NiVlz{-0dfLjOta2WnxCoSf^4zPfuXr+{c-C9>D3)^eMewasI|v7%I}A0Y@q? z$1n0$O}j5#_L-Fm{g}u;;oIYkgO5AliMgAljY*v@edbZcH%bhET@X?w4rj*MkwtMa zS*;hceExtx#EVXggKX>B&1C)}F*Ht74;Qv91FKVJVVRE(G_RW&e{ifBx&9OY)v5-%NV*>yuT9e&MD zw5m*+@k96^@$LKMN)eVGtB?`b`n8Y~j-Wf`D*R-A4p~2RyoGo!s}GcEFVEYShoN_d z$p2bMW}ygMb%YgpLi5L}3e<67hBj`Ya8`A{mnO{}8ZG@Exjc~TdreF-luXm%(I#=r z#b8Es`i8Q_hc1MJi*(H~L5Rnw5md6iiNUn(Ykve4b7!hFmO^;s&TjxL2I3pM!-j7sL~9GezHZv%`VK`t1wKCmNQE5UU%_~uPALCr zajNHMC$Km~>+~EYpjfR|nE%pYP$g?oJQK~=yVyg0p&55cF z9s2M`;qU%LvRnc1#d>;D)?2Xs@y#WM#p=q@H?0Uiri0-HEmDxz7Wezoh~5^LN_>A3 zBjlv0XG52^4g3~XLDI@w`AI*x_M*}7yXnWV=kup;NX;b;xSZ^H8U>AM*VY7fcvEt| z#RJ>JHcR=IV2f4~V%(Ip5AQ>$H17)BD)g&S_=L%oZ3J-e`7mDaV2F7aG#aghq8SWR ztKex{fPF__MLB5c?f!adF&_)+zUBIn=6iZEW4WqYZyLW~M#`@?DiUA7!ntCVrktzO?Waf*sVajQX*1 zMraJxK-DlNimegZ3S!!`ynyTc4?&NLsW3wlaU~?>B52k)NuHgq>GU7}%HE>6D@E#! 
zIgcD8ha(6Fk8h5v+Rbh5QIQ!Q8co+9SJ)=c?#V}Q#a@o`a5F#u?vlV&tWsCYruBXC zR=~o;z@9M|fxAE6v@cplilVpEi3{U_NybHS<=jk+3SOF0ckuXlVcC`4SwGbCFY;5( zkHv%0)bO4t9g1V!j@p=;IHtwG87Td3myxBXi?UFACgpTOe}SD{mzP1C-gR0~1e9Td zy>nD}U@%=sd`iWi$>i@-mnMGoxSGI$~)CyS-u>a^v1^aI;U&pW$hU4 zy_u6mk{C->k0B}W-nw&yBXr%G zX;7DMdN22XL18gYcKx%L%^9Qgmav4LR#4z=(%nc5izo{V-XZF*S%I4XyPA_n`r-C||^+L@io zs>w(ZNC~e83MA<5y5CrFoJA|^7`j>}#n^~BjuOayiF9GxclIqSbP7<}p8L!(t9P8S zF0pGlKGbskyA7V<5E8;+Q&aD+udh+&Me(Ot(*G9XirD*mf4 zd+2x?MO5iFO)gQ_#-ZZMw$Svltpkoem(5fyyYK$&+zRWn)VD$i;%41hByiq*&(1e~ zCftV8=w{C8a?7xMA!~w*X_}NLHHeq?e7|AHkR>k>L^5IwYd2VD+Pu@NOLIzDF3FeJ zMj{dOyBBplnhR4boP00CQETJU*n3tc|#P>zzTI>nJT>6P#`Wq2n_^Ufpd zYlD$HTe)-MqL8{&E?~AI#%rb!V#!mjuxgi*_9A(-_Ue|yQ(y~pKEl|%uBv=F|h@zUdFtOS{FT zk4jO|#->Q-?8s#^-PB=T6WM|(n`tTlt!tZqOv|2_17-+GNEp*hVq=~*qP8#o7(Vd?}4^kEZZ~hDlpH1jqfzrsa>EM-mf5HVX zIy=)gVj=|N+!L@Q=HwiR7V!nYAj`VY@IV5liDjV^IvA9+`}x)0Swf!7&CVNY7^&S0 zNnpLztz#!N+B5LsdD;MCZCXTST|%N{CTxQcfu%(Tv~We}qWsyJ2oY0j?~42($;Qp% z-PYT|ein{ztdsdsa&R9esa~Bdb)tc9BJOoZ?d4)zsYM3Dl)l_A4h9B!rO1<)(>Xa# z7liFWJ8OOZmSaA*XTk@t>#VTUYu8Dp6rgWk%EY0%2TMO@t6thl>{hZ<-TnI{@3@74 zSdQREpvYb5XkY>{zP%~am;yrtVL%uaeM zb`gr1x2nvu?00^5`5ch*y|@7y!ciWgGFc$QV>YI^5(AZT;g6*Eab$JxECSa$1Vo^0 ziQW?(@iagIij3 zy`IX`z4PCWyk283cw=(7Lxn6f`k;L^gtdh+&~C|pn zKD;0p*u)XC$_K)A#`t!taebHMq_BP79gmpJ@U11;NY1j5xT+~h*e77ec(CsOyA;sf z6m>*K$_*^GPsna1Pi_lgpXq+@;FfEy%fmB2^wdJf&h{R*)m#8$wzK2Y>A6BnWgoN0 zR*W~nO)iqv7FK5>GgxwCBlveX3|_tVwg3v{b>Gh_^ca(qSMTY1JP^3u$Q3J?1@mH7 z#%g1p$@=Z^oi3|WISM2z`Wd|I~ba~^hwivhqzFTO)k<#;zU5&SO&WPRheP18?Pc?VimLs zJ1L)?7FwL^ey*tBYoPkBc4x~ZxP0_=w;jmhu@P&XGqM53I|Am95>xpkj5P>z@iCQc zGe0%U4DW>&$S}CkO~y!Y2YW5`e7=KSHO@~ z>&%a~pfVt{3{U4DZ_Y{&jzsczHJJB<<~9z!hVT}LH7VtE){+wC^eHe~Gr_ywm-Wo|o7}%R9j0|uqP&64%!dYPiZAL> z%BbAwr%F2szI>8b=;a8nd0$b(7a`3D|L@3%3@rGN%*m(E|2y=WFJDdY4Z{Zw@CRD3 z70~k%nKq!|pIL6i^X7ktDiG$F%8q2Xz_0&vGDckVa!q#JX8Y3r9a5MN7TVLwVU+v# z>RPath0vZOmW5v9TK_xrnFRE?DscP%y;iVYz6x)?TARvTO=iV^E+1@1^#{S=$f3jvaFd^fG-ChvM*cs(q|ImOo;Fm6+R$rzDDY2FR!ycx$|Usv E0UWExzyJUM literal 
0 HcmV?d00001 diff --git a/vendor/github.com/docker/distribution/docs/images/notifications.svg b/vendor/github.com/docker/distribution/docs/images/notifications.svg new file mode 100644 index 000000000..6c3d680b9 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/images/notifications.svg @@ -0,0 +1 @@ +Registry instanceBroadcaster requestrepositoryhandlerListenerEndpoint_1queueretryhttpEndpoint_Nqueueretryhttp. . .RemoteEndpoint_1RemoteEndpoint_N \ No newline at end of file diff --git a/vendor/github.com/docker/distribution/docs/images/v2-registry-auth.png b/vendor/github.com/docker/distribution/docs/images/v2-registry-auth.png new file mode 100644 index 0000000000000000000000000000000000000000..7f90c73814f08f6e018da7a7f9a0a261ef5c686c GIT binary patch literal 12590 zcmb_?XH*kkzitr41_B~oy3#?Uw*bAeOBMMPRc zkq)7q;s2iXz8~)WaL>9YYbR?~=C}8r+51{Fb?KdyIUvG<_L zB}o>R#aH>u|2RdybH*aaO!l6G`6ol-_pAK@W~+f{^m>G-p8vF!FT)k%u3Hpo`jB<>kc-Cwtdk#|Hz>^{b<*-ivfsr-hOV8(fVJ z7?R}@PnzWqrEr3!qn(rHR3TJn&r`px8V#lg>ho?c145}UXtaX1O4kFfeg_~Id>j0)krRn>B>22g{4xllnX0w5tvWaQ*6&A2!EfVnGPPY#h6z z?US%ig_pBgZ+uYoOP}m)g=y5@N8F#95@;3P%d2ax^dG`QKcK3p6~8j}=|h&|JJ>6< zXb5?2UD{D`4Xox)i@g!v`;$yJv&S#VO!cL9cabF1@0G(>nC7EQx9+YjV@hwTHV#@-naTL5hw8LG_1}ESuZF_w*Q=fymk9$d}d_%flP{e z%E0Bx#G9a92mFUjrkbpyJg`&LeJk1jox$%{Wr7nn@*jR z#<2$5;ZWY8F`xJX&~sYWSNSNAeMS;^Z$~ZSCgw zCkf&0nrj+tsH9&T*e>l`vd=;IWDNf_TO%T;hlwDK`@yw$#%k5?6D5Lp^h5t?-<)1m zbNypHM<$DselDebtIdqK+0K)MB0! z87JW=?_El5!_sNbP{J*r#vUY{pY@A7bRk`4;jfnvXR#F)1F?Y!B)SA$g+4Na6$@JN zWL2${f^TB(4SH6H+?0;H9y|9pi4MF`ww>qHZss@Dx3WZBTV4Ax*?+tLgT(wrpKHhF zE=>X@dj>&T?a?W8#csD-`=vg<7sa#OGRd9Oza%yR1*W@ojg8o2zs^27?8Y~k*w>Lj zm&=9aW)3n^TFkCGKIBVwNyho_W|z=pNKM{oivd<Z(SQS^!$SCu=fMjST9$;ApFRV%ZsX^XkYDF|D}j{MRv z;#&<7qPul{b5|@^nP$>E`tDu(8`1ndlaEop!|Q9IK_W+k(sG(OdW3o-rTd?#z>QUPB^`Ov`90|8EhjOAPab5l zhwbZTjrn;P=NCd&_gK>Pi)@u-3cf#h-+l`Z^QHt&Wv>&UMN_@LvYz+o2ZW=%G|Hd! 
zd*7+H-cUqhL8cb!FFDL&!fN^lBAYTpIH>3 zn>`Qhjk{l@y0_LnIyXPS4;~23^y1G9pr4{bEV#|$=0DPoARVg8dg4H2&A>H zz?x=+{4ScrAIyY#Pe*^!W-k2oDy-!Cs2P8ZQExR;;suFLlWkn#u!gmRujSGRzV2p< z+kiglvyJrM$gHC?%{m7P#|}n)QhZV}>Mt7EpO^cERnduPzC8vK4qIUOX7=<4RqVAY z4fW&Q@S&n$Oh5?*`pAudj7Q&F(*iDP?^(~Dj7k4CK^O7~slzAk8M-2qwRf+k@SeeT zv|Qt~Nb>B}*iPyfD{G zL#A6)jnC5Ir19J&@r}0=i<5E+A#tfqNxuc@1fz1=^Rkl?X?+(J8PP7{vc@m@OcPTi{DqJ8ei77vG8;@P!>?+org5jrs-PAm%V%}&N`+E$`MP=ga zB44g#ct=A8N|9Gc-_&-)YWp4zXKH@56*BRhS6{Apx(`+-#H%E?zK1CgO(MANo*4Tv zDcB8PNNs1|uf3SE@sL0Z7j0La*lUl@$>P_>5KM!cn1<3SSC*$bht zSwe*7>*{>Y{#m#FrGA^)0;_~Y-HgX!GgZKpQ`(+OXBK#B>NKDjs_GPi-!&+c1smzs zp|UsqeKMykc%TonQuoq(j}FPyL*-a>!3t~TGQ2Zf)Os-G;3}xKHF?&NqZ91bJ(`Kk zU2dAR&H06Z3Q_NTVmd*jFZnv5)5Fs5hipfO{Kh#QdD+xQD&bJvW@xZr!-J4JHwC7a zGe0u}YE9Lm0e7w7kW886_+&bg{TPH7O?1;9R&zzvepCuBx-EtJU~5!#J3Fdp+1TBM@&GwuY!|Na|s-wPL@By`>Y2DLzJy2;tw zI;!(N2_}2{!2P3RhgkI;4YtYgIkMQNU~7{c_V(i4RM&~w^_su9*70^C~+?Qc+fo`QB%oo1?Xw}gU*-aoFU=|4Y zOE`)1r^-ph_o5ii(%_V$VWQP!I+1?X{+Te~ciG5=xan}_AV>Im0_jnrj!x?}Ix+fM z5iBLaf8Ox|0RdJaI+Vz5iogLDj{Nw(qssZnFmrA(l zd23)r7B<)zJzknb+Y3PX^sw4|Zg}`oAX+TwV;`fQjF3%$^>OR^z!by;(__=0YLJ+i z2p?*FI|f1Hy&J1Fc6CE>oW-Iz?Cgy!E7R;1VxfV3lI4p-gpqiqI&~L+&#)G-AW_py z52In(i#reE9IaM^(!H>Lfi=sEPIrr0Y(V<|spQUJHsSc%S`m zEjhWa_wn7WE0iV@`rb%2h(#C6q!DUAB=+v3z$;}_-}2>9lAyHRyvvsloub*L9Uy53 zb0kkqR*16~^Nd_YbKYselBHh6Jd7R2<^=%QPs0+mVGtCLY;V3&gRBcH?p8NnalwPQ z+8dIkZB~yAAxPP!A-UA05oMW%U7EL|pw}*@#+cm$Xs1jgHIo4292wF!W3E%9{Zejn zfJO)wXXAVe8jPA_MK;yh3T|BmQ=a^~Z*m>L#$F%3 zKmKGZK!(#*RfEbvKgY!lGNKT7^}bo~k3e9AT?xs4+OG{y0?-D^nDCCvBhyApr)QuS zw#)~ZA04HTx6vUUMvU>Q=P}a-y~bQY)nY?*G+(#=um%-q+{S?lAUj_nF(Ks?bot;& zN4%_gqLhd*>2(nmZv)rE+PKDqby#`I4vf8Sf1@;ZJcHzEhU>T@@ok@n6HK`NQ(Cv1 z#DzuU?zVBn=YF_&5lirr14%8Q(kxW+r$3j4W-YGv3yRX!k3h;>B?cYtEQeYjHS3Yf zSYxiFP-@p!3$u;S9QQ~3U_%EFM(uAbHydw<6dm%9#bBgB3rNIe97F2zV}!K5#;fVu z=>s%+kB=k@3x!P!NLS-_NZf1Ee){xm;#hu+?Zr3I1ds|R%Y)7ju=~P|i$&(sWh&t= zvC!#wnnIzEzweGZ9iksBNY{Vd`nW91;ZvU(T*d3^cloZqK|Jc1`f0wm!IQq&+*2H% 
zXvm2?jbX649Eqz(!06QNbmK|)`L93Z&9r_wyZrRjpe1+)_SE+<(oU19x=Rne^Htr z6&n_1L{ngg^eMD?1~XRsJ?@D{yt*d4uw~=5xLi{F@$t#T78^8!&Ql)eaaVQQb|`IJ zjU1VAP!?sJ-HB(`Vt>){sO!}N4l;@U()J$m$Z$;-{@K=18a4n~?JrVDNqntV4mT)` z8~g0ylJ>&(6e81~8Xzu!{S;xwFf@i)Y`Y}0JIICJ`mE0{@){{`th%8g3cI~S=0xGV zm3l<*Xkz}!p+uxoL5uM5^*(DV9z8UmDq~g^5u9g1;+iC)GC}54&&lLFNpaL>qO*qm zcJf-=NiMAX%_x2pI}yT*m!(8Tq>|!GY!drKP5bPliOIhO<$I4j%%hW7Zzbt67u`}N z#Cl&=x2imVE?$j@f}@$hN;0=(;lsIlQ{T@nWYr&7%M-K=IA!8BR(!M;>Vd13tLi_Q zr`2)PLnb?&x9;e6jE^3cDbDBM+F7O+Zs;7X3CV1E%sm_N3S?#_ji#ZFsK~Iw#ut!j z+s0nP7gzUhufm+tf4zIPXZ5O9x8oL%7nHXmLncPt>XcBylQ87t7wX=H*a`n<6sH|z zD>=4;c1CIfx1ELF^!`@T2T>9GwaXmZZ$7fpwy~IubLlZN7smg2r0apZP;4#zWNJvs zmUeS-`SN|!xt7!OvH{k>&)FNEfwv|yNSZ~mNkTrF^ARCSc~v(H*;GH_2O4%96mhvW zMO>S)e8hPs0QCfWOKp!WG88mDM-NI+=hq0C<&faxO6XegYEdmIKdWOYQT!D5rFj&w zb2L@+RFBW!=)9LM_+2c-WJHnq!$(D;_!7Mo??`wDMwn87pDiJ2%ymL>J#Syec9TUiO2CXEK zgN&s&IO)U{UaxH1E}Nxyl_c@5r>0Q#c_(tXePcy;YDitr_O}YgDfL^Q?kR%ocje>0 z&$Ra|e@vGX1dMooU6UvDee{b7di2sfVuQ3FFHHbb*@l6BVwShG(u85MP3ZU%amI-d z_HR0u+ZN{Wf`8_AlyB~HJ(_UDuM{X&kZEd+TbRnjrvonf85n9pZMp7@8yv{Tgxc0?DkOQciP3Gs zxmS_40W#6hE|ZCi&kUWi)rtV8Ai#Lw5%u*Y}zDV%s4QCy@O&^6N}UEWmMdc;(>9&il`hjX-3o}i5|xA*5J z?;M}!CQ|k>HBA)^yJe0Pp5g%W)}~O^FFo(kh4pvjvB+hl<@U~#YJG6rR=M3L%Us{1 zbyza+3ii1R6tH5E{FE9p1XwND=Q<1|`}Q8}J(2t?+f=wQwO+v|#k}{!>}N;s*UE~$ zO1TUznu3)4j7WOlu~ImLv1#WpD5Z-m#x-DDTc$LP?=9m2jRMxdC=d4wDFXX7SoZJ% zkD|Y^*wEauQ~Ak^6Mkil;$2HI=+t6gQ}5g0_Ltz2bZ$NCAKNHH-p`WL=K5&ik*aqH zs!D1Eb{_rcTN(RC)q9kQnd5maCugxB8>9$RC`A~7qR$%;xGnnwRrFDb`gfXgzBujxyYkgrE)laV2*sO7}IW@ zR3(N#O!M823hS*V*Ss+MnuFxhP@n*buUL>%m;xt5u)Rbkf{IjL(|->kBSp#7*`OJ%!?`DN-7CG;!Q-0L0>`B^g{6z{beX{oJE5ob>ty4m-TsMp z<+=v?-UcsczjSSV7^4bp6qCsyN=9AJg6o`~Nai3}JJNqKIOXUC-~I($ZQ5bN$CT*W z%d7#D(A3;+mj_+2jVY};YTT1^Y1}sYzds(vz@AfuKcstTU%z-t69G|<69nPkqHAiK zdHjil@3SP3I1Q7Loj78oq zGL*_1m{Qnh5xVIv#|m-{g_)Ze(Xc%nxP$%rZIaegT7R7T9PoQTt$=bHd{FFPH-v`m z_{1VTH8Onq?hebxS$SL)XusDX+G0%@xQwU@%%^0(h**~HMP|nA1)r}FMNG}vF8SNN!p!!qXM(hC#hs@^RHQGU0IOs2# 
zJyVEh!0uu?J#~EMJKB8~Wtq*@VKUel#!6%T5lAy8EJ8qg9?gIqkj`6U3i4Ow4;deO z>d8ngp7%W#?o6KcS`asnPStB%J$WF*n^V&$EAGM#VfOP6>qw!-g(eWBhGte0H_4`6 zJgUSPqE@N6U5Wn(2D`)LuL6uE2bUltWPsl3Wo_i%fX3aStGi7=p2pblvqzRs(dDq@ z>|qlw*>jxUr;U&jOu@j@LWej*i~fT`iKw?Gi(!8pawzj&K!@bk`tgWClvZzl1+LRX zPkP~PW*z=vVnh<{J$!#2X)Sg50CeBN+LAH;>B%tN{a9!DOfMki)I?fqu?9w#legp` z>3Q`;4M0|Hzl%DWVuX9-;tYZ1`u<52pF5XeH5wCX{p-=Ewo}7=M{8}NdNM+Fywf@L z1>M%y=Xr&VMeQb=K*aK+9`mSt{kbt!h*QEj#FB>7e;1{`;0Z?v*q4n5`bFuMjDKEs z?CIXN$f10O1OegR|HRbcq0Q>?6FOumi>zI(`b9q@R0nLP^CFm6u8P`Z$$G^iV`=%V z-LF7{UP{^+>M!I7!MlMhZ*Zg@)HArYR>dr2jwkIm|G-R@Cx4vWrf;(I=c7Sjo44F zpaos?iG@tLz2&6_H9!CDfuR4uxB__pJ`gKjdwu=P$97SW0Pc&?C#R?~8jz+l;|pQu zr;PU*a%#h}Z7! zgntXirJi>3_-dUr?atk^;VC`&-m-`eg#C&9a4%}y_(g%_)6@E(nQ3?Dg429%00~r_ zya3^Y=pHi8yv5qlDIL_j)zNi!QhHpYk$JNOIVwMUKHU4%KK7hyLSoa-{}5~}H1Fj= zBDPy^-m<=-b3iKEYYn1cZ6TBG?U(=47_K9;sT#{v&MABLItnPxNFglrlV!-pq%4<=%@w`lRjX9%WG1w+_i`T$KmY7~*xT0+Ie%-tk-z ziSVX=qzd?KjYi%s@9%U9Pbq7S3oMNZHVT5tIh?(7AJlC2kg;B{^V2IV3-}DcO}f8q zJinKX(R#Z2FQ3nGgMyQ-2#3rU~u-l6Z8nswigEmNIXOHGB;?5BZY#3o}X95=HEK3~( ztjjnzMGLJ<9Jto5c67C)03Z`s2?7PAE*G?2<`q5zIuK>(S*Reg!Q;0mV&~ zm3Qv)@?xGhM-SA?d!)fsJ>%BsMQHnl8Ej{(+u1U~c(A3`>5xWU|8#%ajqZMB)odMO zKofzTysHPQrKLjPeHu6u$xX;a1Onl{)wB%bds-+4z*Mezb)G6?D!J{n^RtH7w#(0T zaE6U#DVa_+`LT`MSJ&2N=Zj<3)(NdO+AbhhIT=fO-2CUfGDxxA)5c*jitf86$uj}OfI z&hS`ilCiNOFfezY+^1`GYwNk1_JOBaX>!4x4Sx|afu5Gu{kiRfkCI+VqJ>5K`>z^b zM?lhY->8F!a6t9eMR?S*-76MhHe_TiBXW36b8d|Wv!-?M%1tq-&a6uzB`7$Ls_gF% zg_2R8y`2OFivAVw`>cHCcz(>BfP*p-VuuNct32v5fe09$Gn|0^yXq@mX7KYde5Ywo zneP78P*!Nh#vTRl?kYbH=rdl(l&8E2wFmgAVROBl;ZWX!7C%HBBl^&O%opg0ZKHQ&~0tf6!9&U(PDv`4<_?UG(0ju9_|*hK(~6WUBU*+BSv!Y_7%N= zQS~sr!g?o=E5S=#1-JL$`1D8>HYZNINt^<`d;$^_u=QX zi$5+l%eII!_AarU8Kr;s&lGEi8j8nF;l8LUmHaiGwlauNa@z!M z#1Ja}`sjO!edroFC;m8?jqqgv97YAkp1ktX+q}gpHcg9i?V#1!&uCo? 
zC!%5o9}h16?PxgYM5C!pUSvv5-2Uur&HfGn5S;w3_nyo0&c3whmgsSPDb+apZcnde z?=F=<=oiwymUD1?wQH|`Dr;~;P9JIA#l|b(#A9ZY`0vHVmwJY5!H18Xmk{B21lggl z-5E>XHC*y*fY=1}srW*_unciOie2o+y?J8}P`v+QkN-s!g8)t#c#96gQ?>{L-m?Ge zojM43eGc%;04Mx>3XR$&xRDU}zUR>g;nz0Tm`~KYcH4{)zBrppydg|KD(jX}YC?q; ze1qyY#Dt*Wb%59|8W1t9LR$wa1EcWF{9$vBh>~jcJD|z9c2KoJl!v{%O1Ys<%^Yvk z5k5=Dg^m2*psf%+LmDG54+Ca3I4PWjOKW<)x_Nb9fVzVVCMmSxu1f_F$4?+HV)M4= zZ?qKL*5BWRwL}ja(8QeXZS(E9PoGSkjN3#n!EP><%+o=4JYSEC?ZU+MsOApem%R-z zFE{vwjK(1=u2#s|54$WhzZu^%#7Xo)3-Mgid+Tl`Wq3`HpALD5lbsVsvuXG9&Ziq9 z7~>%xt3LoCtFJgRoj3imrQsCYv1gU!^sq(82eO`RfvLyR@~Zi%u4f~UC|5&hWR}iv zC}uNse{^(;%#OcOV;`3dXx#2ND(-h|>)VO+2cWj3P|Wn*kMa$CTJFo_bK&Q&=j zl6oK2fPT7lv6a3;(wY#vSb8ve9?jzZFHZ)@AkaV5&#fCuslyBh5J~)B9R9{^G_C!( zdYnkw_B78g;-!A@6D5(e6t*V7 zvmESlK;z0%0%Hp!Zz&4#ad2=D1_BiGBRjAO zD)|Y6wx}J+?e8lW@2|1ToelF5dIv}E9;lK?34Z0Ny_zwKEtb?J6_dS}sLLd;P3McM zk2kfa@E$)pyT;gk0aZq_dowBJb2AZYvW0!X0X-i4*?dfL2}r8ytqfc}>VJGH1CCu~ zoFG9F_+CP@I3Uokuvy>jPx!~`ZTQ;2ITpu0%xbTh0LjH&%ljZu+vrz{=A&8|E>3&~ za5hZVef5VW;NxJEZJ%%O0A`&pF=4HLT>29#Ye z70L5?*S;u$WiCl5&&8fvP%P2@x29SN;&MYI@eQ%-(-FynFuY-#{K4spTDL!QF~>d? 
zz}C@;9KZ=n$9FG|Btq5iAIEEvelyO^m1$}o*N*%~2L~-)c*nC647j3bH zcJo9^5&S2wjx-M#Z2^B7Kv#`4{d#_r=<{FiUcZ2^V{AB7pGYYcmz7`_PNtVsL&DTM zJT`TdJ2Z0TT?!I!DZW?^KxN%{o;g55A8$R~&?Va>mHk~`Fb&GxN6UCM% z{43bN?9LIX0Si~#IvMf>J-k)Bi~c=EjB+VIoSQ~Fb^sC3=GzJ^M|^b9PO6R&`S>`- zjYN-%mow$^M&YyQH6nw^gbO-Gz`4F#kSYu&6Twc?>k|Z zOX*tEZLUxZBG8o?p!&VDDavuv8-$0k{1P8C8m25TSh{W{9R=B*D;hk?d0}eEqt5lZ zm_e6%`lilhFc%Q)kf!we9~q>XbIMfN>dU~Ptr;?E)_R;7g*j^Z)RHZkTB0^@u059F z1SjaDQ@7vprLmzZMs~#(W-=%xy-J5+C9QP_dGjTOvnQ=y(f`y^(lGC-jn`co#d2X$ z)4x001aXuK7`39*^vBP)5m8|)8kw2NY&|T`Hmkh+`|!&ozu2Ll{_d%>APZti@xM6x z9ieBp>DLW|pUj+dW<|A@N_3X1{!_bdGVwp!vwu(t;I97-J^qof{TrP8ld%8CX82?` zmG>WbgCE^qIVOl!2H?eK!kdZ3M#FvswfB*b;~beYK!M|8w*XD&9fIQ-(Y2i2`LDfr zlfzDDsRSmOOSPD;jsrjj_KbAhA3!P{K*X$m9ybBv?#||3M?Ob$+V^PpZZjM+eFEK2 zKW`*tDl!Cx0y(&2>IA&Scn;{|Y_&?9Cs`=1e9_5iZbI3o0o#m0s%oju9K37IptZ*zl6EB$< zOF;Lq@@hwH=~994_>>EL-NXMp<}$2q;Xr*kXf=9#)O%|?{YZj?y1&#%p>0?vXoUtB zs0vCFvX1fv@+t%nm)!tBD_}y!B@m2Fr!@ugM+QN@0NS4QNZ*=R$cW~}VjBmerUj+T zb3=!i&9d{+8hzVT>G%dwnpbjfeV`}4!%T^UN5@ESFK0En@}!G@$H7Pc4M~Un!0!U zB;d$xaX-=ZdIoZDl`@qq@hh4|mK^|@P&mRX&5Z=euttuV3k06s)K#Yy-!frB{z_`Z zuYY74;VGX`L+R%C4onu&5kl;`A5*pI|4QJ3h(q0sH~pGy{2 z-TSk{;LHM1FrD+2rHv>eqbiZ$9K21zK7hU43c!|L-xcMfy~!mRB~4+~06~bi-z@N< zJa*-se94Gnq2yA~x1MB%Bf%e~bt9hMh{mQB%$&99`qTmjVM4OId0vV;rbW5M25!0< z=6nlMtZuh9h$`XC!8q_fVgGJfk2T?sD?K%Q@lX{M>>Xm2_#Aj*BRGIjXk2F9Jj3Nz zmY@mu{FsTQ`#v-7q|RxmjVs~gC3}(^&mTV^ppxzz?~yzbg0EuM-vUpEhr9?tXN_aZ z$3QHNn{4f*xB4#p;K=`RV}aMwmWZinhwDq}{AB9`H8F>^EOd9w)ZlU? z|7#Fsv?a_zcsUXf2gTY_3J-Cwk?oI7QP!mDdU_6Jy^&a*3>rXoy79|_DD$l+ zi>elV=QfQF ++++ +title = "Registry Overview" +description = "High-level overview of the Registry" +keywords = ["registry, on-prem, images, tags, repository, distribution"] +aliases = ["/registry/overview/"] +[menu.main] +parent="smn_registry" +weight=1 ++++ + + +# Docker Registry + +## What it is + +The Registry is a stateless, highly scalable server side application that stores and lets you distribute Docker images. 
+The Registry is open-source, under the permissive [Apache license](http://en.wikipedia.org/wiki/Apache_License). + +## Why use it + +You should use the Registry if you want to: + + * tightly control where your images are being stored + * fully own your images distribution pipeline + * integrate image storage and distribution tightly into your in-house development workflow + +## Alternatives + +Users looking for a zero maintenance, ready-to-go solution are encouraged to head-over to the [Docker Hub](https://hub.docker.com), which provides a free-to-use, hosted Registry, plus additional features (organization accounts, automated builds, and more). + +Users looking for a commercially supported version of the Registry should look into [Docker Trusted Registry](https://docs.docker.com/docker-trusted-registry/overview/). + +## Requirements + +The Registry is compatible with Docker engine **version 1.6.0 or higher**. +If you really need to work with older Docker versions, you should look into the [old python registry](https://github.com/docker/docker-registry). + +## TL;DR + +Start your registry + + docker run -d -p 5000:5000 --name registry registry:2 + +Pull (or build) some image from the hub + + docker pull ubuntu + +Tag the image so that it points to your registry + + docker tag ubuntu localhost:5000/myfirstimage + +Push it + + docker push localhost:5000/myfirstimage + +Pull it back + + docker pull localhost:5000/myfirstimage + +Now stop your registry and remove all data + + docker stop registry && docker rm -v registry + +## Next + +You should now read the [detailed introduction about the registry](introduction.md), or jump directly to [deployment instructions](deploying.md). 
diff --git a/vendor/github.com/docker/distribution/docs/insecure.md b/vendor/github.com/docker/distribution/docs/insecure.md new file mode 100644 index 000000000..4b7917d2b --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/insecure.md @@ -0,0 +1,116 @@ + + +# Insecure Registry + +While it's highly recommended to secure your registry using a TLS certificate +issued by a known CA, you may alternatively decide to use self-signed +certificates, or even use your registry over plain http. + +You have to understand the downsides in doing so, and the extra burden in +configuration. + +## Deploying a plain HTTP registry + +> **Warning**: it's not possible to use an insecure registry with basic authentication. + +This basically tells Docker to entirely disregard security for your registry. +While it is relatively easy to configure the daemon in this way, it is +**very** insecure. It does expose your registry to trivial MITM. Only use this +solution for isolated testing or in a tightly controlled, air-gapped +environment. + +1. Open the `/etc/default/docker` file or `/etc/sysconfig/docker` for editing. + + Depending on your operating system, this file controls your Engine daemon start options. + +2. Edit (or add) the `DOCKER_OPTS` line and add the `--insecure-registry` flag. + + This flag takes the URL of your registry, for example: + + `DOCKER_OPTS="--insecure-registry myregistrydomain.com:5000"` + +3. Close and save the configuration file. + +4. Restart your Docker daemon. + + The command you use to restart the daemon depends on your operating system. + For example, on Ubuntu, this is usually the `service docker stop` and `service + docker start` command. + +5. Repeat this configuration on every Engine host that wants to access your registry. 
+ + +## Using self-signed certificates + +> **Warning**: using this along with basic authentication requires you to **also** trust the certificate into the OS cert store for some versions of docker (see below) + +This is more secure than the insecure registry solution. You must configure every docker daemon that wants to access your registry. + +1. Generate your own certificate: + +``` + mkdir -p certs && openssl req \ + -newkey rsa:4096 -nodes -sha256 -keyout certs/domain.key \ + -x509 -days 365 -out certs/domain.crt +``` + +2. Be sure to use the name `myregistrydomain.com` as a CN. + +3. Use the result to [start your registry with TLS enabled](./deploying.md#get-a-certificate) + +4. Instruct every docker daemon to trust that certificate. + + This is done by copying the `domain.crt` file to `/etc/docker/certs.d/myregistrydomain.com:5000/ca.crt`. + +5. Don't forget to restart the Engine daemon. + +## Troubleshooting insecure registry + +This section lists some common failures and how to recover from them. + +### Failing... + +Failing to configure the Engine daemon and trying to pull from a registry that is not using +TLS will result in the following message: + +``` +FATA[0000] Error response from daemon: v1 ping attempt failed with error: +Get https://myregistrydomain.com:5000/v1/_ping: tls: oversized record received with length 20527. +If this private registry supports only HTTP or HTTPS with an unknown CA certificate,please add +`--insecure-registry myregistrydomain.com:5000` to the daemon's arguments. +In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; +simply place the CA certificate at /etc/docker/certs.d/myregistrydomain.com:5000/ca.crt +``` + +### Docker still complains about the certificate when using authentication? + +When using authentication, some versions of docker also require you to trust the certificate at the OS level. 
Usually, on Ubuntu this is done with: + +```bash +$ cp certs/domain.crt /usr/local/share/ca-certificates/myregistrydomain.com.crt +update-ca-certificates +``` + +... and on Red Hat (and its derivatives) with: + +```bash +cp certs/domain.crt /etc/pki/ca-trust/source/anchors/myregistrydomain.com.crt +update-ca-trust +``` + +... On some distributions, e.g. Oracle Linux 6, the Shared System Certificates feature needs to be manually enabled: + +```bash +$ update-ca-trust enable +``` + +Now restart docker (`service docker stop && service docker start`, or any other way you use to restart docker). diff --git a/vendor/github.com/docker/distribution/docs/introduction.md b/vendor/github.com/docker/distribution/docs/introduction.md new file mode 100644 index 000000000..eceb5ffc1 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/introduction.md @@ -0,0 +1,55 @@ + + +# Understanding the Registry + +A registry is a storage and content delivery system, holding named Docker images, available in different tagged versions. + + > Example: the image `distribution/registry`, with tags `2.0` and `2.1`. + +Users interact with a registry by using docker push and pull commands. + + > Example: `docker pull registry-1.docker.io/distribution/registry:2.1`. + +Storage itself is delegated to drivers. The default storage driver is the local posix filesystem, which is suitable for development or small deployments. Additional cloud-based storage drivers like S3, Microsoft Azure, OpenStack Swift and Aliyun OSS are also supported. People looking into using other storage backends may do so by writing their own driver implementing the [Storage API](storage-drivers/index.md). + +Since securing access to your hosted images is paramount, the Registry natively supports TLS and basic authentication. + +The Registry GitHub repository includes additional information about advanced authentication and authorization methods. 
Only very large or public deployments are expected to extend the Registry in this way. + +Finally, the Registry ships with a robust [notification system](notifications.md), calling webhooks in response to activity, and both extensive logging and reporting, mostly useful for large installations that want to collect metrics. + +## Understanding image naming + +Image names as used in typical docker commands reflect their origin: + + * `docker pull ubuntu` instructs docker to pull an image named `ubuntu` from the official Docker Hub. This is simply a shortcut for the longer `docker pull docker.io/library/ubuntu` command + * `docker pull myregistrydomain:port/foo/bar` instructs docker to contact the registry located at `myregistrydomain:port` to find the image `foo/bar` + +You can find out more about the various Docker commands dealing with images in the [official Docker engine documentation](/engine/reference/commandline/cli.md). + +## Use cases + +Running your own Registry is a great solution to integrate with and complement your CI/CD system. In a typical workflow, a commit to your source revision control system would trigger a build on your CI system, which would then push a new image to your Registry if the build is successful. A notification from the Registry would then trigger a deployment on a staging environment, or notify other systems that a new image is available. + +It's also an essential component if you want to quickly deploy a new image over a large cluster of machines. + +Finally, it's the best way to distribute images inside an isolated network. + +## Requirements + +You absolutely need to be familiar with Docker, specifically with regard to pushing and pulling images. You must understand the difference between the daemon and the cli, and at least grasp basic concepts about networking. + +Also, while just starting a registry is fairly easy, operating it in a production environment requires operational skills, just like any other service. 
You are expected to be familiar with systems availability and scalability, logging and log processing, systems monitoring, and security 101. Strong understanding of http and overall network communications, plus familiarity with golang are certainly useful as well for advanced operations or hacking. + +## Next + +Dive into [deploying your registry](deploying.md) diff --git a/vendor/github.com/docker/distribution/docs/menu.md b/vendor/github.com/docker/distribution/docs/menu.md new file mode 100644 index 000000000..7e24a6907 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/menu.md @@ -0,0 +1,23 @@ + + +# Overview of Docker Registry Documentation + +The Docker Registry documentation includes the following topics: + +* [Docker Registry Introduction](index.md) +* [Understanding the Registry](introduction.md) +* [Deploying a registry server](deploying.md) +* [Registry Configuration Reference](configuration.md) +* [Notifications](notifications.md) +* [Recipes](recipes/index.md) +* [Getting help](help.md) diff --git a/vendor/github.com/docker/distribution/docs/migration.md b/vendor/github.com/docker/distribution/docs/migration.md new file mode 100644 index 000000000..da0aba91a --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/migration.md @@ -0,0 +1,30 @@ + + +# Migrating a 1.0 registry to 2.0 + +TODO: This needs to be revised in light of Olivier's work + +A few thoughts here: + +There was no "1.0". There was an implementation of the Registry API V1 but only a version 0.9 of the service was released. +The image formats are not compatible in any way. One must convert v1 images to v2 images using a docker client or other tool. +One can migrate images from one version to the other by pulling images from the old registry and pushing them to the v2 registry. + +----- + +The Docker Registry 2.0 is backward compatible with images created by the earlier specification. 
If you are migrating a private registry to version 2.0, you should use the following process: + +1. Configure and test a 2.0 registry image in a sandbox environment. + +2. Back up your production image storage. + + Your production image storage should reside on a volume or storage backend. + Make sure you have a backup of its contents. + +3. Stop your existing registry service. + +4. Restart your registry with your tested 2.0 image. diff --git a/vendor/github.com/docker/distribution/docs/notifications.md b/vendor/github.com/docker/distribution/docs/notifications.md new file mode 100644 index 000000000..c511eb59e --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/notifications.md @@ -0,0 +1,350 @@ + + +# Notifications + +The Registry supports sending webhook notifications in response to events +happening within the registry. Notifications are sent in response to manifest +pushes and pulls and layer pushes and pulls. These actions are serialized into +events. The events are queued into a registry-internal broadcast system which +queues and dispatches events to [_Endpoints_](#endpoints). + +![](images/notifications.png) + +## Endpoints + +Notifications are sent to _endpoints_ via HTTP requests. Each configured +endpoint has isolated queues, retry configuration and http targets within each +instance of a registry. When an action happens within the registry, it is +converted into an event which is dropped into an inmemory queue. When the +event reaches the end of the queue, an http request is made to the endpoint +until the request succeeds. The events are sent serially to each endpoint but +order is not guaranteed. + +## Configuration + +To set up a registry instance to send notifications to endpoints, one must add +them to the configuration. 
A simple example follows: + + notifications: + endpoints: + - name: alistener + url: https://mylistener.example.com/event + headers: + Authorization: [Bearer ] + timeout: 500ms + threshold: 5 + backoff: 1s + +The above would configure the registry with an endpoint to send events to +`https://mylistener.example.com/event`, with the header "Authorization: Bearer +". The request would timeout after 500 milliseconds. If +5 failures happen consecutively, the registry will backoff for 1 second before +trying again. + +For details on the fields, please see the [configuration documentation](configuration.md#notifications). + +A properly configured endpoint should lead to a log message from the registry +upon startup: + +``` +INFO[0000] configuring endpoint alistener (https://mylistener.example.com/event), timeout=500ms, headers=map[Authorization:[Bearer ]] app.id=812bfeb2-62d6-43cf-b0c6-152f541618a3 environment=development service=registry +``` + +## Events + +Events have a well-defined JSON structure and are sent as the body of +notification requests. One or more events are sent in a structure called an +envelope. Each event has a unique id that can be used to uniquely identify incoming +requests, if required. Along with that, an _action_ is provided with a +_target_, identifying the object mutated during the event. + +The fields available in an `event` are described below. + +Field | Type | Description +----- | ----- | ------------- +id | string |ID provides a unique identifier for the event. +timestamp | Time | Timestamp is the time at which the event occurred. +action | string | Action indicates what action encompasses the provided event. +target | distribution.Descriptor | Target uniquely describes the target of the event. +length | int | Length in bytes of content. Same as Size field in Descriptor. +repository | string | Repository identifies the named repository. 
+fromRepository | string | FromRepository identifies the named repository which a blob was mounted from if appropriate. +url | string | URL provides a direct link to the content. +tag | string | Tag identifies a tag name in tag events +request | [RequestRecord](https://godoc.org/github.com/docker/distribution/notifications#RequestRecord) | Request covers the request that generated the event. +actor | [ActorRecord](https://godoc.org/github.com/docker/distribution/notifications#ActorRecord). | Actor specifies the agent that initiated the event. For most situations, this could be from the authorization context of the request. +source | [SourceRecord](https://godoc.org/github.com/docker/distribution/notifications#SourceRecord) | Source identifies the registry node that generated the event. Put differently, while the actor "initiates" the event, the source "generates" it. + + + +The following is an example of a JSON event, sent in response to the push of a +manifest: + +```json +{ + "events": [ + { + "id": "320678d8-ca14-430f-8bb6-4ca139cd83f7", + "timestamp": "2016-03-09T14:44:26.402973972-08:00", + "action": "pull", + "target": { + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "size": 708, + "digest": "sha256:fea8895f450959fa676bcc1df0611ea93823a735a01205fd8622846041d0c7cf", + "length": 708, + "repository": "hello-world", + "url": "http://192.168.100.227:5000/v2/hello-world/manifests/sha256:fea8895f450959fa676bcc1df0611ea93823a735a01205fd8622846041d0c7cf", + "tag": "latest" + }, + "request": { + "id": "6df24a34-0959-4923-81ca-14f09767db19", + "addr": "192.168.64.11:42961", + "host": "192.168.100.227:5000", + "method": "GET", + "useragent": "curl/7.38.0" + }, + "actor": {}, + "source": { + "addr": "xtal.local:5000", + "instanceID": "a53db899-3b4b-4a62-a067-8dd013beaca4" + } + } + ] +} +``` + + +The target struct of events which are sent when manifests and blobs are deleted +will contain a subset of the data contained in Get and Put events. 
Specifically, +only the digest and repository will be sent. + +```json +"target": { + "digest": "sha256:d89e1bee20d9cb344674e213b581f14fbd8e70274ecf9d10c514bab78a307845", + "repository": "library/test" +}, +``` + +> __NOTE:__ As of version 2.1, the `length` field for event targets +> is being deprecated for the `size` field, bringing the target in line with +> common nomenclature. Both will continue to be set for the foreseeable +> future. Newer code should favor `size` but accept either. + +## Envelope + +The envelope contains one or more events, with the following JSON structure: + +```json +{ + "events": [ ... ] +} +``` + +While events may be sent in the same envelope, the set of events within that +envelope has no implied relationship. For example, the registry may choose to +group unrelated events and send them in the same envelope to reduce the total +number of requests. + +The full package has the mediatype +"application/vnd.docker.distribution.events.v1+json", which will be set on the +request coming to an endpoint. 
+ +An example of a full event may look as follows: + +```json +GET /callback +Host: application/vnd.docker.distribution.events.v1+json +Authorization: Bearer +Content-Type: application/vnd.docker.distribution.events.v1+json + +{ + "events": [ + { + "id": "asdf-asdf-asdf-asdf-0", + "timestamp": "2006-01-02T15:04:05Z", + "action": "push", + "target": { + "mediaType": "application/vnd.docker.distribution.manifest.v1+json", + "length": 1, + "digest": "sha256:fea8895f450959fa676bcc1df0611ea93823a735a01205fd8622846041d0c7cf", + "repository": "library/test", + "url": "http://example.com/v2/library/test/manifests/sha256:c3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5" + }, + "request": { + "id": "asdfasdf", + "addr": "client.local", + "host": "registrycluster.local", + "method": "PUT", + "useragent": "test/0.1" + }, + "actor": { + "name": "test-actor" + }, + "source": { + "addr": "hostname.local:port" + } + }, + { + "id": "asdf-asdf-asdf-asdf-1", + "timestamp": "2006-01-02T15:04:05Z", + "action": "push", + "target": { + "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar", + "length": 2, + "digest": "sha256:c3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5", + "repository": "library/test", + "url": "http://example.com/v2/library/test/blobs/sha256:c3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5" + }, + "request": { + "id": "asdfasdf", + "addr": "client.local", + "host": "registrycluster.local", + "method": "PUT", + "useragent": "test/0.1" + }, + "actor": { + "name": "test-actor" + }, + "source": { + "addr": "hostname.local:port" + } + }, + { + "id": "asdf-asdf-asdf-asdf-2", + "timestamp": "2006-01-02T15:04:05Z", + "action": "push", + "target": { + "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar", + "length": 3, + "digest": "sha256:c3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5", + "repository": "library/test", + "url": 
"http://example.com/v2/library/test/blobs/sha256:c3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5" + }, + "request": { + "id": "asdfasdf", + "addr": "client.local", + "host": "registrycluster.local", + "method": "PUT", + "useragent": "test/0.1" + }, + "actor": { + "name": "test-actor" + }, + "source": { + "addr": "hostname.local:port" + } + } + ] +} +``` + +## Responses + +The registry is fairly accepting of the response codes from endpoints. If an +endpoint responds with any 2xx or 3xx response code (after following +redirects), the message will be considered delivered and discarded. + +In turn, it is recommended that endpoints are accepting of incoming responses, +as well. While the format of event envelopes are standardized by media type, +any "pickyness" about validation may cause the queue to backup on the +registry. + +## Monitoring + +The state of the endpoints are reported via the debug/vars http interface, +usually configured to `http://localhost:5001/debug/vars`. Information such as +configuration and metrics are available by endpoint. 
+ +The following provides an example of a few endpoints that have experienced +several failures and have since recovered: + +```json +"notifications":{ + "endpoints":[ + { + "name":"local-5003", + "url":"http://localhost:5003/callback", + "Headers":{ + "Authorization":[ + "Bearer \u003can example token\u003e" + ] + }, + "Timeout":1000000000, + "Threshold":10, + "Backoff":1000000000, + "Metrics":{ + "Pending":76, + "Events":76, + "Successes":0, + "Failures":0, + "Errors":46, + "Statuses":{ + + } + } + }, + { + "name":"local-8083", + "url":"http://localhost:8083/callback", + "Headers":null, + "Timeout":1000000000, + "Threshold":10, + "Backoff":1000000000, + "Metrics":{ + "Pending":0, + "Events":76, + "Successes":76, + "Failures":0, + "Errors":28, + "Statuses":{ + "202 Accepted":76 + } + } + } + ] +} +``` + +If using notification as part of a larger application, it is _critical_ to +monitor the size ("Pending" above) of the endpoint queues. If failures or +queue sizes are increasing, it can indicate a larger problem. + +The logs are also a valuable resource for monitoring problems. A failing +endpoint will lead to messages similar to the following: + +``` +ERRO[0340] retryingsink: error writing events: httpSink{http://localhost:5003/callback}: error posting: Post http://localhost:5003/callback: dial tcp 127.0.0.1:5003: connection refused, retrying +WARN[0340] httpSink{http://localhost:5003/callback} encountered too many errors, backing off +``` + +The above indicates that several errors have led to a backoff and the registry +will wait before retrying. + +## Considerations + +Currently, the queues are inmemory, so endpoints should be _reasonably +reliable_. They are designed to make a best-effort to send the messages but if +an instance is lost, messages may be dropped. If an endpoint goes down, care +should be taken to ensure that the registry instance is not terminated before +the endpoint comes back up or messages will be lost. 
+ +This can be mitigated by running endpoints in close proximity to the registry +instances. One could run an endpoint that pages to disk and then forwards a +request to provide better durability. + +The notification system is designed around a series of interchangeable _sinks_ +which can be wired up to achieve interesting behavior. If this system doesn't +provide acceptable guarantees, adding a transactional `Sink` to the registry +is a possibility, although it may have an effect on request service time. +Please see the +[godoc](http://godoc.org/github.com/docker/distribution/notifications#Sink) +for more information. diff --git a/vendor/github.com/docker/distribution/docs/recipes/apache.md b/vendor/github.com/docker/distribution/docs/recipes/apache.md new file mode 100644 index 000000000..ac24113b2 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/recipes/apache.md @@ -0,0 +1,215 @@ + + +# Authenticating proxy with apache + +## Use-case + +People already relying on an apache proxy to authenticate their users to other services might want to leverage it and have Registry communications tunneled through the same pipeline. + +Usually, that includes enterprise setups using LDAP/AD on the backend and a SSO mechanism fronting their internal http portal. + +### Alternatives + +If you just want authentication for your registry, and are happy maintaining users access separately, you should really consider sticking with the native [basic auth registry feature](../deploying.md#native-basic-auth). + +### Solution + +With the method presented here, you implement basic authentication for docker engines in a reverse proxy that sits in front of your registry. + +While we use a simple htpasswd file as an example, any other apache authentication backend should be fairly easy to implement once you are done with the example. + +We also implement push restriction (to a limited user group) for the sake of the example. Again, you should modify this to fit your mileage. 
+ +### Gotchas + +While this model gives you the ability to use whatever authentication backend you want through the secondary authentication mechanism implemented inside your proxy, it also requires that you move TLS termination from the Registry to the proxy itself. + +Furthermore, introducing an extra http layer in your communication pipeline will make it more complex to deploy, maintain, and debug, and will possibly create issues. + +## Setting things up + +Read again [the requirements](index.md#requirements). + +Ready? + +Run the following script: + +``` +mkdir -p auth +mkdir -p data + +# This is the main apache configuration you will use +cat < auth/httpd.conf +LoadModule headers_module modules/mod_headers.so + +LoadModule authn_file_module modules/mod_authn_file.so +LoadModule authn_core_module modules/mod_authn_core.so +LoadModule authz_groupfile_module modules/mod_authz_groupfile.so +LoadModule authz_user_module modules/mod_authz_user.so +LoadModule authz_core_module modules/mod_authz_core.so +LoadModule auth_basic_module modules/mod_auth_basic.so +LoadModule access_compat_module modules/mod_access_compat.so + +LoadModule log_config_module modules/mod_log_config.so + +LoadModule ssl_module modules/mod_ssl.so + +LoadModule proxy_module modules/mod_proxy.so +LoadModule proxy_http_module modules/mod_proxy_http.so + +LoadModule unixd_module modules/mod_unixd.so + + + SSLRandomSeed startup builtin + SSLRandomSeed connect builtin + + + + User daemon + Group daemon + + +ServerAdmin you@example.com + +ErrorLog /proc/self/fd/2 + +LogLevel warn + + + LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined + LogFormat "%h %l %u %t \"%r\" %>s %b" common + + + LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" combinedio + + + CustomLog /proc/self/fd/1 common + + +ServerRoot "/usr/local/apache2" + +Listen 5043 + + + AllowOverride none + Require all denied + + + + + ServerName myregistrydomain.com + + SSLEngine on + 
SSLCertificateFile /usr/local/apache2/conf/domain.crt + SSLCertificateKeyFile /usr/local/apache2/conf/domain.key + + ## SSL settings recommandation from: https://raymii.org/s/tutorials/Strong_SSL_Security_On_Apache2.html + # Anti CRIME + SSLCompression off + + # POODLE and other stuff + SSLProtocol all -SSLv2 -SSLv3 -TLSv1 + + # Secure cypher suites + SSLCipherSuite EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH + SSLHonorCipherOrder on + + Header always set "Docker-Distribution-Api-Version" "registry/2.0" + Header onsuccess set "Docker-Distribution-Api-Version" "registry/2.0" + RequestHeader set X-Forwarded-Proto "https" + + ProxyRequests off + ProxyPreserveHost on + + # no proxy for /error/ (Apache HTTPd errors messages) + ProxyPass /error/ ! + + ProxyPass /v2 http://registry:5000/v2 + ProxyPassReverse /v2 http://registry:5000/v2 + + + Order deny,allow + Allow from all + AuthName "Registry Authentication" + AuthType basic + AuthUserFile "/usr/local/apache2/conf/httpd.htpasswd" + AuthGroupFile "/usr/local/apache2/conf/httpd.groups" + + # Read access to authentified users + + Require valid-user + + + # Write access to docker-deployer only + + Require group pusher + + + + + +EOF + +# Now, create a password file for "testuser" and "testpassword" +docker run --entrypoint htpasswd httpd:2.4 -Bbn testuser testpassword > auth/httpd.htpasswd +# Create another one for "testuserpush" and "testpasswordpush" +docker run --entrypoint htpasswd httpd:2.4 -Bbn testuserpush testpasswordpush >> auth/httpd.htpasswd + +# Create your group file +echo "pusher: testuserpush" > auth/httpd.groups + +# Copy over your certificate files +cp domain.crt auth +cp domain.key auth + +# Now create your compose file + +cat < docker-compose.yml +apache: + image: "httpd:2.4" + hostname: myregistrydomain.com + ports: + - 5043:5043 + links: + - registry:registry + volumes: + - `pwd`/auth:/usr/local/apache2/conf + +registry: + image: registry:2 + ports: + - 127.0.0.1:5000:5000 + volumes: + - 
`pwd`/data:/var/lib/registry + +EOF +``` + +## Starting and stopping + +Now, start your stack: + + docker-compose up -d + +Login with a "push" authorized user (using `testuserpush` and `testpasswordpush`), then tag and push your first image: + + docker login myregistrydomain.com:5043 + docker tag ubuntu myregistrydomain.com:5043/test + docker push myregistrydomain.com:5043/test + +Now, login with a "pull-only" user (using `testuser` and `testpassword`), then pull back the image: + + docker login myregistrydomain.com:5043 + docker pull myregistrydomain.com:5043/test + +Verify that the "pull-only" can NOT push: + + docker push myregistrydomain.com:5043/test diff --git a/vendor/github.com/docker/distribution/docs/recipes/index.md b/vendor/github.com/docker/distribution/docs/recipes/index.md new file mode 100644 index 000000000..b4dd63679 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/recipes/index.md @@ -0,0 +1,37 @@ + + +# Recipes + +You will find here a list of "recipes", end-to-end scenarios for exotic or otherwise advanced use-cases. + +Most users are not expected to have a use for these. + +## Requirements + +You should have followed entirely the basic [deployment guide](../deploying.md). + +If you have not, please take the time to do so. 
+ +At this point, it's assumed that: + + * you understand Docker security requirements, and how to configure your docker engines properly + * you have installed Docker Compose + * it's HIGHLY recommended that you get a certificate from a known CA instead of self-signed certificates + * inside the current directory, you have an X509 `domain.crt` and `domain.key`, for the CN `myregistrydomain.com` + * be sure you have stopped and removed any previously running registry (typically `docker stop registry && docker rm -v registry`) + +## The List + + * [using Apache as an authenticating proxy](apache.md) + * [using Nginx as an authenticating proxy](nginx.md) + * [running a Registry on OS X](osx-setup-guide.md) + * [mirror the Docker Hub](mirror.md) diff --git a/vendor/github.com/docker/distribution/docs/recipes/menu.md b/vendor/github.com/docker/distribution/docs/recipes/menu.md new file mode 100644 index 000000000..b79c1b309 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/recipes/menu.md @@ -0,0 +1,21 @@ + + +# Recipes + +## The List + + * [using Apache as an authenticating proxy](apache.md) + * [using Nginx as an authenticating proxy](nginx.md) + * [running a Registry on OS X](osx-setup-guide.md) + * [mirror the Docker Hub](mirror.md) diff --git a/vendor/github.com/docker/distribution/docs/recipes/mirror.md b/vendor/github.com/docker/distribution/docs/recipes/mirror.md new file mode 100644 index 000000000..241e41bd6 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/recipes/mirror.md @@ -0,0 +1,74 @@ + + +# Registry as a pull through cache + +## Use-case + +If you have multiple instances of Docker running in your environment (e.g., multiple physical or virtual machines, all running the Docker daemon), each time one of them requires an image that it doesn’t have it will go out to the internet and fetch it from the public Docker registry. 
By running a local registry mirror, you can keep most of the redundant image fetch traffic on your local network. + +### Alternatives + +Alternatively, if the set of images you are using is well delimited, you can simply pull them manually and push them to a simple, local, private registry. + +Furthermore, if your images are all built in-house, not using the Hub at all and relying entirely on your local registry is the simplest scenario. + +### Gotcha + +It's currently not possible to mirror another private registry. Only the central Hub can be mirrored. + +### Solution + +The Registry can be configured as a pull through cache. In this mode a Registry responds to all normal docker pull requests but stores all content locally. + +## How does it work? + +The first time you request an image from your local registry mirror, it pulls the image from the public Docker registry and stores it locally before handing it back to you. On subsequent requests, the local registry mirror is able to serve the image from its own storage. + +### What if the content changes on the Hub? + +When a pull is attempted with a tag, the Registry will check the remote to ensure it has the latest version of the requested content. If it doesn't, it will fetch the latest content and cache it. + +### What about my disk? + +In environments with high churn rates, stale data can build up in the cache. When running as a pull through cache the Registry will periodically remove old content to save disk space. Subsequent requests for removed content will cause a remote fetch and local re-caching. + +To ensure best performance and guarantee correctness the Registry cache should be configured to use the `filesystem` driver for storage. + +## Running a Registry as a pull through cache + +The easiest way to run a registry as a pull through cache is to run the official Registry image. + +Multiple registry caches can be deployed over the same back-end. 
A single registry cache will ensure that concurrent requests do not pull duplicate data, but this property will not hold true for a registry cache cluster. + +### Configuring the cache + +To configure a Registry to run as a pull through cache, the addition of a `proxy` section is required to the config file. + +In order to access private images on the Docker Hub, a username and password can be supplied. + + proxy: + remoteurl: https://registry-1.docker.io + username: [username] + password: [password] + +> :warn: if you specify a username and password, it's very important to understand that private resources that this user has access to on the Hub will be made available on your mirror. It's thus paramount that you secure your mirror by implementing authentication if you expect these resources to stay private! + +### Configuring the Docker daemon + +You will need to pass the `--registry-mirror` option to your Docker daemon on startup: + + docker --registry-mirror=https:// daemon + +For example, if your mirror is serving on `http://10.0.0.2:5000`, you would run: + + docker --registry-mirror=https://10.0.0.2:5000 daemon + +NOTE: Depending on your local host setup, you may be able to add the `--registry-mirror` option to the `DOCKER_OPTS` variable in `/etc/default/docker`. diff --git a/vendor/github.com/docker/distribution/docs/recipes/nginx.md b/vendor/github.com/docker/distribution/docs/recipes/nginx.md new file mode 100644 index 000000000..f4a676791 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/recipes/nginx.md @@ -0,0 +1,190 @@ + + +# Authenticating proxy with nginx + + +## Use-case + +People already relying on a nginx proxy to authenticate their users to other services might want to leverage it and have Registry communications tunneled through the same pipeline. + +Usually, that includes enterprise setups using LDAP/AD on the backend and a SSO mechanism fronting their internal http portal. 
+ +### Alternatives + +If you just want authentication for your registry, and are happy maintaining users' access separately, you should really consider sticking with the native [basic auth registry feature](../deploying.md#native-basic-auth). + +### Solution + +With the method presented here, you implement basic authentication for docker engines in a reverse proxy that sits in front of your registry. + +While we use a simple htpasswd file as an example, any other nginx authentication backend should be fairly easy to implement once you are done with the example. + +We also implement push restriction (to a limited user group) for the sake of the example. Again, you should modify this to fit your mileage. + +### Gotchas + +While this model gives you the ability to use whatever authentication backend you want through the secondary authentication mechanism implemented inside your proxy, it also requires that you move TLS termination from the Registry to the proxy itself. + +Furthermore, introducing an extra http layer in your communication pipeline will make it more complex to deploy, maintain, and debug, and will possibly create issues. Make sure the extra complexity is required. + +For instance, Amazon's Elastic Load Balancer (ELB) in HTTPS mode already sets the following client headers: + +``` +X-Real-IP +X-Forwarded-For +X-Forwarded-Proto +``` + +So if you have an nginx sitting behind it, you should remove these lines from the example config below: + +``` +X-Real-IP $remote_addr; # pass on real client's IP +X-Forwarded-For $proxy_add_x_forwarded_for; +X-Forwarded-Proto $scheme; +``` + +Otherwise nginx will reset the ELB's values, and the requests will not be routed properly. For more information, see [#970](https://github.com/docker/distribution/issues/970). + +## Setting things up + +Read again [the requirements](index.md#requirements). + +Ready? 
+ +-- + +Create the required directories + +``` +mkdir -p auth +mkdir -p data +``` + +Create the main nginx configuration you will use. + +``` + +cat < auth/nginx.conf +events { + worker_connections 1024; +} + +http { + + upstream docker-registry { + server registry:5000; + } + + ## Set a variable to help us decide if we need to add the + ## 'Docker-Distribution-Api-Version' header. + ## The registry always sets this header. + ## In the case of nginx performing auth, the header will be unset + ## since nginx is auth-ing before proxying. + map \$upstream_http_docker_distribution_api_version \$docker_distribution_api_version { + 'registry/2.0' ''; + default registry/2.0; + } + + server { + listen 443 ssl; + server_name myregistrydomain.com; + + # SSL + ssl_certificate /etc/nginx/conf.d/domain.crt; + ssl_certificate_key /etc/nginx/conf.d/domain.key; + + # Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html + ssl_protocols TLSv1.1 TLSv1.2; + ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH'; + ssl_prefer_server_ciphers on; + ssl_session_cache shared:SSL:10m; + + # disable any limits to avoid HTTP 413 for large image uploads + client_max_body_size 0; + + # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) + chunked_transfer_encoding on; + + location /v2/ { + # Do not allow connections from docker 1.5 and earlier + # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents + if (\$http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*\$" ) { + return 404; + } + + # To add basic authentication to v2 use auth_basic setting. + auth_basic "Registry realm"; + auth_basic_user_file /etc/nginx/conf.d/nginx.htpasswd; + + ## If $docker_distribution_api_version is empty, the header will not be added. + ## See the map directive above where this variable is defined. 
+ add_header 'Docker-Distribution-Api-Version' \$docker_distribution_api_version always; + + proxy_pass http://docker-registry; + proxy_set_header Host \$http_host; # required for docker client's sake + proxy_set_header X-Real-IP \$remote_addr; # pass on real client's IP + proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto \$scheme; + proxy_read_timeout 900; + } + } +} +EOF +``` + +Now create a password file for "testuser" and "testpassword" + +``` +docker run --rm --entrypoint htpasswd registry:2 -bn testuser testpassword > auth/nginx.htpasswd +``` + +Copy over your certificate files + +``` +cp domain.crt auth +cp domain.key auth +``` + +Now create your compose file + +``` +cat < docker-compose.yml +nginx: + image: "nginx:1.9" + ports: + - 5043:443 + links: + - registry:registry + volumes: + - ./auth:/etc/nginx/conf.d + - ./auth/nginx.conf:/etc/nginx/nginx.conf:ro + +registry: + image: registry:2 + ports: + - 127.0.0.1:5000:5000 + volumes: + - `pwd`./data:/var/lib/registry +EOF +``` + +## Starting and stopping + +Now, start your stack: + + docker-compose up -d + +Login with a "push" authorized user (using `testuser` and `testpassword`), then tag and push your first image: + + docker login -u=testuser -p=testpassword -e=root@example.ch myregistrydomain.com:5043 + docker tag ubuntu myregistrydomain.com:5043/test + docker push myregistrydomain.com:5043/test + docker pull myregistrydomain.com:5043/test diff --git a/vendor/github.com/docker/distribution/docs/recipes/osx-setup-guide.md b/vendor/github.com/docker/distribution/docs/recipes/osx-setup-guide.md new file mode 100644 index 000000000..d47d31c10 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/recipes/osx-setup-guide.md @@ -0,0 +1,81 @@ + + +# OS X Setup Guide + +## Use-case + +This is useful if you intend to run a registry server natively on OS X. 
+ +### Alternatives + +You can start a VM on OS X, and deploy your registry normally as a container using Docker inside that VM. + +The simplest road to get there is traditionally to use the [docker Toolbox](https://www.docker.com/toolbox), or [docker-machine](/machine/index.md), which usually relies on the [boot2docker](http://boot2docker.io/) iso inside a VirtualBox VM. + +### Solution + +Using the method described here, you install and compile your own from the git repository and run it as an OS X agent. + +### Gotchas + +Production services operation on OS X is out of scope of this document. Be sure you understand well these aspects before considering going to production with this. + +## Setup golang on your machine + +If you know, safely skip to the next section. + +If you don't, the TLDR is: + + bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/master/binscripts/gvm-installer) + source ~/.gvm/scripts/gvm + gvm install go1.4.2 + gvm use go1.4.2 + +If you want to understand, you should read [How to Write Go Code](https://golang.org/doc/code.html). 
+ +## Checkout the Docker Distribution source tree + + mkdir -p $GOPATH/src/github.com/docker + git clone https://github.com/docker/distribution.git $GOPATH/src/github.com/docker/distribution + cd $GOPATH/src/github.com/docker/distribution + +## Build the binary + + GOPATH=$(PWD)/Godeps/_workspace:$GOPATH make binaries + sudo cp bin/registry /usr/local/libexec/registry + +## Setup + +Copy the registry configuration file in place: + + mkdir /Users/Shared/Registry + cp docs/osx/config.yml /Users/Shared/Registry/config.yml + +## Running the Docker Registry under launchd + +Copy the Docker registry plist into place: + + plutil -lint docs/osx/com.docker.registry.plist + cp docs/osx/com.docker.registry.plist ~/Library/LaunchAgents/ + chmod 644 ~/Library/LaunchAgents/com.docker.registry.plist + +Start the Docker registry: + + launchctl load ~/Library/LaunchAgents/com.docker.registry.plist + +### Restarting the docker registry service + + launchctl stop com.docker.registry + launchctl start com.docker.registry + +### Unloading the docker registry service + + launchctl unload ~/Library/LaunchAgents/com.docker.registry.plist diff --git a/vendor/github.com/docker/distribution/docs/recipes/osx/com.docker.registry.plist b/vendor/github.com/docker/distribution/docs/recipes/osx/com.docker.registry.plist new file mode 100644 index 000000000..0982349f4 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/recipes/osx/com.docker.registry.plist @@ -0,0 +1,42 @@ + + + + + Label + com.docker.registry + KeepAlive + + StandardErrorPath + /Users/Shared/Registry/registry.log + StandardOutPath + /Users/Shared/Registry/registry.log + Program + /usr/local/libexec/registry + ProgramArguments + + /usr/local/libexec/registry + /Users/Shared/Registry/config.yml + + Sockets + + http-listen-address + + SockServiceName + 5000 + SockType + dgram + SockFamily + IPv4 + + http-debug-address + + SockServiceName + 5001 + SockType + dgram + SockFamily + IPv4 + + + + diff --git 
a/vendor/github.com/docker/distribution/docs/recipes/osx/config.yml b/vendor/github.com/docker/distribution/docs/recipes/osx/config.yml new file mode 100644 index 000000000..63b8f7135 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/recipes/osx/config.yml @@ -0,0 +1,16 @@ +version: 0.1 +log: + level: info + fields: + service: registry + environment: macbook-air +storage: + cache: + blobdescriptor: inmemory + filesystem: + rootdirectory: /Users/Shared/Registry +http: + addr: 0.0.0.0:5000 + secret: mytokensecret + debug: + addr: localhost:5001 diff --git a/vendor/github.com/docker/distribution/docs/spec/api.md b/vendor/github.com/docker/distribution/docs/spec/api.md index efa55cfcf..c4517c0b4 100644 --- a/vendor/github.com/docker/distribution/docs/spec/api.md +++ b/vendor/github.com/docker/distribution/docs/spec/api.md @@ -1,8 +1,12 @@ ---- -title: "HTTP API V2" -description: "Specification for the Registry API." -keywords: ["registry, on-prem, images, tags, repository, distribution, api, advanced"] ---- + # Docker Registry HTTP API V2 @@ -244,7 +248,7 @@ enforce this. The rules for a repository name are as follows: must match the regular expression `[a-z0-9]+(?:[._-][a-z0-9]+)*`. 2. If a repository name has two or more path components, they must be separated by a forward slash ("/"). -3. The total length of a repository name, including slashes, must be less than +3. The total length of a repository name, including slashes, must be less the 256 characters. These name requirements _only_ apply to the registry API and should accept a @@ -795,7 +799,7 @@ Note that the upload url will not be available forever. If the upload uuid is unknown to the registry, a `404 Not Found` response will be returned and the client must restart the upload process. -#### Deleting a Layer +### Deleting a Layer A layer may be deleted from the registry via its `name` and `digest`. 
A delete may be issued with the following request format: diff --git a/vendor/github.com/docker/distribution/docs/spec/api.md.tmpl b/vendor/github.com/docker/distribution/docs/spec/api.md.tmpl index cf7f92971..eeafec1ea 100644 --- a/vendor/github.com/docker/distribution/docs/spec/api.md.tmpl +++ b/vendor/github.com/docker/distribution/docs/spec/api.md.tmpl @@ -1,8 +1,12 @@ ---- -title: "HTTP API V2" -description: "Specification for the Registry API." -keywords: ["registry, on-prem, images, tags, repository, distribution, api, advanced"] ---- + # Docker Registry HTTP API V2 @@ -244,7 +248,7 @@ enforce this. The rules for a repository name are as follows: must match the regular expression `[a-z0-9]+(?:[._-][a-z0-9]+)*`. 2. If a repository name has two or more path components, they must be separated by a forward slash ("/"). -3. The total length of a repository name, including slashes, must be less than +3. The total length of a repository name, including slashes, must be less the 256 characters. These name requirements _only_ apply to the registry API and should accept a @@ -795,7 +799,7 @@ Note that the upload url will not be available forever. If the upload uuid is unknown to the registry, a `404 Not Found` response will be returned and the client must restart the upload process. -#### Deleting a Layer +### Deleting a Layer A layer may be deleted from the registry via its `name` and `digest`. 
A delete may be issued with the following request format: diff --git a/vendor/github.com/docker/distribution/docs/spec/auth/index.md b/vendor/github.com/docker/distribution/docs/spec/auth/index.md index d1aa94225..f6ee8e1fa 100644 --- a/vendor/github.com/docker/distribution/docs/spec/auth/index.md +++ b/vendor/github.com/docker/distribution/docs/spec/auth/index.md @@ -1,8 +1,13 @@ ---- -title: "Docker Registry Token Authentication" -description: "Docker Registry v2 authentication schema" -keywords: ["registry, on-prem, images, tags, repository, distribution, authentication, advanced"] ---- + # Docker Registry v2 authentication diff --git a/vendor/github.com/docker/distribution/docs/spec/auth/jwt.md b/vendor/github.com/docker/distribution/docs/spec/auth/jwt.md index aa9941b0b..c90bd6e86 100644 --- a/vendor/github.com/docker/distribution/docs/spec/auth/jwt.md +++ b/vendor/github.com/docker/distribution/docs/spec/auth/jwt.md @@ -1,8 +1,13 @@ ---- -title: "Token Authentication Implementation" -description: "Describe the reference implementation of the Docker Registry v2 authentication schema" -keywords: ["registry, on-prem, images, tags, repository, distribution, JWT authentication, advanced"] ---- + # Docker Registry v2 Bearer token specification diff --git a/vendor/github.com/docker/distribution/docs/spec/auth/oauth.md b/vendor/github.com/docker/distribution/docs/spec/auth/oauth.md index d946da8a2..0f8d35dbb 100644 --- a/vendor/github.com/docker/distribution/docs/spec/auth/oauth.md +++ b/vendor/github.com/docker/distribution/docs/spec/auth/oauth.md @@ -1,8 +1,13 @@ ---- -title: "Oauth2 Token Authentication" -description: "Specifies the Docker Registry v2 authentication" -keywords: ["registry, on-prem, images, tags, repository, distribution, oauth2, advanced"] ---- + # Docker Registry v2 authentication using OAuth2 @@ -188,3 +193,4 @@ Content-Type: application/json 
{"refresh_token":"kas9Da81Dfa8","access_token":"eyJhbGciOiJFUzI1NiIsInR5":"expires_in":900,"scope":"repository:samalba/my-app:pull,repository:samalba/my-app:push"} ``` + diff --git a/vendor/github.com/docker/distribution/docs/spec/auth/scope.md b/vendor/github.com/docker/distribution/docs/spec/auth/scope.md index 6ef61edf1..a8f6c0628 100644 --- a/vendor/github.com/docker/distribution/docs/spec/auth/scope.md +++ b/vendor/github.com/docker/distribution/docs/spec/auth/scope.md @@ -1,8 +1,13 @@ ---- -title: "Token Scope Documentation" -description: "Describes the scope and access fields used for registry authorization tokens" -keywords: ["registry, on-prem, images, tags, repository, distribution, advanced, access, scope"] ---- + # Docker Registry Token Scope and Access @@ -39,23 +44,13 @@ intended to represent. This type may be specific to a resource provider but must be understood by the authorization server in order to validate the subject is authorized for a specific resource. -#### Resource Class - -The resource type might have a resource class which further classifies the -the resource name within the resource type. A class is not required and -is specific to the resource type. - #### Example Resource Types - `repository` - represents a single repository within a registry. A repository may represent many manifest or content blobs, but the resource type is considered the collections of those items. Actions which may be performed on a `repository` are `pull` for accessing the collection and `push` for adding to -it. By default the `repository` type has the class of `image`. - - `repository(plugin)` - represents a single repository of plugins within a -registry. A plugin repository has the same content and actions as a repository. - - `registry` - represents the entire registry. Used for administrative actions -or lookup operations that span an entire registry. +it. ### Resource Name @@ -88,8 +83,7 @@ scopes. 
``` scope := resourcescope [ ' ' resourcescope ]* resourcescope := resourcetype ":" resourcename ":" action [ ',' action ]* -resourcetype := resourcetypevalue [ '(' resourcetypevalue ')' ] -resourcetypevalue := /[a-z0-9]+/ +resourcetype := /[a-z]*/ resourcename := [ hostname '/' ] component [ '/' component ]* hostname := hostcomponent ['.' hostcomponent]* [':' port-number] hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ @@ -146,3 +140,4 @@ done by fetching an access token using the refresh token. Since the refresh token is not scoped to specific resources for an audience, extra care should be taken to only use the refresh token to negotiate new access tokens directly with the authorization server, and never with a resource provider. + diff --git a/vendor/github.com/docker/distribution/docs/spec/auth/token.md b/vendor/github.com/docker/distribution/docs/spec/auth/token.md index f8391bd5a..81af53b2e 100644 --- a/vendor/github.com/docker/distribution/docs/spec/auth/token.md +++ b/vendor/github.com/docker/distribution/docs/spec/auth/token.md @@ -1,8 +1,13 @@ ---- -title: "Token Authentication Specification" -description: "Specifies the Docker Registry v2 authentication" -keywords: ["registry, on-prem, images, tags, repository, distribution, Bearer authentication, advanced"] ---- + # Docker Registry v2 authentication via central service @@ -20,7 +25,7 @@ This document outlines the v2 Docker registry authentication scheme: 5. The client retries the original request with the Bearer token embedded in the request's Authorization header. 6. The Registry authorizes the client by validating the Bearer token and the - claim set embedded within it and begins the push/pull session as usual. + claim set embedded within it and begins the push/pull session as usual. ## Requirements @@ -156,7 +161,7 @@ Defines getting a bearer and refresh token using the token endpoint. expires_in
- (Optional) The duration in seconds since the token was issued that it + (Optional) The duration in seconds since the token was issued that it will remain valid. When omitted, this defaults to 60 seconds. For compatibility with older clients, a token should never be returned with less than 60 seconds to live. diff --git a/vendor/github.com/docker/distribution/docs/spec/implementations.md b/vendor/github.com/docker/distribution/docs/spec/implementations.md index 347465350..ec937b647 100644 --- a/vendor/github.com/docker/distribution/docs/spec/implementations.md +++ b/vendor/github.com/docker/distribution/docs/spec/implementations.md @@ -1,6 +1,8 @@ ---- -published: false ---- + # Distribution API Implementations diff --git a/vendor/github.com/docker/distribution/docs/spec/index.md b/vendor/github.com/docker/distribution/docs/spec/index.md index 952ebabd2..474bd455c 100644 --- a/vendor/github.com/docker/distribution/docs/spec/index.md +++ b/vendor/github.com/docker/distribution/docs/spec/index.md @@ -1,8 +1,13 @@ ---- -title: "Reference Overview" -description: "Explains registry JSON objects" -keywords: ["registry, service, images, repository, json"] ---- + # Docker Registry Reference diff --git a/vendor/github.com/docker/distribution/docs/spec/json.md b/vendor/github.com/docker/distribution/docs/spec/json.md index 825b17ac2..a8916dccc 100644 --- a/vendor/github.com/docker/distribution/docs/spec/json.md +++ b/vendor/github.com/docker/distribution/docs/spec/json.md @@ -1,9 +1,13 @@ ---- -published: false -title: "Docker Distribution JSON Canonicalization" -description: "Explains registry JSON objects" -keywords: ["registry, service, images, repository, json"] ---- + diff --git a/vendor/github.com/docker/distribution/docs/spec/manifest-v2-1.md b/vendor/github.com/docker/distribution/docs/spec/manifest-v2-1.md index 1fadc25a0..056f4bc66 100644 --- a/vendor/github.com/docker/distribution/docs/spec/manifest-v2-1.md +++ 
b/vendor/github.com/docker/distribution/docs/spec/manifest-v2-1.md @@ -1,12 +1,16 @@ ---- -title: "Image Manifest V 2, Schema 1 " -description: "image manifest for the Registry." -keywords: ["registry, on-prem, images, tags, repository, distribution, api, advanced, manifest"] ---- + # Image Manifest Version 2, Schema 1 -This document outlines the format of the V2 image manifest. The image +This document outlines the format of the V2 image manifest. The image manifest described herein was introduced in the Docker daemon in the [v1.3.0 release](https://github.com/docker/docker/commit/9f482a66ab37ec396ac61ed0c00d59122ac07453). It is a provisional manifest to provide a compatibility with the [V1 Image diff --git a/vendor/github.com/docker/distribution/docs/spec/manifest-v2-2.md b/vendor/github.com/docker/distribution/docs/spec/manifest-v2-2.md index 2319cf6d2..afc614bcc 100644 --- a/vendor/github.com/docker/distribution/docs/spec/manifest-v2-2.md +++ b/vendor/github.com/docker/distribution/docs/spec/manifest-v2-2.md @@ -1,12 +1,16 @@ ---- -title: "Image Manifest V 2, Schema 2 " -description: "image manifest for the Registry." -keywords: ["registry, on-prem, images, tags, repository, distribution, api, advanced, manifest"] ---- + # Image Manifest Version 2, Schema 2 -This document outlines the format of the V2 image manifest, schema version 2. +This document outlines the format of the V2 image manifest, schema version 2. 
The original (and provisional) image manifest for V2 (schema 1), was introduced in the Docker daemon in the [v1.3.0 release](https://github.com/docker/docker/commit/9f482a66ab37ec396ac61ed0c00d59122ac07453) @@ -30,7 +34,6 @@ the resources they reference: - `application/vnd.docker.container.image.v1+json`: Container config JSON - `application/vnd.docker.image.rootfs.diff.tar.gzip`: "Layer", as a gzipped tar - `application/vnd.docker.image.rootfs.foreign.diff.tar.gzip`: "Layer", as a gzipped tar that should never be pushed -- `application/vnd.docker.plugin.v1+json`: Plugin config JSON ## Manifest List @@ -60,8 +63,8 @@ image manifest based on the Content-Type returned in the HTTP response. - **`mediaType`** *string* The MIME type of the referenced object. This will generally be - `application/vnd.docker.distribution.manifest.v2+json`, but it could also - be `application/vnd.docker.distribution.manifest.v1+json` if the manifest + `application/vnd.docker.image.manifest.v2+json`, but it could also + be `application/vnd.docker.image.manifest.v1+json` if the manifest list references a legacy schema-1 manifest. - **`size`** *int* @@ -123,7 +126,7 @@ image manifest based on the Content-Type returned in the HTTP response. "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json", "manifests": [ { - "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "mediaType": "application/vnd.docker.image.manifest.v2+json", "size": 7143, "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", "platform": { @@ -132,7 +135,7 @@ image manifest based on the Content-Type returned in the HTTP response. 
} }, { - "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "mediaType": "application/vnd.docker.image.manifest.v2+json", "size": 7682, "digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270", "platform": { diff --git a/vendor/github.com/docker/distribution/docs/spec/menu.md b/vendor/github.com/docker/distribution/docs/spec/menu.md index 9237e3ce8..ebc52327b 100644 --- a/vendor/github.com/docker/distribution/docs/spec/menu.md +++ b/vendor/github.com/docker/distribution/docs/spec/menu.md @@ -1,7 +1,13 @@ ---- -title: "Reference" -description: "Explains registry JSON objects" -keywords: ["registry, service, images, repository, json"] -type: "menu" -identifier: "smn_registry_ref" ---- + + diff --git a/vendor/github.com/docker/distribution/docs/storage-drivers/azure.md b/vendor/github.com/docker/distribution/docs/storage-drivers/azure.md new file mode 100644 index 000000000..a84888de8 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/storage-drivers/azure.md @@ -0,0 +1,78 @@ + + + +# Microsoft Azure storage driver + +An implementation of the `storagedriver.StorageDriver` interface which uses [Microsoft Azure Blob Storage](http://azure.microsoft.com/en-us/services/storage/) for object storage. + +## Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ accountname + + yes + + Name of the Azure Storage Account. +
+ accountkey + + yes + + Primary or Secondary Key for the Storage Account. +
+ container + + yes + + Name of the Azure root storage container in which all registry data will be stored. Must comply the storage container name [requirements][create-container-api]. +
+ realm + + no + + Domain name suffix for the Storage Service API endpoint. For example realm for "Azure in China" would be `core.chinacloudapi.cn` and realm for "Azure Government" would be `core.usgovcloudapi.net`. By default, this + is core.windows.net. +
+ + +## Related Information + +* To get information about +[azure-blob-storage](http://azure.microsoft.com/en-us/services/storage/) visit +the Microsoft website. +* You can use Microsoft's [Blob Service REST API](https://msdn.microsoft.com/en-us/library/azure/dd135733.aspx) to [create a container] (https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx). diff --git a/vendor/github.com/docker/distribution/docs/storage-drivers/filesystem.md b/vendor/github.com/docker/distribution/docs/storage-drivers/filesystem.md new file mode 100644 index 000000000..8e269cdbc --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/storage-drivers/filesystem.md @@ -0,0 +1,24 @@ + + + +# Filesystem storage driver + +An implementation of the `storagedriver.StorageDriver` interface which uses the local filesystem. + +## Parameters + +`rootdirectory`: (optional) The absolute path to a root directory tree in which +to store all registry files. The registry stores all its data here so make sure +there is adequate space available. Defaults to `/var/lib/registry`. +`maxthreads`: (optional) The maximum number of simultaneous blocking filesystem +operations permitted within the registry. Each operation spawns a new thread and +may cause thread exhaustion issues if many are done in parallel. Defaults to +`100`, and can be no lower than `25`. diff --git a/vendor/github.com/docker/distribution/docs/storage-drivers/gcs.md b/vendor/github.com/docker/distribution/docs/storage-drivers/gcs.md new file mode 100644 index 000000000..1bc67f9ed --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/storage-drivers/gcs.md @@ -0,0 +1,78 @@ + + + +# Google Cloud Storage driver + +An implementation of the `storagedriver.StorageDriver` interface which uses Google Cloud for object storage. + +## Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ bucket + + yes + + Storage bucket name. +
+ keyfile + + no + + A private service account key file in JSON format. Instead of a key file, Google Application Default Credentials can be used. +
+ rootdirectory + + no + + This is a prefix that will be applied to all Google Cloud Storage keys to allow you to segment data in your bucket if necessary. +
+ chunksize + + no (default 5242880) + + This is the chunk size used for uploading large blobs, must be a multiple of 256*1024. +
+ + +`bucket`: The name of your Google Cloud Storage bucket where you wish to store objects (needs to already be created prior to driver initialization). + +`keyfile`: (optional) A private key file in JSON format, used for [Service Account Authentication](https://cloud.google.com/storage/docs/authentication#service_accounts). + +**Note** Instead of a key file you can use [Google Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials). + +`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root). diff --git a/vendor/github.com/docker/distribution/docs/storage-drivers/index.md b/vendor/github.com/docker/distribution/docs/storage-drivers/index.md new file mode 100644 index 000000000..89635bd37 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/storage-drivers/index.md @@ -0,0 +1,66 @@ + + + +# Docker Registry Storage Driver + +This document describes the registry storage driver model, implementation, and explains how to contribute new storage drivers. + +## Provided Drivers + +This storage driver package comes bundled with several drivers: + +- [inmemory](inmemory.md): A temporary storage driver using a local inmemory map. This exists solely for reference and testing. +- [filesystem](filesystem.md): A local storage driver configured to use a directory tree in the local filesystem. +- [s3](s3.md): A driver storing objects in an Amazon Simple Storage Service (S3) bucket. +- [azure](azure.md): A driver storing objects in [Microsoft Azure Blob Storage](http://azure.microsoft.com/en-us/services/storage/). +- [swift](swift.md): A driver storing objects in [Openstack Swift](http://docs.openstack.org/developer/swift/). +- [oss](oss.md): A driver storing objects in [Aliyun OSS](http://www.aliyun.com/product/oss). +- [gcs](gcs.md): A driver storing objects in a [Google Cloud Storage](https://cloud.google.com/storage/) bucket. 
+ +## Storage Driver API + +The storage driver API is designed to model a filesystem-like key/value storage in a manner abstract enough to support a range of drivers from the local filesystem to Amazon S3 or other distributed object storage systems. + +Storage drivers are required to implement the `storagedriver.StorageDriver` interface provided in `storagedriver.go`, which includes methods for reading, writing, and deleting content, as well as listing child objects of a specified prefix key. + +Storage drivers are intended to be written in Go, providing compile-time +validation of the `storagedriver.StorageDriver` interface. + +## Driver Selection and Configuration + +The preferred method of selecting a storage driver is using the `StorageDriverFactory` interface in the `storagedriver/factory` package. These factories provide a common interface for constructing storage drivers with a parameters map. The factory model is based off of the [Register](http://golang.org/pkg/database/sql/#Register) and [Open](http://golang.org/pkg/database/sql/#Open) methods in the builtin [database/sql](http://golang.org/pkg/database/sql) package. + +Storage driver factories may be registered by name using the +`factory.Register` method, and then later invoked by calling `factory.Create` +with a driver name and parameters map. If no such storage driver can be found, +`factory.Create` will return an `InvalidStorageDriverError`. + +## Driver Contribution + +### Writing new storage drivers + +To create a valid storage driver, one must implement the +`storagedriver.StorageDriver` interface and make sure to expose this driver +via the factory system. + +#### Registering + +Storage drivers should call `factory.Register` with their driver name in an `init` method, allowing callers of `factory.New` to construct instances of this driver without requiring modification of imports throughout the codebase. 
+ +## Testing + +Storage driver test suites are provided in +`storagedriver/testsuites/testsuites.go` and may be used for any storage +driver written in Go. Tests can be registered using the `RegisterSuite` +function, which run the same set of tests for any registered drivers. diff --git a/vendor/github.com/docker/distribution/docs/storage-drivers/inmemory.md b/vendor/github.com/docker/distribution/docs/storage-drivers/inmemory.md new file mode 100644 index 000000000..1a14e77a2 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/storage-drivers/inmemory.md @@ -0,0 +1,23 @@ + + + +# In-memory storage driver (Testing Only) + +For purely tests purposes, you can use the `inmemory` storage driver. This +driver is an implementation of the `storagedriver.StorageDriver` interface which +uses local memory for object storage. If you would like to run a registry from +volatile memory, use the [`filesystem` driver](filesystem.md) on a ramdisk. + +**IMPORTANT**: This storage driver *does not* persist data across runs. This is why it is only suitable for testing. *Never* use this driver in production. + +## Parameters + +None diff --git a/vendor/github.com/docker/distribution/docs/storage-drivers/menu.md b/vendor/github.com/docker/distribution/docs/storage-drivers/menu.md new file mode 100644 index 000000000..3638649fc --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/storage-drivers/menu.md @@ -0,0 +1,13 @@ + + diff --git a/vendor/github.com/docker/distribution/docs/storage-drivers/oss.md b/vendor/github.com/docker/distribution/docs/storage-drivers/oss.md new file mode 100644 index 000000000..a85e315e2 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/storage-drivers/oss.md @@ -0,0 +1,126 @@ + + +# Aliyun OSS storage driver + +An implementation of the `storagedriver.StorageDriver` interface which uses [Aliyun OSS](http://www.aliyun.com/product/oss) for object storage. 
+ +## Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ accesskeyid + +yes + +Your access key ID. +
+ accesskeysecret + +yes + +Your access key secret. +
+ region + +yes + The name of the OSS region in which you would like to store objects (for example `oss-cn-beijing`). For a list of regions, you can look at <http://docs.aliyun.com/#/oss/product-documentation/domain-region>. +
+ endpoint + +no + +An endpoint which defaults to `<bucket>.<region>.aliyuncs.com` or `<bucket>.<region>-internal.aliyuncs.com` (when `internal=true`). You can change the default endpoint by changing this value. +
+ internal + +no + An internal endpoint or the public endpoint for OSS access. The default is false. For a list of regions, you can look at <http://docs.aliyun.com/#/oss/product-documentation/domain-region>. +
+ bucket + +yes + The name of your OSS bucket where you wish to store objects (needs to already be created prior to driver initialization). +
+ encrypt + +no + Specifies whether you would like your data encrypted on the server side. Defaults to false if not specified. +
+ secure + +no + Specifies whether to transfer data to the bucket over ssl or not. If you omit this value, `true` is used. +
+ chunksize + +no + The default part size for multipart uploads (performed by WriteStream) to OSS. The default is 10 MB. Keep in mind that the minimum part size for OSS is 5MB. You might experience better performance for larger chunk sizes depending on the speed of your connection to OSS. +
+ rootdirectory + +no + The root directory tree in which to store all registry files. Defaults to an empty string (bucket root). +
diff --git a/vendor/github.com/docker/distribution/docs/storage-drivers/s3.md b/vendor/github.com/docker/distribution/docs/storage-drivers/s3.md new file mode 100644 index 000000000..30187db47 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/storage-drivers/s3.md @@ -0,0 +1,320 @@ + + + +# S3 storage driver + +An implementation of the `storagedriver.StorageDriver` interface which uses Amazon S3 or S3 compatible services for object storage. + +## Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ accesskey + + yes + + Your AWS Access Key. +
+ secretkey + + yes + + Your AWS Secret Key. +
+ region + + yes + + The AWS region in which your bucket exists. For the moment, the Go AWS + library in use does not use the newer DNS based bucket routing. +
+ regionendpoint + + no + + Endpoint for S3 compatible storage services (Minio, etc) +
+ bucket + + yes + + The bucket name in which you want to store the registry's data. +
+ encrypt + + no + + Specifies whether the registry stores the image in encrypted format or + not. A boolean value. The default is false. +
+ keyid + + no + + Optional KMS key ID to use for encryption (encrypt must be true, or this + parameter will be ignored). The default is none. +
+ secure + + no + + Indicates whether to use HTTPS instead of HTTP. A boolean value. The + default is true. +
+ v4auth + + no + + Indicates whether the registry uses Version 4 of AWS's authentication. + Generally, you should set this to true unless you are using an + S3 compatible provider that does not support v4 signature signing. + If you set this to false then the storage driver will use v2 signature signing. + By default, this is true. + You can not use v2 signing if you are using AWS S3. +
+ chunksize + + no + + The S3 API requires multipart upload chunks to be at least 5MB. This value + should be a number that is larger than 5*1024*1024. +
+ multipartcopychunksize + + no + + Chunk size for all but the last Upload Part - Copy + operation of a copy that uses the multipart upload API. +
+ multipartcopymaxconcurrency + + no + + Maximum number of concurrent Upload Part - Copy operations for a + copy that uses the multipart upload API. +
+ multipartcopythresholdsize + + no + + Objects above this size will be copied using the multipart upload API. + PUT Object - Copy is used for objects at or below this size. +
+ rootdirectory + + no + + This is a prefix that will be applied to all S3 keys to allow you to segment data in your bucket if necessary. +
+ storageclass + + no + + The S3 storage class applied to each registry file. The default value is STANDARD. +
+ objectacl + + no + + The S3 Canned ACL for objects. The default value is "private". +
+ + +`accesskey`: Your aws access key. + +`secretkey`: Your aws secret key. + +**Note** You can provide empty strings for your access and secret keys if you plan on running the driver on an ec2 instance and will handle authentication with the instance's credentials. + +`region`: The name of the aws region in which you would like to store objects (for example `us-east-1`). For a list of regions, you can look at http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html + +`regionendpoint`: (optional) Endpoint URL for S3 compatible APIs. This should not be provided when using Amazon S3. + +`bucket`: The name of your S3 bucket where you wish to store objects. The bucket must exist prior to the driver initialization. + +`encrypt`: (optional) Whether you would like your data encrypted on the server side (defaults to false if not specified). + +`keyid`: (optional) Whether you would like your data encrypted with this KMS key ID (defaults to none if not specified, will be ignored if encrypt is not true). + +`secure`: (optional) Whether you would like to transfer data to the bucket over ssl or not. Defaults to true (meaning transferring over ssl) if not specified. Note that while setting this to false will improve performance, it is not recommended due to security concerns. + +`v4auth`: (optional) Whether you would like to use aws signature version 4 with your requests. This defaults to true if not specified (note that the eu-central-1 region does not work with version 2 signatures, so the driver will error out if initialized with this region and v4auth set to false) + +`chunksize`: (optional) The default part size for multipart uploads (performed by WriteStream) to S3. The default is 10 MB. Keep in mind that the minimum part size for S3 is 5MB. Depending on the speed of your connection to S3, a larger chunk size may result in better performance; faster connections will benefit from larger chunk sizes. 
+ +`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root). + +`storageclass`: (optional) The storage class applied to each registry file. Defaults to STANDARD. Valid options are NONE, STANDARD and REDUCED_REDUNDANCY. Use NONE if your S3 compatible provider does not support storage classes. + +`objectacl`: (optional) The canned object ACL to be applied to each registry object. Defaults to `private`. If you are using a bucket owned by another AWS account, it is recommended that you set this to `bucket-owner-full-control` so that the bucket owner can access your objects. Other valid options are available in the [AWS S3 documentation](http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl). + +## S3 permission scopes + +The following IAM permissions are required by the registry for push and pull. See [the S3 policy documentation](http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) for more details. + +``` + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:ListBucket", + "s3:GetBucketLocation", + "s3:ListBucketMultipartUploads" + ], + "Resource": "arn:aws:s3:::mybucket" + }, + { + "Effect": "Allow", + "Action": [ + "s3:PutObject", + "s3:GetObject", + "s3:DeleteObject", + "s3:ListMultipartUploadParts", + "s3:AbortMultipartUpload" + ], + "Resource": "arn:aws:s3:::mybucket/*" + } +] +``` + +# CloudFront as Middleware with S3 backend + +## Use Case + +Adding CloudFront as a middleware for your S3 backed registry can dramatically improve pull times. Your registry will have the ability to retrieve your images from edge servers, rather than the geographically limited location of your S3 bucket. The farther your registry is from your bucket, the more improvements you will see. See [Amazon CloudFront](https://aws.amazon.com/cloudfront/details/). 
+ +## Configuring CloudFront for Distribution + +If you are unfamiliar with creating a CloudFront distribution, see [Getting Started with Cloudfront](http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/GettingStarted.html). + +Defaults can be kept in most areas except: + +### Origin: + +The CloudFront distribution must be created such that the `Origin Path` is set to the directory level of the root "docker" key in S3. If your registry exists on the root of the bucket, this path should be left blank. + +### Behaviors: + + - Viewer Protocol Policy: HTTPS Only + - Allowed HTTP Methods: GET, HEAD, OPTIONS, PUT, POST, PATCH, DELETE + - Cached HTTP Methods: OPTIONS (checked) + - Restrict Viewer Access (Use Signed URLs or Signed Cookies): Yes + - Trusted Signers: Self (Can add other accounts as long as you have access to CloudFront Key Pairs for those additional accounts) + +## Registry configuration + +Here the `middleware` option is used. It is still important to keep the `storage` option as CloudFront will only handle `pull` actions; `push` actions are still directly written to S3. + +The following example shows what you will need at minimum: +``` +... +storage: + s3: + region: us-east-1 + bucket: docker.myregistry.com +middleware: + storage: + - name: cloudfront + options: + baseurl: https://abcdefghijklmn.cloudfront.net/ + privatekey: /etc/docker/cloudfront/pk-ABCEDFGHIJKLMNOPQRST.pem + keypairid: ABCEDFGHIJKLMNOPQRST +... +``` + +## CloudFront Key-Pair + +A CloudFront key-pair is required for all AWS accounts needing access to your CloudFront distribution. For information, please see [Creating CloudFront Key Pairs](http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-trusted-signers.html#private-content-creating-cloudfront-key-pairs). 
diff --git a/vendor/github.com/docker/distribution/docs/storage-drivers/swift.md b/vendor/github.com/docker/distribution/docs/storage-drivers/swift.md new file mode 100644 index 000000000..0bbf98c57 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/storage-drivers/swift.md @@ -0,0 +1,268 @@ + + + +# OpenStack Swift storage driver + +An implementation of the `storagedriver.StorageDriver` interface that uses [OpenStack Swift](http://docs.openstack.org/developer/swift/) for object storage. + +## Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ authurl + + yes + + URL for obtaining an auth token. https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth +
+ username + + yes + + Your Openstack user name. +
+ password + + yes + + Your Openstack password. +
+ region + + no + + The Openstack region in which your container exists. +
+ container + + yes + + The name of your Swift container where you wish to store the registry's data. The driver creates the named container during its initialization. +
+ tenant + + no + + Your Openstack tenant name. You can either use tenant or tenantid. +
+ tenantid + + no + + Your Openstack tenant id. You can either use tenant or tenantid. +
+ domain + + no + + Your user's Openstack domain name for Identity v3 API. You can either use domain or domainid. +
+ domainid + + no + + Your user's Openstack domain id for Identity v3 API. You can either use domain or domainid. +
+ tenantdomain + + no + + Your tenant's Openstack domain name for Identity v3 API. Only necessary if different from the domain. You can either use tenantdomain or tenantdomainid. +
+ tenantdomainid + + no + + Your tenant's Openstack domain id for Identity v3 API. Only necessary if different from the domain. You can either use tenantdomain or tenantdomainid. +
+ trustid + + no + + Your Openstack trust id for Identity v3 API. +
+ insecureskipverify + + no + + true to skip TLS verification, false by default. +
+ chunksize + + no + + Size of the data segments for the Swift Dynamic Large Objects. This value should be a number (defaults to 5M). +
+ prefix + + no + + This is a prefix that will be applied to all Swift keys to allow you to segment data in your container if necessary. Defaults to the empty string which is the container's root. +
+ secretkey + + no + + The secret key used to generate temporary URLs. +
+ accesskey + + no + + The access key to generate temporary URLs. It is used by HP Cloud Object Storage in addition to the `secretkey` parameter. +
+ authversion + + no + + Specify the OpenStack Auth's version,for example 3. By default the driver will autodetect the auth's version from the AuthURL. +
+ endpointtype + + no + + The endpoint type used when connecting to swift. Possible values are `public`, `internal` and `admin`. Default is `public`. +
+ +The features supported by the Swift server are queried by requesting the `/info` URL on the server. In case the administrator +disabled that feature, the configuration file can specify the following optional parameters : + + + + + + + + + + +
+ tempurlcontainerkey + +

+ Specify whether to use container secret key to generate temporary URL when set to true, or the account secret key otherwise.

+

+
+ tempurlmethods + +

+ Array of HTTP methods that are supported by the TempURL middleware of the Swift server. Example:

+ + - tempurlmethods: + - GET + - PUT + - HEAD + - POST + - DELETE + +

+
diff --git a/vendor/github.com/docker/distribution/errors.go b/vendor/github.com/docker/distribution/errors.go index 020d33258..c20f28113 100644 --- a/vendor/github.com/docker/distribution/errors.go +++ b/vendor/github.com/docker/distribution/errors.go @@ -5,7 +5,7 @@ import ( "fmt" "strings" - "github.com/opencontainers/go-digest" + "github.com/docker/distribution/digest" ) // ErrAccessDenied is returned when an access to a requested resource is @@ -77,7 +77,7 @@ func (err ErrManifestUnknownRevision) Error() string { type ErrManifestUnverified struct{} func (ErrManifestUnverified) Error() string { - return "unverified manifest" + return fmt.Sprintf("unverified manifest") } // ErrManifestVerification provides a type to collect errors encountered diff --git a/vendor/github.com/docker/distribution/health/checks/checks.go b/vendor/github.com/docker/distribution/health/checks/checks.go index 7760f6105..e3c3b08d3 100644 --- a/vendor/github.com/docker/distribution/health/checks/checks.go +++ b/vendor/github.com/docker/distribution/health/checks/checks.go @@ -2,11 +2,9 @@ package checks import ( "errors" - "fmt" "net" "net/http" "os" - "path/filepath" "strconv" "time" @@ -17,19 +15,10 @@ import ( // if the file exists. 
func FileChecker(f string) health.Checker { return health.CheckFunc(func() error { - absoluteFilePath, err := filepath.Abs(f) - if err != nil { - return fmt.Errorf("failed to get absolute path for %q: %v", f, err) - } - - _, err = os.Stat(absoluteFilePath) - if err == nil { + if _, err := os.Stat(f); err == nil { return errors.New("file exists") - } else if os.IsNotExist(err) { - return nil } - - return err + return nil }) } diff --git a/vendor/github.com/docker/distribution/health/doc.go b/vendor/github.com/docker/distribution/health/doc.go index 877f4daca..8c106b42b 100644 --- a/vendor/github.com/docker/distribution/health/doc.go +++ b/vendor/github.com/docker/distribution/health/doc.go @@ -24,7 +24,7 @@ // "manual" checks that allow the service to quickly be brought in/out of // rotation. // -// import _ "github.com/docker/distribution/health/api" +// import _ "github.com/docker/distribution/registry/health/api" // // # curl localhost:5001/debug/health // {} @@ -122,12 +122,6 @@ // # curl localhost:5001/debug/health // {"fileChecker":"file exists"} // -// FileChecker only accepts absolute or relative file path. It does not work -// properly with tilde(~). You should make sure that the application has -// proper permission(read and execute permission for directory along with -// the specified file path). Otherwise, the FileChecker will report error -// and file health check is not ok. 
-// // You could also test the connectivity to a downstream service by using a // "HTTPChecker", but ensure that you only mark the test unhealthy if there // are a minimum of two failures in a row: diff --git a/vendor/github.com/docker/distribution/health/health_test.go b/vendor/github.com/docker/distribution/health/health_test.go index 8d1a028b7..766fe159f 100644 --- a/vendor/github.com/docker/distribution/health/health_test.go +++ b/vendor/github.com/docker/distribution/health/health_test.go @@ -25,8 +25,8 @@ func TestReturns200IfThereAreNoChecks(t *testing.T) { } } -// TestReturns503IfThereAreErrorChecks ensures that the result code of the -// health endpoint is 503 if there are health checks with errors. +// TestReturns500IfThereAreErrorChecks ensures that the result code of the +// health endpoint is 500 if there are health checks with errors func TestReturns503IfThereAreErrorChecks(t *testing.T) { recorder := httptest.NewRecorder() diff --git a/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go b/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go index 3aa0662d9..a2082ec02 100644 --- a/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go +++ b/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go @@ -6,8 +6,8 @@ import ( "fmt" "github.com/docker/distribution" + "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" - "github.com/opencontainers/go-digest" ) // MediaTypeManifestList specifies the mediaType for manifest lists. @@ -81,7 +81,7 @@ type ManifestList struct { Manifests []ManifestDescriptor `json:"manifests"` } -// References returns the distribution descriptors for the referenced image +// References returnes the distribution descriptors for the referenced image // manifests. 
func (m ManifestList) References() []distribution.Descriptor { dependencies := make([]distribution.Descriptor, len(m.Manifests)) diff --git a/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go b/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go index 9d222566c..5cdd76796 100644 --- a/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go +++ b/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go @@ -9,10 +9,11 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" - "github.com/docker/distribution/manifest" "github.com/docker/distribution/reference" "github.com/docker/libtrust" - "github.com/opencontainers/go-digest" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" ) type diffID digest.Digest @@ -94,7 +95,7 @@ func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Mani } if len(img.RootFS.DiffIDs) != len(mb.descriptors) { - return nil, fmt.Errorf("number of descriptors and number of layers in rootfs must match: len(%v) != len(%v)", img.RootFS.DiffIDs, mb.descriptors) + return nil, errors.New("number of descriptors and number of layers in rootfs must match") } // Generate IDs for each layer @@ -240,13 +241,8 @@ func (mb *configManifestBuilder) emptyTar(ctx context.Context) (digest.Digest, e // AppendReference adds a reference to the current ManifestBuilder func (mb *configManifestBuilder) AppendReference(d distribution.Describable) error { - descriptor := d.Descriptor() - - if err := descriptor.Digest.Validate(); err != nil { - return err - } - - mb.descriptors = append(mb.descriptors, descriptor) + // todo: verification here? 
+ mb.descriptors = append(mb.descriptors, d.Descriptor()) return nil } diff --git a/vendor/github.com/docker/distribution/manifest/schema1/config_builder_test.go b/vendor/github.com/docker/distribution/manifest/schema1/config_builder_test.go index 399d8f315..5f9abaa9f 100644 --- a/vendor/github.com/docker/distribution/manifest/schema1/config_builder_test.go +++ b/vendor/github.com/docker/distribution/manifest/schema1/config_builder_test.go @@ -9,9 +9,9 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" "github.com/docker/libtrust" - "github.com/opencontainers/go-digest" ) type mockBlobService struct { @@ -197,14 +197,10 @@ func TestConfigBuilder(t *testing.T) { bs := &mockBlobService{descriptors: make(map[digest.Digest]distribution.Descriptor)} - ref, err := reference.WithName("testrepo") + ref, err := reference.ParseNamed("testrepo:testtag") if err != nil { t.Fatalf("could not parse reference: %v", err) } - ref, err = reference.WithTag(ref, "testtag") - if err != nil { - t.Fatalf("could not add tag: %v", err) - } builder := NewConfigManifestBuilder(bs, pk, ref, []byte(imgJSON)) diff --git a/vendor/github.com/docker/distribution/manifest/schema1/manifest.go b/vendor/github.com/docker/distribution/manifest/schema1/manifest.go index 65042a75f..bff47bde0 100644 --- a/vendor/github.com/docker/distribution/manifest/schema1/manifest.go +++ b/vendor/github.com/docker/distribution/manifest/schema1/manifest.go @@ -5,9 +5,9 @@ import ( "fmt" "github.com/docker/distribution" + "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/libtrust" - "github.com/opencontainers/go-digest" ) const ( diff --git a/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go b/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go index ae4014781..fc1045f9e 100644 --- 
a/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go +++ b/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go @@ -6,10 +6,10 @@ import ( "errors" "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/reference" "github.com/docker/libtrust" - "github.com/opencontainers/go-digest" ) // referenceManifestBuilder is a type for constructing manifests from schema1 diff --git a/vendor/github.com/docker/distribution/manifest/schema1/reference_builder_test.go b/vendor/github.com/docker/distribution/manifest/schema1/reference_builder_test.go index 9eaa666c9..35db28e46 100644 --- a/vendor/github.com/docker/distribution/manifest/schema1/reference_builder_test.go +++ b/vendor/github.com/docker/distribution/manifest/schema1/reference_builder_test.go @@ -4,10 +4,10 @@ import ( "testing" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/reference" "github.com/docker/libtrust" - "github.com/opencontainers/go-digest" ) func makeSignedManifest(t *testing.T, pk libtrust.PrivateKey, refs []Reference) *SignedManifest { @@ -55,7 +55,7 @@ func TestReferenceBuilder(t *testing.T) { handCrafted := makeSignedManifest(t, pk, []Reference{r1, r2}) - ref, err := reference.WithName(handCrafted.Manifest.Name) + ref, err := reference.ParseNamed(handCrafted.Manifest.Name) if err != nil { t.Fatalf("could not parse reference: %v", err) } diff --git a/vendor/github.com/docker/distribution/manifest/schema2/builder.go b/vendor/github.com/docker/distribution/manifest/schema2/builder.go index 4b6ba5628..ec0bf858d 100644 --- a/vendor/github.com/docker/distribution/manifest/schema2/builder.go +++ b/vendor/github.com/docker/distribution/manifest/schema2/builder.go @@ -3,7 +3,7 @@ package schema2 import ( 
"github.com/docker/distribution" "github.com/docker/distribution/context" - "github.com/opencontainers/go-digest" + "github.com/docker/distribution/digest" ) // builder is a type for constructing manifests. @@ -11,25 +11,21 @@ type builder struct { // bs is a BlobService used to publish the configuration blob. bs distribution.BlobService - // configMediaType is media type used to describe configuration - configMediaType string - // configJSON references configJSON []byte - // dependencies is a list of descriptors that gets built by successive - // calls to AppendReference. In case of image configuration these are layers. - dependencies []distribution.Descriptor + // layers is a list of layer descriptors that gets built by successive + // calls to AppendReference. + layers []distribution.Descriptor } // NewManifestBuilder is used to build new manifests for the current schema // version. It takes a BlobService so it can publish the configuration blob // as part of the Build process. -func NewManifestBuilder(bs distribution.BlobService, configMediaType string, configJSON []byte) distribution.ManifestBuilder { +func NewManifestBuilder(bs distribution.BlobService, configJSON []byte) distribution.ManifestBuilder { mb := &builder{ - bs: bs, - configMediaType: configMediaType, - configJSON: make([]byte, len(configJSON)), + bs: bs, + configJSON: make([]byte, len(configJSON)), } copy(mb.configJSON, configJSON) @@ -40,9 +36,9 @@ func NewManifestBuilder(bs distribution.BlobService, configMediaType string, con func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { m := Manifest{ Versioned: SchemaVersion, - Layers: make([]distribution.Descriptor, len(mb.dependencies)), + Layers: make([]distribution.Descriptor, len(mb.layers)), } - copy(m.Layers, mb.dependencies) + copy(m.Layers, mb.layers) configDigest := digest.FromBytes(mb.configJSON) @@ -52,7 +48,7 @@ func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { case nil: // Override 
MediaType, since Put always replaces the specified media // type with application/octet-stream in the descriptor it returns. - m.Config.MediaType = mb.configMediaType + m.Config.MediaType = MediaTypeConfig return FromStruct(m) case distribution.ErrBlobUnknown: // nop @@ -61,10 +57,10 @@ func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { } // Add config to the blob store - m.Config, err = mb.bs.Put(ctx, mb.configMediaType, mb.configJSON) + m.Config, err = mb.bs.Put(ctx, MediaTypeConfig, mb.configJSON) // Override MediaType, since Put always replaces the specified media // type with application/octet-stream in the descriptor it returns. - m.Config.MediaType = mb.configMediaType + m.Config.MediaType = MediaTypeConfig if err != nil { return nil, err } @@ -74,11 +70,11 @@ func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { // AppendReference adds a reference to the current ManifestBuilder. func (mb *builder) AppendReference(d distribution.Describable) error { - mb.dependencies = append(mb.dependencies, d.Descriptor()) + mb.layers = append(mb.layers, d.Descriptor()) return nil } // References returns the current references added to this builder. 
func (mb *builder) References() []distribution.Descriptor { - return mb.dependencies + return mb.layers } diff --git a/vendor/github.com/docker/distribution/manifest/schema2/builder_test.go b/vendor/github.com/docker/distribution/manifest/schema2/builder_test.go index 697c1bc92..02ed401bf 100644 --- a/vendor/github.com/docker/distribution/manifest/schema2/builder_test.go +++ b/vendor/github.com/docker/distribution/manifest/schema2/builder_test.go @@ -6,7 +6,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" - "github.com/opencontainers/go-digest" + "github.com/docker/distribution/digest" ) type mockBlobService struct { @@ -166,7 +166,7 @@ func TestBuilder(t *testing.T) { } bs := &mockBlobService{descriptors: make(map[digest.Digest]distribution.Descriptor)} - builder := NewManifestBuilder(bs, MediaTypeImageConfig, imgJSON) + builder := NewManifestBuilder(bs, imgJSON) for _, d := range descriptors { if err := builder.AppendReference(d); err != nil { @@ -195,7 +195,7 @@ func TestBuilder(t *testing.T) { if target.Digest != configDigest { t.Fatalf("unexpected digest in target: %s", target.Digest.String()) } - if target.MediaType != MediaTypeImageConfig { + if target.MediaType != MediaTypeConfig { t.Fatalf("unexpected media type in target: %s", target.MediaType) } if target.Size != 3153 { @@ -203,8 +203,8 @@ func TestBuilder(t *testing.T) { } references := manifest.References() - expected := append([]distribution.Descriptor{manifest.Target()}, descriptors...) 
- if !reflect.DeepEqual(references, expected) { + + if !reflect.DeepEqual(references, descriptors) { t.Fatal("References() does not match the descriptors added") } } diff --git a/vendor/github.com/docker/distribution/manifest/schema2/manifest.go b/vendor/github.com/docker/distribution/manifest/schema2/manifest.go index a2708c750..dd2ed114c 100644 --- a/vendor/github.com/docker/distribution/manifest/schema2/manifest.go +++ b/vendor/github.com/docker/distribution/manifest/schema2/manifest.go @@ -6,19 +6,19 @@ import ( "fmt" "github.com/docker/distribution" + "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" - "github.com/opencontainers/go-digest" ) const ( // MediaTypeManifest specifies the mediaType for the current version. MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json" - // MediaTypeImageConfig specifies the mediaType for the image configuration. - MediaTypeImageConfig = "application/vnd.docker.container.image.v1+json" + // MediaTypeConfig specifies the mediaType for the image configuration. + MediaTypeConfig = "application/vnd.docker.container.image.v1+json" // MediaTypePluginConfig specifies the mediaType for plugin configuration. - MediaTypePluginConfig = "application/vnd.docker.plugin.v1+json" + MediaTypePluginConfig = "application/vnd.docker.plugin.image.v0+json" // MediaTypeLayer is the mediaType used for layers referenced by the // manifest. @@ -27,10 +27,6 @@ const ( // MediaTypeForeignLayer is the mediaType used for layers that must be // downloaded from foreign URLs. MediaTypeForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" - - // MediaTypeUncompressedLayer is the mediaType used for layers which - // are not compressed. - MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar" ) var ( @@ -73,10 +69,7 @@ type Manifest struct { // References returnes the descriptors of this manifests references. 
func (m Manifest) References() []distribution.Descriptor { - references := make([]distribution.Descriptor, 0, 1+len(m.Layers)) - references = append(references, m.Config) - references = append(references, m.Layers...) - return references + return m.Layers } // Target returns the target of this signed manifest. diff --git a/vendor/github.com/docker/distribution/manifest/schema2/manifest_test.go b/vendor/github.com/docker/distribution/manifest/schema2/manifest_test.go index 86226606f..459d614cd 100644 --- a/vendor/github.com/docker/distribution/manifest/schema2/manifest_test.go +++ b/vendor/github.com/docker/distribution/manifest/schema2/manifest_test.go @@ -32,7 +32,7 @@ func TestManifest(t *testing.T) { Config: distribution.Descriptor{ Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", Size: 985, - MediaType: MediaTypeImageConfig, + MediaType: MediaTypeConfig, }, Layers: []distribution.Descriptor{ { @@ -82,7 +82,7 @@ func TestManifest(t *testing.T) { if target.Digest != "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b" { t.Fatalf("unexpected digest in target: %s", target.Digest.String()) } - if target.MediaType != MediaTypeImageConfig { + if target.MediaType != MediaTypeConfig { t.Fatalf("unexpected media type in target: %s", target.MediaType) } if target.Size != 985 { @@ -90,22 +90,16 @@ func TestManifest(t *testing.T) { } references := deserialized.References() - if len(references) != 2 { + if len(references) != 1 { t.Fatalf("unexpected number of references: %d", len(references)) } - - if !reflect.DeepEqual(references[0], target) { - t.Fatalf("first reference should be target: %v != %v", references[0], target) - } - - // Test the second reference - if references[1].Digest != "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b" { + if references[0].Digest != "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b" { t.Fatalf("unexpected digest in reference: %s", 
references[0].Digest.String()) } - if references[1].MediaType != MediaTypeLayer { + if references[0].MediaType != MediaTypeLayer { t.Fatalf("unexpected media type in reference: %s", references[0].MediaType) } - if references[1].Size != 153263 { + if references[0].Size != 153263 { t.Fatalf("unexpected size in reference: %d", references[0].Size) } } diff --git a/vendor/github.com/docker/distribution/manifests.go b/vendor/github.com/docker/distribution/manifests.go index 2c99f25d3..2ac7c8f21 100644 --- a/vendor/github.com/docker/distribution/manifests.go +++ b/vendor/github.com/docker/distribution/manifests.go @@ -5,25 +5,20 @@ import ( "mime" "github.com/docker/distribution/context" - "github.com/opencontainers/go-digest" + "github.com/docker/distribution/digest" ) // Manifest represents a registry object specifying a set of // references and an optional target type Manifest interface { // References returns a list of objects which make up this manifest. - // A reference is anything which can be represented by a - // distribution.Descriptor. These can consist of layers, resources or other - // manifests. - // - // While no particular order is required, implementations should return - // them from highest to lowest priority. For example, one might want to - // return the base layer before the top layer. + // The references are strictly ordered from base to head. A reference + // is anything which can be represented by a distribution.Descriptor References() []Descriptor // Payload provides the serialized format of the manifest, in addition to - // the media type. - Payload() (mediaType string, payload []byte, err error) + // the mediatype. + Payload() (mediatype string, payload []byte, err error) } // ManifestBuilder creates a manifest allowing one to include dependencies. @@ -41,9 +36,6 @@ type ManifestBuilder interface { // AppendReference includes the given object in the manifest after any // existing dependencies. 
If the add fails, such as when adding an // unsupported dependency, an error may be returned. - // - // The destination of the reference is dependent on the manifest type and - // the dependency type. AppendReference(dependency Describable) error } @@ -94,20 +86,20 @@ var mappings = make(map[string]UnmarshalFunc, 0) func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) { // Need to look up by the actual media type, not the raw contents of // the header. Strip semicolons and anything following them. - var mediaType string + var mediatype string if ctHeader != "" { var err error - mediaType, _, err = mime.ParseMediaType(ctHeader) + mediatype, _, err = mime.ParseMediaType(ctHeader) if err != nil { return nil, Descriptor{}, err } } - unmarshalFunc, ok := mappings[mediaType] + unmarshalFunc, ok := mappings[mediatype] if !ok { unmarshalFunc, ok = mappings[""] if !ok { - return nil, Descriptor{}, fmt.Errorf("unsupported manifest media type and no default available: %s", mediaType) + return nil, Descriptor{}, fmt.Errorf("unsupported manifest mediatype and no default available: %s", mediatype) } } @@ -116,10 +108,10 @@ func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) // RegisterManifestSchema registers an UnmarshalFunc for a given schema type. 
This // should be called from specific -func RegisterManifestSchema(mediaType string, u UnmarshalFunc) error { - if _, ok := mappings[mediaType]; ok { - return fmt.Errorf("manifest media type registration would overwrite existing: %s", mediaType) +func RegisterManifestSchema(mediatype string, u UnmarshalFunc) error { + if _, ok := mappings[mediatype]; ok { + return fmt.Errorf("manifest mediatype registration would overwrite existing: %s", mediatype) } - mappings[mediaType] = u + mappings[mediatype] = u return nil } diff --git a/vendor/github.com/docker/distribution/notifications/bridge.go b/vendor/github.com/docker/distribution/notifications/bridge.go index 8f6386d3c..502288a40 100644 --- a/vendor/github.com/docker/distribution/notifications/bridge.go +++ b/vendor/github.com/docker/distribution/notifications/bridge.go @@ -6,9 +6,9 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" "github.com/docker/distribution/uuid" - "github.com/opencontainers/go-digest" ) type bridge struct { diff --git a/vendor/github.com/docker/distribution/notifications/bridge_test.go b/vendor/github.com/docker/distribution/notifications/bridge_test.go index 863509936..0f85791c8 100644 --- a/vendor/github.com/docker/distribution/notifications/bridge_test.go +++ b/vendor/github.com/docker/distribution/notifications/bridge_test.go @@ -4,12 +4,12 @@ import ( "testing" "github.com/docker/distribution" + "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/uuid" "github.com/docker/libtrust" - "github.com/opencontainers/go-digest" ) var ( @@ -43,7 +43,7 @@ func TestEventBridgeManifestPulled(t *testing.T) { return nil })) - repoRef, _ := reference.WithName(repo) + repoRef, _ := reference.ParseNamed(repo) if err := 
l.ManifestPulled(repoRef, sm); err != nil { t.Fatalf("unexpected error notifying manifest pull: %v", err) } @@ -56,7 +56,7 @@ func TestEventBridgeManifestPushed(t *testing.T) { return nil })) - repoRef, _ := reference.WithName(repo) + repoRef, _ := reference.ParseNamed(repo) if err := l.ManifestPushed(repoRef, sm); err != nil { t.Fatalf("unexpected error notifying manifest pull: %v", err) } @@ -72,7 +72,7 @@ func TestEventBridgeManifestPushedWithTag(t *testing.T) { return nil })) - repoRef, _ := reference.WithName(repo) + repoRef, _ := reference.ParseNamed(repo) if err := l.ManifestPushed(repoRef, sm, distribution.WithTag(m.Tag)); err != nil { t.Fatalf("unexpected error notifying manifest pull: %v", err) } @@ -88,7 +88,7 @@ func TestEventBridgeManifestPulledWithTag(t *testing.T) { return nil })) - repoRef, _ := reference.WithName(repo) + repoRef, _ := reference.ParseNamed(repo) if err := l.ManifestPulled(repoRef, sm, distribution.WithTag(m.Tag)); err != nil { t.Fatalf("unexpected error notifying manifest pull: %v", err) } @@ -100,7 +100,7 @@ func TestEventBridgeManifestDeleted(t *testing.T) { return nil })) - repoRef, _ := reference.WithName(repo) + repoRef, _ := reference.ParseNamed(repo) if err := l.ManifestDeleted(repoRef, dgst); err != nil { t.Fatalf("unexpected error notifying manifest pull: %v", err) } @@ -160,7 +160,7 @@ func checkCommonManifest(t *testing.T, action string, events ...Event) { t.Fatalf("unexpected event action: %q != %q", event.Action, action) } - repoRef, _ := reference.WithName(repo) + repoRef, _ := reference.ParseNamed(repo) ref, _ := reference.WithDigest(repoRef, dgst) u, err := ub.BuildManifestURL(ref) if err != nil { diff --git a/vendor/github.com/docker/distribution/notifications/endpoint.go b/vendor/github.com/docker/distribution/notifications/endpoint.go index 44d0f6d7b..29a9e27b5 100644 --- a/vendor/github.com/docker/distribution/notifications/endpoint.go +++ b/vendor/github.com/docker/distribution/notifications/endpoint.go @@ -13,7 
+13,7 @@ type EndpointConfig struct { Threshold int Backoff time.Duration IgnoredMediaTypes []string - Transport *http.Transport `json:"-"` + Transport *http.Transport } // defaults set any zero-valued fields to a reasonable default. diff --git a/vendor/github.com/docker/distribution/notifications/event.go b/vendor/github.com/docker/distribution/notifications/event.go index 9651cd1b1..b59a72bed 100644 --- a/vendor/github.com/docker/distribution/notifications/event.go +++ b/vendor/github.com/docker/distribution/notifications/event.go @@ -77,7 +77,7 @@ type Event struct { Request RequestRecord `json:"request,omitempty"` // Actor specifies the agent that initiated the event. For most - // situations, this could be from the authorization context of the request. + // situations, this could be from the authorizaton context of the request. Actor ActorRecord `json:"actor,omitempty"` // Source identifies the registry node that generated the event. Put @@ -87,7 +87,7 @@ type Event struct { } // ActorRecord specifies the agent that initiated the event. For most -// situations, this could be from the authorization context of the request. +// situations, this could be from the authorizaton context of the request. // Data in this record can refer to both the initiating client and the // generating request. 
type ActorRecord struct { diff --git a/vendor/github.com/docker/distribution/notifications/http_test.go b/vendor/github.com/docker/distribution/notifications/http_test.go index de47f789e..e04693621 100644 --- a/vendor/github.com/docker/distribution/notifications/http_test.go +++ b/vendor/github.com/docker/distribution/notifications/http_test.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "mime" - "net" "net/http" "net/http/httptest" "reflect" @@ -95,21 +94,6 @@ func TestHTTPSink(t *testing.T) { var expectedMetrics EndpointMetrics expectedMetrics.Statuses = make(map[string]int) - closeL, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatalf("unexpected error creating listener: %v", err) - } - defer closeL.Close() - go func() { - for { - c, err := closeL.Accept() - if err != nil { - return - } - c.Close() - } - }() - for _, tc := range []struct { events []Event // events to send url string @@ -137,8 +121,8 @@ func TestHTTPSink(t *testing.T) { failure: true, }, { - // Case where connection is immediately closed - url: closeL.Addr().String(), + // Case where connection never goes through. + url: "http://shoudlntresolve/", failure: true, }, } { diff --git a/vendor/github.com/docker/distribution/notifications/listener.go b/vendor/github.com/docker/distribution/notifications/listener.go index 25b5a800f..c968b98ad 100644 --- a/vendor/github.com/docker/distribution/notifications/listener.go +++ b/vendor/github.com/docker/distribution/notifications/listener.go @@ -5,8 +5,8 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" - "github.com/opencontainers/go-digest" ) // ManifestListener describes a set of methods for listening to events related to manifests. 
diff --git a/vendor/github.com/docker/distribution/notifications/listener_test.go b/vendor/github.com/docker/distribution/notifications/listener_test.go index a58498078..c7db5944e 100644 --- a/vendor/github.com/docker/distribution/notifications/listener_test.go +++ b/vendor/github.com/docker/distribution/notifications/listener_test.go @@ -7,6 +7,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/reference" @@ -15,7 +16,6 @@ import ( "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" "github.com/docker/libtrust" - "github.com/opencontainers/go-digest" ) func TestListener(t *testing.T) { @@ -33,7 +33,7 @@ func TestListener(t *testing.T) { ops: make(map[string]int), } - repoRef, _ := reference.WithName("foo/bar") + repoRef, _ := reference.ParseNamed("foo/bar") repository, err := registry.Repository(ctx, repoRef) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) diff --git a/vendor/github.com/docker/distribution/notifications/metrics_test.go b/vendor/github.com/docker/distribution/notifications/metrics_test.go deleted file mode 100644 index 03a08e2c8..000000000 --- a/vendor/github.com/docker/distribution/notifications/metrics_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package notifications - -import ( - "encoding/json" - "expvar" - "testing" -) - -func TestMetricsExpvar(t *testing.T) { - endpointsVar := expvar.Get("registry").(*expvar.Map).Get("notifications").(*expvar.Map).Get("endpoints") - - var v interface{} - if err := json.Unmarshal([]byte(endpointsVar.String()), &v); err != nil { - t.Fatalf("unexpected error unmarshaling endpoints: %v", err) - } - if v != nil { - t.Fatalf("expected nil, got %#v", v) - } - - NewEndpoint("x", "y", EndpointConfig{}) - - if err := 
json.Unmarshal([]byte(endpointsVar.String()), &v); err != nil { - t.Fatalf("unexpected error unmarshaling endpoints: %v", err) - } - if slice, ok := v.([]interface{}); !ok || len(slice) != 1 { - t.Logf("expected one-element []interface{}, got %#v", v) - } -} diff --git a/vendor/github.com/docker/distribution/notifications/sinks.go b/vendor/github.com/docker/distribution/notifications/sinks.go index beb8bad46..549ba97e2 100644 --- a/vendor/github.com/docker/distribution/notifications/sinks.go +++ b/vendor/github.com/docker/distribution/notifications/sinks.go @@ -151,7 +151,7 @@ func (eq *eventQueue) Write(events ...Event) error { return nil } -// Close shuts down the event queue, flushing +// Close shutsdown the event queue, flushing func (eq *eventQueue) Close() error { eq.mu.Lock() defer eq.mu.Unlock() diff --git a/vendor/github.com/docker/distribution/reference/helpers.go b/vendor/github.com/docker/distribution/reference/helpers.go deleted file mode 100644 index 978df7eab..000000000 --- a/vendor/github.com/docker/distribution/reference/helpers.go +++ /dev/null @@ -1,42 +0,0 @@ -package reference - -import "path" - -// IsNameOnly returns true if reference only contains a repo name. -func IsNameOnly(ref Named) bool { - if _, ok := ref.(NamedTagged); ok { - return false - } - if _, ok := ref.(Canonical); ok { - return false - } - return true -} - -// FamiliarName returns the familiar name string -// for the given named, familiarizing if needed. -func FamiliarName(ref Named) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().Name() - } - return ref.Name() -} - -// FamiliarString returns the familiar string representation -// for the given reference, familiarizing if needed. -func FamiliarString(ref Reference) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().String() - } - return ref.String() -} - -// FamiliarMatch reports whether ref matches the specified pattern. 
-// See https://godoc.org/path#Match for supported patterns. -func FamiliarMatch(pattern string, ref Reference) (bool, error) { - matched, err := path.Match(pattern, FamiliarString(ref)) - if namedRef, isNamed := ref.(Named); isNamed && !matched { - matched, _ = path.Match(pattern, FamiliarName(namedRef)) - } - return matched, err -} diff --git a/vendor/github.com/docker/distribution/reference/normalize.go b/vendor/github.com/docker/distribution/reference/normalize.go deleted file mode 100644 index 2d71fc5e9..000000000 --- a/vendor/github.com/docker/distribution/reference/normalize.go +++ /dev/null @@ -1,170 +0,0 @@ -package reference - -import ( - "errors" - "fmt" - "strings" - - "github.com/docker/distribution/digestset" - "github.com/opencontainers/go-digest" -) - -var ( - legacyDefaultDomain = "index.docker.io" - defaultDomain = "docker.io" - officialRepoName = "library" - defaultTag = "latest" -) - -// normalizedNamed represents a name which has been -// normalized and has a familiar form. A familiar name -// is what is used in Docker UI. An example normalized -// name is "docker.io/library/ubuntu" and corresponding -// familiar name of "ubuntu". -type normalizedNamed interface { - Named - Familiar() Named -} - -// ParseNormalizedNamed parses a string into a named reference -// transforming a familiar name from Docker UI to a fully -// qualified reference. If the value may be an identifier -// use ParseAnyReference. 
-func ParseNormalizedNamed(s string) (Named, error) { - if ok := anchoredIdentifierRegexp.MatchString(s); ok { - return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) - } - domain, remainder := splitDockerDomain(s) - var remoteName string - if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { - remoteName = remainder[:tagSep] - } else { - remoteName = remainder - } - if strings.ToLower(remoteName) != remoteName { - return nil, errors.New("invalid reference format: repository name must be lowercase") - } - - ref, err := Parse(domain + "/" + remainder) - if err != nil { - return nil, err - } - named, isNamed := ref.(Named) - if !isNamed { - return nil, fmt.Errorf("reference %s has no name", ref.String()) - } - return named, nil -} - -// splitDockerDomain splits a repository name to domain and remotename string. -// If no valid domain is found, the default domain is used. Repository name -// needs to be already validated before. -func splitDockerDomain(name string) (domain, remainder string) { - i := strings.IndexRune(name, '/') - if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { - domain, remainder = defaultDomain, name - } else { - domain, remainder = name[:i], name[i+1:] - } - if domain == legacyDefaultDomain { - domain = defaultDomain - } - if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { - remainder = officialRepoName + "/" + remainder - } - return -} - -// familiarizeName returns a shortened version of the name familiar -// to to the Docker UI. Familiar names have the default domain -// "docker.io" and "library/" repository prefix removed. -// For example, "docker.io/library/redis" will have the familiar -// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". -// Returns a familiarized named only reference. 
-func familiarizeName(named namedRepository) repository { - repo := repository{ - domain: named.Domain(), - path: named.Path(), - } - - if repo.domain == defaultDomain { - repo.domain = "" - // Handle official repositories which have the pattern "library/" - if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { - repo.path = split[1] - } - } - return repo -} - -func (r reference) Familiar() Named { - return reference{ - namedRepository: familiarizeName(r.namedRepository), - tag: r.tag, - digest: r.digest, - } -} - -func (r repository) Familiar() Named { - return familiarizeName(r) -} - -func (t taggedReference) Familiar() Named { - return taggedReference{ - namedRepository: familiarizeName(t.namedRepository), - tag: t.tag, - } -} - -func (c canonicalReference) Familiar() Named { - return canonicalReference{ - namedRepository: familiarizeName(c.namedRepository), - digest: c.digest, - } -} - -// TagNameOnly adds the default tag "latest" to a reference if it only has -// a repo name. -func TagNameOnly(ref Named) Named { - if IsNameOnly(ref) { - namedTagged, err := WithTag(ref, defaultTag) - if err != nil { - // Default tag must be valid, to create a NamedTagged - // type with non-validated input the WithTag function - // should be used instead - panic(err) - } - return namedTagged - } - return ref -} - -// ParseAnyReference parses a reference string as a possible identifier, -// full digest, or familiar name. -func ParseAnyReference(ref string) (Reference, error) { - if ok := anchoredIdentifierRegexp.MatchString(ref); ok { - return digestReference("sha256:" + ref), nil - } - if dgst, err := digest.Parse(ref); err == nil { - return digestReference(dgst), nil - } - - return ParseNormalizedNamed(ref) -} - -// ParseAnyReferenceWithSet parses a reference string as a possible short -// identifier to be matched in a digest set, a full digest, or familiar name. 
-func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) { - if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok { - dgst, err := ds.Lookup(ref) - if err == nil { - return digestReference(dgst), nil - } - } else { - if dgst, err := digest.Parse(ref); err == nil { - return digestReference(dgst), nil - } - } - - return ParseNormalizedNamed(ref) -} diff --git a/vendor/github.com/docker/distribution/reference/normalize_test.go b/vendor/github.com/docker/distribution/reference/normalize_test.go deleted file mode 100644 index a881972ac..000000000 --- a/vendor/github.com/docker/distribution/reference/normalize_test.go +++ /dev/null @@ -1,625 +0,0 @@ -package reference - -import ( - "strconv" - "testing" - - "github.com/docker/distribution/digestset" - "github.com/opencontainers/go-digest" -) - -func TestValidateReferenceName(t *testing.T) { - validRepoNames := []string{ - "docker/docker", - "library/debian", - "debian", - "docker.io/docker/docker", - "docker.io/library/debian", - "docker.io/debian", - "index.docker.io/docker/docker", - "index.docker.io/library/debian", - "index.docker.io/debian", - "127.0.0.1:5000/docker/docker", - "127.0.0.1:5000/library/debian", - "127.0.0.1:5000/debian", - "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", - - // This test case was moved from invalid to valid since it is valid input - // when specified with a hostname, it removes the ambiguity from about - // whether the value is an identifier or repository name - "docker.io/1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", - } - invalidRepoNames := []string{ - "https://github.com/docker/docker", - "docker/Docker", - "-docker", - "-docker/docker", - "-docker.io/docker/docker", - "docker///docker", - "docker.io/docker/Docker", - "docker.io/docker///docker", - "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", - } - - for _, name := range invalidRepoNames { - _, err := ParseNormalizedNamed(name) - if err 
== nil { - t.Fatalf("Expected invalid repo name for %q", name) - } - } - - for _, name := range validRepoNames { - _, err := ParseNormalizedNamed(name) - if err != nil { - t.Fatalf("Error parsing repo name %s, got: %q", name, err) - } - } -} - -func TestValidateRemoteName(t *testing.T) { - validRepositoryNames := []string{ - // Sanity check. - "docker/docker", - - // Allow 64-character non-hexadecimal names (hexadecimal names are forbidden). - "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", - - // Allow embedded hyphens. - "docker-rules/docker", - - // Allow multiple hyphens as well. - "docker---rules/docker", - - //Username doc and image name docker being tested. - "doc/docker", - - // single character names are now allowed. - "d/docker", - "jess/t", - - // Consecutive underscores. - "dock__er/docker", - } - for _, repositoryName := range validRepositoryNames { - _, err := ParseNormalizedNamed(repositoryName) - if err != nil { - t.Errorf("Repository name should be valid: %v. Error: %v", repositoryName, err) - } - } - - invalidRepositoryNames := []string{ - // Disallow capital letters. - "docker/Docker", - - // Only allow one slash. - "docker///docker", - - // Disallow 64-character hexadecimal. - "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", - - // Disallow leading and trailing hyphens in namespace. - "-docker/docker", - "docker-/docker", - "-docker-/docker", - - // Don't allow underscores everywhere (as opposed to hyphens). - "____/____", - - "_docker/_docker", - - // Disallow consecutive periods. - "dock..er/docker", - "dock_.er/docker", - "dock-.er/docker", - - // No repository. 
- "docker/", - - //namespace too long - "this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255/docker", - } - for _, repositoryName := range invalidRepositoryNames { - if _, err := ParseNormalizedNamed(repositoryName); err == nil { - t.Errorf("Repository name should be invalid: %v", repositoryName) - } - } -} - -func TestParseRepositoryInfo(t *testing.T) { - type tcase struct { - RemoteName, FamiliarName, FullName, AmbiguousName, Domain string - } - - tcases := []tcase{ - { - RemoteName: "fooo/bar", - FamiliarName: "fooo/bar", - FullName: "docker.io/fooo/bar", - AmbiguousName: "index.docker.io/fooo/bar", - Domain: "docker.io", - }, - { - RemoteName: "library/ubuntu", - FamiliarName: "ubuntu", - FullName: "docker.io/library/ubuntu", - AmbiguousName: "library/ubuntu", - Domain: "docker.io", - }, - { - RemoteName: "nonlibrary/ubuntu", - FamiliarName: "nonlibrary/ubuntu", - FullName: "docker.io/nonlibrary/ubuntu", - AmbiguousName: "", - Domain: "docker.io", - }, - { - RemoteName: "other/library", - FamiliarName: "other/library", - FullName: "docker.io/other/library", - AmbiguousName: "", - Domain: "docker.io", - }, - { - RemoteName: "private/moonbase", - FamiliarName: "127.0.0.1:8000/private/moonbase", - FullName: "127.0.0.1:8000/private/moonbase", - AmbiguousName: "", - Domain: "127.0.0.1:8000", - }, - { - RemoteName: "privatebase", - FamiliarName: "127.0.0.1:8000/privatebase", - FullName: "127.0.0.1:8000/privatebase", - AmbiguousName: "", - Domain: "127.0.0.1:8000", - }, - { - RemoteName: "private/moonbase", - FamiliarName: "example.com/private/moonbase", - FullName: "example.com/private/moonbase", - AmbiguousName: "", - Domain: "example.com", - }, - { - RemoteName: "privatebase", - FamiliarName: "example.com/privatebase", - FullName: 
"example.com/privatebase", - AmbiguousName: "", - Domain: "example.com", - }, - { - RemoteName: "private/moonbase", - FamiliarName: "example.com:8000/private/moonbase", - FullName: "example.com:8000/private/moonbase", - AmbiguousName: "", - Domain: "example.com:8000", - }, - { - RemoteName: "privatebasee", - FamiliarName: "example.com:8000/privatebasee", - FullName: "example.com:8000/privatebasee", - AmbiguousName: "", - Domain: "example.com:8000", - }, - { - RemoteName: "library/ubuntu-12.04-base", - FamiliarName: "ubuntu-12.04-base", - FullName: "docker.io/library/ubuntu-12.04-base", - AmbiguousName: "index.docker.io/library/ubuntu-12.04-base", - Domain: "docker.io", - }, - { - RemoteName: "library/foo", - FamiliarName: "foo", - FullName: "docker.io/library/foo", - AmbiguousName: "docker.io/foo", - Domain: "docker.io", - }, - { - RemoteName: "library/foo/bar", - FamiliarName: "library/foo/bar", - FullName: "docker.io/library/foo/bar", - AmbiguousName: "", - Domain: "docker.io", - }, - { - RemoteName: "store/foo/bar", - FamiliarName: "store/foo/bar", - FullName: "docker.io/store/foo/bar", - AmbiguousName: "", - Domain: "docker.io", - }, - } - - for _, tcase := range tcases { - refStrings := []string{tcase.FamiliarName, tcase.FullName} - if tcase.AmbiguousName != "" { - refStrings = append(refStrings, tcase.AmbiguousName) - } - - var refs []Named - for _, r := range refStrings { - named, err := ParseNormalizedNamed(r) - if err != nil { - t.Fatal(err) - } - refs = append(refs, named) - } - - for _, r := range refs { - if expected, actual := tcase.FamiliarName, FamiliarName(r); expected != actual { - t.Fatalf("Invalid normalized reference for %q. Expected %q, got %q", r, expected, actual) - } - if expected, actual := tcase.FullName, r.String(); expected != actual { - t.Fatalf("Invalid canonical reference for %q. Expected %q, got %q", r, expected, actual) - } - if expected, actual := tcase.Domain, Domain(r); expected != actual { - t.Fatalf("Invalid domain for %q. 
Expected %q, got %q", r, expected, actual) - } - if expected, actual := tcase.RemoteName, Path(r); expected != actual { - t.Fatalf("Invalid remoteName for %q. Expected %q, got %q", r, expected, actual) - } - - } - } -} - -func TestParseReferenceWithTagAndDigest(t *testing.T) { - shortRef := "busybox:latest@sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa" - ref, err := ParseNormalizedNamed(shortRef) - if err != nil { - t.Fatal(err) - } - if expected, actual := "docker.io/library/"+shortRef, ref.String(); actual != expected { - t.Fatalf("Invalid parsed reference for %q: expected %q, got %q", ref, expected, actual) - } - - if _, isTagged := ref.(NamedTagged); !isTagged { - t.Fatalf("Reference from %q should support tag", ref) - } - if _, isCanonical := ref.(Canonical); !isCanonical { - t.Fatalf("Reference from %q should support digest", ref) - } - if expected, actual := shortRef, FamiliarString(ref); actual != expected { - t.Fatalf("Invalid parsed reference for %q: expected %q, got %q", ref, expected, actual) - } -} - -func TestInvalidReferenceComponents(t *testing.T) { - if _, err := ParseNormalizedNamed("-foo"); err == nil { - t.Fatal("Expected WithName to detect invalid name") - } - ref, err := ParseNormalizedNamed("busybox") - if err != nil { - t.Fatal(err) - } - if _, err := WithTag(ref, "-foo"); err == nil { - t.Fatal("Expected WithName to detect invalid tag") - } - if _, err := WithDigest(ref, digest.Digest("foo")); err == nil { - t.Fatal("Expected WithDigest to detect invalid digest") - } -} - -func equalReference(r1, r2 Reference) bool { - switch v1 := r1.(type) { - case digestReference: - if v2, ok := r2.(digestReference); ok { - return v1 == v2 - } - case repository: - if v2, ok := r2.(repository); ok { - return v1 == v2 - } - case taggedReference: - if v2, ok := r2.(taggedReference); ok { - return v1 == v2 - } - case canonicalReference: - if v2, ok := r2.(canonicalReference); ok { - return v1 == v2 - } - case reference: - if v2, ok 
:= r2.(reference); ok { - return v1 == v2 - } - } - return false -} - -func TestParseAnyReference(t *testing.T) { - tcases := []struct { - Reference string - Equivalent string - Expected Reference - Digests []digest.Digest - }{ - { - Reference: "redis", - Equivalent: "docker.io/library/redis", - }, - { - Reference: "redis:latest", - Equivalent: "docker.io/library/redis:latest", - }, - { - Reference: "docker.io/library/redis:latest", - Equivalent: "docker.io/library/redis:latest", - }, - { - Reference: "redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Equivalent: "docker.io/library/redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - }, - { - Reference: "docker.io/library/redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Equivalent: "docker.io/library/redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - }, - { - Reference: "dmcgowan/myapp", - Equivalent: "docker.io/dmcgowan/myapp", - }, - { - Reference: "dmcgowan/myapp:latest", - Equivalent: "docker.io/dmcgowan/myapp:latest", - }, - { - Reference: "docker.io/mcgowan/myapp:latest", - Equivalent: "docker.io/mcgowan/myapp:latest", - }, - { - Reference: "dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Equivalent: "docker.io/dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - }, - { - Reference: "docker.io/dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Equivalent: "docker.io/dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - }, - { - Reference: "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Expected: digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - }, - { - Reference: 
"sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Expected: digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - }, - { - Reference: "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9", - Equivalent: "docker.io/library/dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9", - }, - { - Reference: "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9", - Expected: digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Digests: []digest.Digest{ - digest.Digest("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - }, - }, - { - Reference: "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9", - Equivalent: "docker.io/library/dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9", - Digests: []digest.Digest{ - digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - }, - }, - { - Reference: "dbcc1c", - Expected: digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Digests: []digest.Digest{ - digest.Digest("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - }, - }, - { - Reference: "dbcc1", - Equivalent: "docker.io/library/dbcc1", - Digests: []digest.Digest{ - digest.Digest("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - }, 
- }, - { - Reference: "dbcc1c", - Equivalent: "docker.io/library/dbcc1c", - Digests: []digest.Digest{ - digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - }, - }, - } - - for _, tcase := range tcases { - var ref Reference - var err error - if len(tcase.Digests) == 0 { - ref, err = ParseAnyReference(tcase.Reference) - } else { - ds := digestset.NewSet() - for _, dgst := range tcase.Digests { - if err := ds.Add(dgst); err != nil { - t.Fatalf("Error adding digest %s: %v", dgst.String(), err) - } - } - ref, err = ParseAnyReferenceWithSet(tcase.Reference, ds) - } - if err != nil { - t.Fatalf("Error parsing reference %s: %v", tcase.Reference, err) - } - if ref.String() != tcase.Equivalent { - t.Fatalf("Unexpected string: %s, expected %s", ref.String(), tcase.Equivalent) - } - - expected := tcase.Expected - if expected == nil { - expected, err = Parse(tcase.Equivalent) - if err != nil { - t.Fatalf("Error parsing reference %s: %v", tcase.Equivalent, err) - } - } - if !equalReference(ref, expected) { - t.Errorf("Unexpected reference %#v, expected %#v", ref, expected) - } - } -} - -func TestNormalizedSplitHostname(t *testing.T) { - testcases := []struct { - input string - domain string - name string - }{ - { - input: "test.com/foo", - domain: "test.com", - name: "foo", - }, - { - input: "test_com/foo", - domain: "docker.io", - name: "test_com/foo", - }, - { - input: "docker/migrator", - domain: "docker.io", - name: "docker/migrator", - }, - { - input: "test.com:8080/foo", - domain: "test.com:8080", - name: "foo", - }, - { - input: "test-com:8080/foo", - domain: "test-com:8080", - name: "foo", - }, - { - input: "foo", - domain: "docker.io", - name: "library/foo", - }, - { - input: "xn--n3h.com/foo", - domain: "xn--n3h.com", - name: "foo", - }, - { - input: "xn--n3h.com:18080/foo", - domain: "xn--n3h.com:18080", - name: "foo", - }, - { - input: "docker.io/foo", - domain: "docker.io", - name: "library/foo", - }, - { - input: 
"docker.io/library/foo", - domain: "docker.io", - name: "library/foo", - }, - { - input: "docker.io/library/foo/bar", - domain: "docker.io", - name: "library/foo/bar", - }, - } - for _, testcase := range testcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.input)+": "+format, v...) - t.Fail() - } - - named, err := ParseNormalizedNamed(testcase.input) - if err != nil { - failf("error parsing name: %s", err) - } - domain, name := SplitHostname(named) - if domain != testcase.domain { - failf("unexpected domain: got %q, expected %q", domain, testcase.domain) - } - if name != testcase.name { - failf("unexpected name: got %q, expected %q", name, testcase.name) - } - } -} - -func TestMatchError(t *testing.T) { - named, err := ParseAnyReference("foo") - if err != nil { - t.Fatal(err) - } - _, err = FamiliarMatch("[-x]", named) - if err == nil { - t.Fatalf("expected an error, got nothing") - } -} - -func TestMatch(t *testing.T) { - matchCases := []struct { - reference string - pattern string - expected bool - }{ - { - reference: "foo", - pattern: "foo/**/ba[rz]", - expected: false, - }, - { - reference: "foo/any/bat", - pattern: "foo/**/ba[rz]", - expected: false, - }, - { - reference: "foo/a/bar", - pattern: "foo/**/ba[rz]", - expected: true, - }, - { - reference: "foo/b/baz", - pattern: "foo/**/ba[rz]", - expected: true, - }, - { - reference: "foo/c/baz:tag", - pattern: "foo/**/ba[rz]", - expected: true, - }, - { - reference: "foo/c/baz:tag", - pattern: "foo/*/baz:tag", - expected: true, - }, - { - reference: "foo/c/baz:tag", - pattern: "foo/c/baz:tag", - expected: true, - }, - { - reference: "example.com/foo/c/baz:tag", - pattern: "*/foo/c/baz", - expected: true, - }, - { - reference: "example.com/foo/c/baz:tag", - pattern: "example.com/foo/c/baz", - expected: true, - }, - } - for _, c := range matchCases { - named, err := ParseAnyReference(c.reference) - if err != nil { - t.Fatal(err) - } - actual, err := 
FamiliarMatch(c.pattern, named) - if err != nil { - t.Fatal(err) - } - if actual != c.expected { - t.Fatalf("expected %s match %s to be %v, was %v", c.reference, c.pattern, c.expected, actual) - } - } -} diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go index 2f66cca87..5b3e08ee4 100644 --- a/vendor/github.com/docker/distribution/reference/reference.go +++ b/vendor/github.com/docker/distribution/reference/reference.go @@ -4,24 +4,21 @@ // Grammar // // reference := name [ ":" tag ] [ "@" digest ] -// name := [domain '/'] path-component ['/' path-component]* -// domain := domain-component ['.' domain-component]* [':' port-number] -// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ +// name := [hostname '/'] component ['/' component]* +// hostname := hostcomponent ['.' hostcomponent]* [':' port-number] +// hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ // port-number := /[0-9]+/ -// path-component := alpha-numeric [separator alpha-numeric]* +// component := alpha-numeric [separator alpha-numeric]* // alpha-numeric := /[a-z0-9]+/ // separator := /[_.]|__|[-]*/ // // tag := /[\w][\w.-]{0,127}/ // // digest := digest-algorithm ":" digest-hex -// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* +// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ] // digest-algorithm-separator := /[+.-_]/ // digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ // digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value -// -// identifier := /[a-f0-9]{64}/ -// short-identifier := /[a-f0-9]{6,64}/ package reference import ( @@ -29,7 +26,7 @@ import ( "fmt" "strings" - "github.com/opencontainers/go-digest" + "github.com/docker/distribution/digest" ) const ( @@ -55,9 +52,6 @@ var ( // ErrNameTooLong is returned when a repository name is longer 
than NameTotalLengthMax. ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) - - // ErrNameNotCanonical is returned when a name is not canonical. - ErrNameNotCanonical = errors.New("repository name must be canonical") ) // Reference is an opaque object reference identifier that may include @@ -131,56 +125,23 @@ type Digested interface { } // Canonical reference is an object with a fully unique -// name including a name with domain and digest +// name including a name with hostname and digest type Canonical interface { Named Digest() digest.Digest } -// namedRepository is a reference to a repository with a name. -// A namedRepository has both domain and path components. -type namedRepository interface { - Named - Domain() string - Path() string -} - -// Domain returns the domain part of the Named reference -func Domain(named Named) string { - if r, ok := named.(namedRepository); ok { - return r.Domain() - } - domain, _ := splitDomain(named.Name()) - return domain -} - -// Path returns the name without the domain part of the Named reference -func Path(named Named) (name string) { - if r, ok := named.(namedRepository); ok { - return r.Path() - } - _, path := splitDomain(named.Name()) - return path -} - -func splitDomain(name string) (string, string) { - match := anchoredNameRegexp.FindStringSubmatch(name) - if len(match) != 3 { - return "", name - } - return match[1], match[2] -} - // SplitHostname splits a named reference into a // hostname and name string. 
If no valid hostname is // found, the hostname is empty and the full value // is returned as name -// DEPRECATED: Use Domain or Path func SplitHostname(named Named) (string, string) { - if r, ok := named.(namedRepository); ok { - return r.Domain(), r.Path() + name := named.Name() + match := anchoredNameRegexp.FindStringSubmatch(name) + if match == nil || len(match) != 3 { + return "", name } - return splitDomain(named.Name()) + return match[1], match[2] } // Parse parses s and returns a syntactically valid Reference. @@ -202,24 +163,13 @@ func Parse(s string) (Reference, error) { return nil, ErrNameTooLong } - var repo repository - - nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) - if nameMatch != nil && len(nameMatch) == 3 { - repo.domain = nameMatch[1] - repo.path = nameMatch[2] - } else { - repo.domain = "" - repo.path = matches[1] - } - ref := reference{ - namedRepository: repo, - tag: matches[2], + name: matches[1], + tag: matches[2], } if matches[3] != "" { var err error - ref.digest, err = digest.Parse(matches[3]) + ref.digest, err = digest.ParseDigest(matches[3]) if err != nil { return nil, err } @@ -234,17 +184,18 @@ func Parse(s string) (Reference, error) { } // ParseNamed parses s and returns a syntactically valid reference implementing -// the Named interface. The reference must have a name and be in the canonical -// form, otherwise an error is returned. +// the Named interface. The reference must have a name, otherwise an error is +// returned. // If an error was encountered it is returned, along with a nil Reference. // NOTE: ParseNamed will not handle short digests. 
func ParseNamed(s string) (Named, error) { - named, err := ParseNormalizedNamed(s) + ref, err := Parse(s) if err != nil { return nil, err } - if named.String() != s { - return nil, ErrNameNotCanonical + named, isNamed := ref.(Named) + if !isNamed { + return nil, fmt.Errorf("reference %s has no name", ref.String()) } return named, nil } @@ -255,15 +206,10 @@ func WithName(name string) (Named, error) { if len(name) > NameTotalLengthMax { return nil, ErrNameTooLong } - - match := anchoredNameRegexp.FindStringSubmatch(name) - if match == nil || len(match) != 3 { + if !anchoredNameRegexp.MatchString(name) { return nil, ErrReferenceInvalidFormat } - return repository{ - domain: match[1], - path: match[2], - }, nil + return repository(name), nil } // WithTag combines the name from "name" and the tag from "tag" to form a @@ -272,23 +218,9 @@ func WithTag(name Named, tag string) (NamedTagged, error) { if !anchoredTagRegexp.MatchString(tag) { return nil, ErrTagInvalidFormat } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if canonical, ok := name.(Canonical); ok { - return reference{ - namedRepository: repo, - tag: tag, - digest: canonical.Digest(), - }, nil - } return taggedReference{ - namedRepository: repo, - tag: tag, + name: name.Name(), + tag: tag, }, nil } @@ -298,37 +230,14 @@ func WithDigest(name Named, digest digest.Digest) (Canonical, error) { if !anchoredDigestRegexp.MatchString(digest.String()) { return nil, ErrDigestInvalidFormat } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if tagged, ok := name.(Tagged); ok { - return reference{ - namedRepository: repo, - tag: tagged.Tag(), - digest: digest, - }, nil - } return canonicalReference{ - namedRepository: repo, - digest: digest, + name: name.Name(), + digest: digest, }, nil } -// TrimNamed 
removes any tag or digest from the named reference. -func TrimNamed(ref Named) Named { - domain, path := SplitHostname(ref) - return repository{ - domain: domain, - path: path, - } -} - func getBestReferenceType(ref reference) Reference { - if ref.Name() == "" { + if ref.name == "" { // Allow digest only references if ref.digest != "" { return digestReference(ref.digest) @@ -338,16 +247,16 @@ func getBestReferenceType(ref reference) Reference { if ref.tag == "" { if ref.digest != "" { return canonicalReference{ - namedRepository: ref.namedRepository, - digest: ref.digest, + name: ref.name, + digest: ref.digest, } } - return ref.namedRepository + return repository(ref.name) } if ref.digest == "" { return taggedReference{ - namedRepository: ref.namedRepository, - tag: ref.tag, + name: ref.name, + tag: ref.tag, } } @@ -355,13 +264,17 @@ func getBestReferenceType(ref reference) Reference { } type reference struct { - namedRepository + name string tag string digest digest.Digest } func (r reference) String() string { - return r.Name() + ":" + r.tag + "@" + r.digest.String() + return r.name + ":" + r.tag + "@" + r.digest.String() +} + +func (r reference) Name() string { + return r.name } func (r reference) Tag() string { @@ -372,34 +285,20 @@ func (r reference) Digest() digest.Digest { return r.digest } -type repository struct { - domain string - path string -} +type repository string func (r repository) String() string { - return r.Name() + return string(r) } func (r repository) Name() string { - if r.domain == "" { - return r.path - } - return r.domain + "/" + r.path -} - -func (r repository) Domain() string { - return r.domain -} - -func (r repository) Path() string { - return r.path + return string(r) } type digestReference digest.Digest func (d digestReference) String() string { - return digest.Digest(d).String() + return d.String() } func (d digestReference) Digest() digest.Digest { @@ -407,12 +306,16 @@ func (d digestReference) Digest() digest.Digest { } type 
taggedReference struct { - namedRepository - tag string + name string + tag string } func (t taggedReference) String() string { - return t.Name() + ":" + t.tag + return t.name + ":" + t.tag +} + +func (t taggedReference) Name() string { + return t.name } func (t taggedReference) Tag() string { @@ -420,12 +323,16 @@ func (t taggedReference) Tag() string { } type canonicalReference struct { - namedRepository + name string digest digest.Digest } func (c canonicalReference) String() string { - return c.Name() + "@" + c.digest.String() + return c.name + "@" + c.digest.String() +} + +func (c canonicalReference) Name() string { + return c.name } func (c canonicalReference) Digest() digest.Digest { diff --git a/vendor/github.com/docker/distribution/reference/reference_test.go b/vendor/github.com/docker/distribution/reference/reference_test.go index 16b871f98..f60cf093e 100644 --- a/vendor/github.com/docker/distribution/reference/reference_test.go +++ b/vendor/github.com/docker/distribution/reference/reference_test.go @@ -1,14 +1,12 @@ package reference import ( - _ "crypto/sha256" - _ "crypto/sha512" "encoding/json" "strconv" "strings" "testing" - "github.com/opencontainers/go-digest" + "github.com/docker/distribution/digest" ) func TestReferenceParse(t *testing.T) { @@ -21,8 +19,8 @@ func TestReferenceParse(t *testing.T) { err error // repository is the string representation for the reference repository string - // domain is the domain expected in the reference - domain string + // hostname is the hostname expected in the reference + hostname string // tag is the tag for the reference tag string // digest is the digest for the reference (enforces digest reference) @@ -44,37 +42,37 @@ func TestReferenceParse(t *testing.T) { }, { input: "test.com/repo:tag", - domain: "test.com", + hostname: "test.com", repository: "test.com/repo", tag: "tag", }, { input: "test:5000/repo", - domain: "test:5000", + hostname: "test:5000", repository: "test:5000/repo", }, { input: 
"test:5000/repo:tag", - domain: "test:5000", + hostname: "test:5000", repository: "test:5000/repo", tag: "tag", }, { input: "test:5000/repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - domain: "test:5000", + hostname: "test:5000", repository: "test:5000/repo", digest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", }, { input: "test:5000/repo:tag@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - domain: "test:5000", + hostname: "test:5000", repository: "test:5000/repo", tag: "tag", digest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", }, { input: "test:5000/repo", - domain: "test:5000", + hostname: "test:5000", repository: "test:5000/repo", }, { @@ -122,7 +120,7 @@ func TestReferenceParse(t *testing.T) { }, { input: strings.Repeat("a/", 127) + "a:tag-puts-this-over-max", - domain: "a", + hostname: "a", repository: strings.Repeat("a/", 127) + "a", tag: "tag-puts-this-over-max", }, @@ -132,30 +130,30 @@ func TestReferenceParse(t *testing.T) { }, { input: "sub-dom1.foo.com/bar/baz/quux", - domain: "sub-dom1.foo.com", + hostname: "sub-dom1.foo.com", repository: "sub-dom1.foo.com/bar/baz/quux", }, { input: "sub-dom1.foo.com/bar/baz/quux:some-long-tag", - domain: "sub-dom1.foo.com", + hostname: "sub-dom1.foo.com", repository: "sub-dom1.foo.com/bar/baz/quux", tag: "some-long-tag", }, { input: "b.gcr.io/test.example.com/my-app:test.example.com", - domain: "b.gcr.io", + hostname: "b.gcr.io", repository: "b.gcr.io/test.example.com/my-app", tag: "test.example.com", }, { input: "xn--n3h.com/myimage:xn--n3h.com", // ☃.com in punycode - domain: "xn--n3h.com", + hostname: "xn--n3h.com", repository: "xn--n3h.com/myimage", tag: "xn--n3h.com", }, { input: "xn--7o8h.com/myimage:xn--7o8h.com@sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // 🐳.com in punycode - domain: "xn--7o8h.com", 
+ hostname: "xn--7o8h.com", repository: "xn--7o8h.com/myimage", tag: "xn--7o8h.com", digest: "sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", @@ -167,7 +165,7 @@ func TestReferenceParse(t *testing.T) { }, { input: "foo/foo_bar.com:8080", - domain: "foo", + hostname: "foo", repository: "foo/foo_bar.com", tag: "8080", }, @@ -198,11 +196,11 @@ func TestReferenceParse(t *testing.T) { if named.Name() != testcase.repository { failf("unexpected repository: got %q, expected %q", named.Name(), testcase.repository) } - domain, _ := SplitHostname(named) - if domain != testcase.domain { - failf("unexpected domain: got %q, expected %q", domain, testcase.domain) + hostname, _ := SplitHostname(named) + if hostname != testcase.hostname { + failf("unexpected hostname: got %q, expected %q", hostname, testcase.hostname) } - } else if testcase.repository != "" || testcase.domain != "" { + } else if testcase.repository != "" || testcase.hostname != "" { failf("expected named type, got %T", repo) } @@ -282,39 +280,39 @@ func TestWithNameFailure(t *testing.T) { func TestSplitHostname(t *testing.T) { testcases := []struct { - input string - domain string - name string + input string + hostname string + name string }{ { - input: "test.com/foo", - domain: "test.com", - name: "foo", + input: "test.com/foo", + hostname: "test.com", + name: "foo", }, { - input: "test_com/foo", - domain: "", - name: "test_com/foo", + input: "test_com/foo", + hostname: "", + name: "test_com/foo", }, { - input: "test:8080/foo", - domain: "test:8080", - name: "foo", + input: "test:8080/foo", + hostname: "test:8080", + name: "foo", }, { - input: "test.com:8080/foo", - domain: "test.com:8080", - name: "foo", + input: "test.com:8080/foo", + hostname: "test.com:8080", + name: "foo", }, { - input: "test-com:8080/foo", - domain: "test-com:8080", - name: "foo", + input: "test-com:8080/foo", + hostname: "test-com:8080", + name: 
"foo", }, { - input: "xn--n3h.com:18080/foo", - domain: "xn--n3h.com:18080", - name: "foo", + input: "xn--n3h.com:18080/foo", + hostname: "xn--n3h.com:18080", + name: "foo", }, } for _, testcase := range testcases { @@ -327,9 +325,9 @@ func TestSplitHostname(t *testing.T) { if err != nil { failf("error parsing name: %s", err) } - domain, name := SplitHostname(named) - if domain != testcase.domain { - failf("unexpected domain: got %q, expected %q", domain, testcase.domain) + hostname, name := SplitHostname(named) + if hostname != testcase.hostname { + failf("unexpected hostname: got %q, expected %q", hostname, testcase.hostname) } if name != testcase.name { failf("unexpected name: got %q, expected %q", name, testcase.name) @@ -469,7 +467,6 @@ func TestSerialization(t *testing.T) { func TestWithTag(t *testing.T) { testcases := []struct { name string - digest digest.Digest tag string combined string }{ @@ -493,12 +490,6 @@ func TestWithTag(t *testing.T) { tag: "TAG5", combined: "test.com:8000/foo:TAG5", }, - { - name: "test.com:8000/foo", - digest: "sha256:1234567890098765432112345667890098765", - tag: "TAG5", - combined: "test.com:8000/foo:TAG5@sha256:1234567890098765432112345667890098765", - }, } for _, testcase := range testcases { failf := func(format string, v ...interface{}) { @@ -510,14 +501,6 @@ func TestWithTag(t *testing.T) { if err != nil { failf("error parsing name: %s", err) } - if testcase.digest != "" { - canonical, err := WithDigest(named, testcase.digest) - if err != nil { - failf("error adding digest") - } - named = canonical - } - tagged, err := WithTag(named, testcase.tag) if err != nil { failf("WithTag failed: %s", err) @@ -532,7 +515,6 @@ func TestWithDigest(t *testing.T) { testcases := []struct { name string digest digest.Digest - tag string combined string }{ { @@ -550,12 +532,6 @@ func TestWithDigest(t *testing.T) { digest: "sha256:1234567890098765432112345667890098765", combined: 
"test.com:8000/foo@sha256:1234567890098765432112345667890098765", }, - { - name: "test.com:8000/foo", - digest: "sha256:1234567890098765432112345667890098765", - tag: "latest", - combined: "test.com:8000/foo:latest@sha256:1234567890098765432112345667890098765", - }, } for _, testcase := range testcases { failf := func(format string, v ...interface{}) { @@ -567,13 +543,6 @@ func TestWithDigest(t *testing.T) { if err != nil { failf("error parsing name: %s", err) } - if testcase.tag != "" { - tagged, err := WithTag(named, testcase.tag) - if err != nil { - failf("error adding tag") - } - named = tagged - } digested, err := WithDigest(named, testcase.digest) if err != nil { failf("WithDigest failed: %s", err) @@ -583,77 +552,3 @@ func TestWithDigest(t *testing.T) { } } } - -func TestParseNamed(t *testing.T) { - testcases := []struct { - input string - domain string - name string - err error - }{ - { - input: "test.com/foo", - domain: "test.com", - name: "foo", - }, - { - input: "test:8080/foo", - domain: "test:8080", - name: "foo", - }, - { - input: "test_com/foo", - err: ErrNameNotCanonical, - }, - { - input: "test.com", - err: ErrNameNotCanonical, - }, - { - input: "foo", - err: ErrNameNotCanonical, - }, - { - input: "library/foo", - err: ErrNameNotCanonical, - }, - { - input: "docker.io/library/foo", - domain: "docker.io", - name: "library/foo", - }, - // Ambiguous case, parser will add "library/" to foo - { - input: "docker.io/foo", - err: ErrNameNotCanonical, - }, - } - for _, testcase := range testcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
- t.Fail() - } - - named, err := ParseNamed(testcase.input) - if err != nil && testcase.err == nil { - failf("error parsing name: %s", err) - continue - } else if err == nil && testcase.err != nil { - failf("parsing succeded: expected error %v", testcase.err) - continue - } else if err != testcase.err { - failf("unexpected error %v, expected %v", err, testcase.err) - continue - } else if err != nil { - continue - } - - domain, name := SplitHostname(named) - if domain != testcase.domain { - failf("unexpected domain: got %q, expected %q", domain, testcase.domain) - } - if name != testcase.name { - failf("unexpected name: got %q, expected %q", name, testcase.name) - } - } -} diff --git a/vendor/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go index 786034932..9a7d366bc 100644 --- a/vendor/github.com/docker/distribution/reference/regexp.go +++ b/vendor/github.com/docker/distribution/reference/regexp.go @@ -19,18 +19,18 @@ var ( alphaNumericRegexp, optional(repeated(separatorRegexp, alphaNumericRegexp))) - // domainComponentRegexp restricts the registry domain component of a - // repository name to start with a component as defined by DomainRegexp + // hostnameComponentRegexp restricts the registry hostname component of a + // repository name to start with a component as defined by hostnameRegexp // and followed by an optional port. - domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) + hostnameComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) - // DomainRegexp defines the structure of potential domain components + // hostnameRegexp defines the structure of potential hostname components // that may be part of image names. This is purposely a subset of what is // allowed by DNS to ensure backwards compatibility with Docker image // names. 
- DomainRegexp = expression( - domainComponentRegexp, - optional(repeated(literal(`.`), domainComponentRegexp)), + hostnameRegexp = expression( + hostnameComponentRegexp, + optional(repeated(literal(`.`), hostnameComponentRegexp)), optional(literal(`:`), match(`[0-9]+`))) // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. @@ -48,17 +48,17 @@ var ( anchoredDigestRegexp = anchored(DigestRegexp) // NameRegexp is the format for the name component of references. The - // regexp has capturing groups for the domain and name part omitting + // regexp has capturing groups for the hostname and name part omitting // the separating forward slash from either. NameRegexp = expression( - optional(DomainRegexp, literal(`/`)), + optional(hostnameRegexp, literal(`/`)), nameComponentRegexp, optional(repeated(literal(`/`), nameComponentRegexp))) // anchoredNameRegexp is used to parse a name value, capturing the - // domain and trailing components. + // hostname and trailing components. anchoredNameRegexp = anchored( - optional(capture(DomainRegexp), literal(`/`)), + optional(capture(hostnameRegexp), literal(`/`)), capture(nameComponentRegexp, optional(repeated(literal(`/`), nameComponentRegexp)))) @@ -68,25 +68,6 @@ var ( ReferenceRegexp = anchored(capture(NameRegexp), optional(literal(":"), capture(TagRegexp)), optional(literal("@"), capture(DigestRegexp))) - - // IdentifierRegexp is the format for string identifier used as a - // content addressable identifier using sha256. These identifiers - // are like digests without the algorithm, since sha256 is used. - IdentifierRegexp = match(`([a-f0-9]{64})`) - - // ShortIdentifierRegexp is the format used to represent a prefix - // of an identifier. A prefix may be used to match a sha256 identifier - // within a list of trusted identifiers. - ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) - - // anchoredIdentifierRegexp is used to check or match an - // identifier value, anchored at start and end of string. 
- anchoredIdentifierRegexp = anchored(IdentifierRegexp) - - // anchoredShortIdentifierRegexp is used to check if a value - // is a possible identifier prefix, anchored at start and end - // of string. - anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) ) // match compiles the string to a regular expression. diff --git a/vendor/github.com/docker/distribution/reference/regexp_test.go b/vendor/github.com/docker/distribution/reference/regexp_test.go index 09bc81927..2ec39377a 100644 --- a/vendor/github.com/docker/distribution/reference/regexp_test.go +++ b/vendor/github.com/docker/distribution/reference/regexp_test.go @@ -33,7 +33,7 @@ func checkRegexp(t *testing.T, r *regexp.Regexp, m regexpMatch) { } } -func TestDomainRegexp(t *testing.T) { +func TestHostRegexp(t *testing.T) { hostcases := []regexpMatch{ { input: "test.com", @@ -116,7 +116,7 @@ func TestDomainRegexp(t *testing.T) { match: true, }, } - r := regexp.MustCompile(`^` + DomainRegexp.String() + `$`) + r := regexp.MustCompile(`^` + hostnameRegexp.String() + `$`) for i := range hostcases { checkRegexp(t, r, hostcases[i]) } @@ -487,67 +487,3 @@ func TestReferenceRegexp(t *testing.T) { } } - -func TestIdentifierRegexp(t *testing.T) { - fullCases := []regexpMatch{ - { - input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821", - match: true, - }, - { - input: "7EC43B381E5AEFE6E04EFB0B3F0693FF2A4A50652D64AEC573905F2DB5889A1C", - match: false, - }, - { - input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf", - match: false, - }, - { - input: "sha256:da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821", - match: false, - }, - { - input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf98218482", - match: false, - }, - } - - shortCases := []regexpMatch{ - { - input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821", - match: true, - }, - { - input: "7EC43B381E5AEFE6E04EFB0B3F0693FF2A4A50652D64AEC573905F2DB5889A1C", - match: 
false, - }, - { - input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf", - match: true, - }, - { - input: "sha256:da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821", - match: false, - }, - { - input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf98218482", - match: false, - }, - { - input: "da304", - match: false, - }, - { - input: "da304e", - match: true, - }, - } - - for i := range fullCases { - checkRegexp(t, anchoredIdentifierRegexp, fullCases[i]) - } - - for i := range shortCases { - checkRegexp(t, anchoredShortIdentifierRegexp, shortCases[i]) - } -} diff --git a/vendor/github.com/docker/distribution/registry.go b/vendor/github.com/docker/distribution/registry.go index 1da1d533f..1ede31ebb 100644 --- a/vendor/github.com/docker/distribution/registry.go +++ b/vendor/github.com/docker/distribution/registry.go @@ -35,7 +35,7 @@ type Namespace interface { // reference. Repository(ctx context.Context, name reference.Named) (Repository, error) - // Repositories fills 'repos' with a lexicographically sorted catalog of repositories + // Repositories fills 'repos' with a lexigraphically sorted catalog of repositories // up to the size of 'repos' and returns the value 'n' for the number of entries // which were filled. 'last' contains an offset in the catalog, and 'err' will be // set to io.EOF if there are no more entries to obtain. 
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go index a9616c58a..9979abae6 100644 --- a/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go +++ b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go @@ -4,9 +4,9 @@ import ( "net/http" "regexp" + "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" - "github.com/opencontainers/go-digest" ) var ( diff --git a/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go b/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go deleted file mode 100644 index 9bc41a3a6..000000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go +++ /dev/null @@ -1,161 +0,0 @@ -package v2 - -import ( - "fmt" - "regexp" - "strings" - "unicode" -) - -var ( - // according to rfc7230 - reToken = regexp.MustCompile(`^[^"(),/:;<=>?@[\]{}[:space:][:cntrl:]]+`) - reQuotedValue = regexp.MustCompile(`^[^\\"]+`) - reEscapedCharacter = regexp.MustCompile(`^[[:blank:][:graph:]]`) -) - -// parseForwardedHeader is a benevolent parser of Forwarded header defined in rfc7239. The header contains -// a comma-separated list of forwarding key-value pairs. Each list element is set by single proxy. The -// function parses only the first element of the list, which is set by the very first proxy. It returns a map -// of corresponding key-value pairs and an unparsed slice of the input string. -// -// Examples of Forwarded header values: -// -// 1. Forwarded: For=192.0.2.43; Proto=https,For="[2001:db8:cafe::17]",For=unknown -// 2. Forwarded: for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80" -// -// The first will be parsed into {"for": "192.0.2.43", "proto": "https"} while the second into -// {"for": "192.0.2.43:443", "host": "registry.example.org"}. 
-func parseForwardedHeader(forwarded string) (map[string]string, string, error) { - // Following are states of forwarded header parser. Any state could transition to a failure. - const ( - // terminating state; can transition to Parameter - stateElement = iota - // terminating state; can transition to KeyValueDelimiter - stateParameter - // can transition to Value - stateKeyValueDelimiter - // can transition to one of { QuotedValue, PairEnd } - stateValue - // can transition to one of { EscapedCharacter, PairEnd } - stateQuotedValue - // can transition to one of { QuotedValue } - stateEscapedCharacter - // terminating state; can transition to one of { Parameter, Element } - statePairEnd - ) - - var ( - parameter string - value string - parse = forwarded[:] - res = map[string]string{} - state = stateElement - ) - -Loop: - for { - // skip spaces unless in quoted value - if state != stateQuotedValue && state != stateEscapedCharacter { - parse = strings.TrimLeftFunc(parse, unicode.IsSpace) - } - - if len(parse) == 0 { - if state != stateElement && state != statePairEnd && state != stateParameter { - return nil, parse, fmt.Errorf("unexpected end of input") - } - // terminating - break - } - - switch state { - // terminate at list element delimiter - case stateElement: - if parse[0] == ',' { - parse = parse[1:] - break Loop - } - state = stateParameter - - // parse parameter (the key of key-value pair) - case stateParameter: - match := reToken.FindString(parse) - if len(match) == 0 { - return nil, parse, fmt.Errorf("failed to parse token at position %d", len(forwarded)-len(parse)) - } - parameter = strings.ToLower(match) - parse = parse[len(match):] - state = stateKeyValueDelimiter - - // parse '=' - case stateKeyValueDelimiter: - if parse[0] != '=' { - return nil, parse, fmt.Errorf("expected '=', not '%c' at position %d", parse[0], len(forwarded)-len(parse)) - } - parse = parse[1:] - state = stateValue - - // parse value or quoted value - case stateValue: - if parse[0] 
== '"' { - parse = parse[1:] - state = stateQuotedValue - } else { - value = reToken.FindString(parse) - if len(value) == 0 { - return nil, parse, fmt.Errorf("failed to parse value at position %d", len(forwarded)-len(parse)) - } - if _, exists := res[parameter]; exists { - return nil, parse, fmt.Errorf("duplicate parameter %q at position %d", parameter, len(forwarded)-len(parse)) - } - res[parameter] = value - parse = parse[len(value):] - value = "" - state = statePairEnd - } - - // parse a part of quoted value until the first backslash - case stateQuotedValue: - match := reQuotedValue.FindString(parse) - value += match - parse = parse[len(match):] - switch { - case len(parse) == 0: - return nil, parse, fmt.Errorf("unterminated quoted string") - case parse[0] == '"': - res[parameter] = value - value = "" - parse = parse[1:] - state = statePairEnd - case parse[0] == '\\': - parse = parse[1:] - state = stateEscapedCharacter - } - - // parse escaped character in a quoted string, ignore the backslash - // transition back to QuotedValue state - case stateEscapedCharacter: - c := reEscapedCharacter.FindString(parse) - if len(c) == 0 { - return nil, parse, fmt.Errorf("invalid escape sequence at position %d", len(forwarded)-len(parse)-1) - } - value += c - parse = parse[1:] - state = stateQuotedValue - - // expect either a new key-value pair, new list or end of input - case statePairEnd: - switch parse[0] { - case ';': - parse = parse[1:] - state = stateParameter - case ',': - state = stateElement - default: - return nil, parse, fmt.Errorf("expected ',' or ';', not %c at position %d", parse[0], len(forwarded)-len(parse)) - } - } - } - - return res, parse, nil -} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/headerparser_test.go b/vendor/github.com/docker/distribution/registry/api/v2/headerparser_test.go deleted file mode 100644 index b8c37490d..000000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/headerparser_test.go +++ /dev/null @@ 
-1,161 +0,0 @@ -package v2 - -import ( - "testing" -) - -func TestParseForwardedHeader(t *testing.T) { - for _, tc := range []struct { - name string - raw string - expected map[string]string - expectedRest string - expectedError bool - }{ - { - name: "empty", - raw: "", - }, - { - name: "one pair", - raw: " key = value ", - expected: map[string]string{"key": "value"}, - }, - { - name: "two pairs", - raw: " key1 = value1; key2=value2", - expected: map[string]string{"key1": "value1", "key2": "value2"}, - }, - { - name: "uppercase parameter", - raw: "KeY=VaL", - expected: map[string]string{"key": "VaL"}, - }, - { - name: "missing key=value pair - be tolerant", - raw: "key=val;", - expected: map[string]string{"key": "val"}, - }, - { - name: "quoted values", - raw: `key="val";param = "[[ $((1 + 1)) == 3 ]] && echo panic!;" ; p=" abcd "`, - expected: map[string]string{"key": "val", "param": "[[ $((1 + 1)) == 3 ]] && echo panic!;", "p": " abcd "}, - }, - { - name: "empty quoted value", - raw: `key=""`, - expected: map[string]string{"key": ""}, - }, - { - name: "quoted double quotes", - raw: `key="\"value\""`, - expected: map[string]string{"key": `"value"`}, - }, - { - name: "quoted backslash", - raw: `key="\"\\\""`, - expected: map[string]string{"key": `"\"`}, - }, - { - name: "ignore subsequent elements", - raw: "key=a, param= b", - expected: map[string]string{"key": "a"}, - expectedRest: " param= b", - }, - { - name: "empty element - be tolerant", - raw: " , key=val", - expectedRest: " key=val", - }, - { - name: "obscure key", - raw: `ob₷C&r€ = value`, - expected: map[string]string{`ob₷c&r€`: "value"}, - }, - { - name: "duplicate parameter", - raw: "key=a; p=b; key=c", - expectedError: true, - }, - { - name: "empty parameter", - raw: "=value", - expectedError: true, - }, - { - name: "empty value", - raw: "key= ", - expectedError: true, - }, - { - name: "empty value before a new element ", - raw: "key=,", - expectedError: true, - }, - { - name: "empty value before a new 
pair", - raw: "key=;", - expectedError: true, - }, - { - name: "just parameter", - raw: "key", - expectedError: true, - }, - { - name: "missing key-value", - raw: "a=b;;", - expectedError: true, - }, - { - name: "unclosed quoted value", - raw: `key="value`, - expectedError: true, - }, - { - name: "escaped terminating dquote", - raw: `key="value\"`, - expectedError: true, - }, - { - name: "just a quoted value", - raw: `"key=val"`, - expectedError: true, - }, - { - name: "quoted key", - raw: `"key"=val`, - expectedError: true, - }, - } { - parsed, rest, err := parseForwardedHeader(tc.raw) - if err != nil && !tc.expectedError { - t.Errorf("[%s] got unexpected error: %v", tc.name, err) - } - if err == nil && tc.expectedError { - t.Errorf("[%s] got unexpected non-error", tc.name) - } - if err != nil || tc.expectedError { - continue - } - for key, value := range tc.expected { - v, exists := parsed[key] - if !exists { - t.Errorf("[%s] missing expected parameter %q", tc.name, key) - continue - } - if v != value { - t.Errorf("[%s] got unexpected value for parameter %q: %q != %q", tc.name, key, v, value) - } - } - for key, value := range parsed { - if _, exists := tc.expected[key]; !exists { - t.Errorf("[%s] got unexpected key/value pair: %q=%q", tc.name, key, value) - } - } - - if rest != tc.expectedRest { - t.Errorf("[%s] got unexpected unparsed string: %q != %q", tc.name, rest, tc.expectedRest) - } - } -} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/urls.go b/vendor/github.com/docker/distribution/registry/api/v2/urls.go index 1337bdb12..a959aaa89 100644 --- a/vendor/github.com/docker/distribution/registry/api/v2/urls.go +++ b/vendor/github.com/docker/distribution/registry/api/v2/urls.go @@ -1,7 +1,6 @@ package v2 import ( - "fmt" "net/http" "net/url" "strings" @@ -47,42 +46,30 @@ func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) { // NewURLBuilderFromRequest uses information from an *http.Request to // construct the root 
url. func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder { - var ( - scheme = "http" - host = r.Host - ) + var scheme string - if r.TLS != nil { + forwardedProto := r.Header.Get("X-Forwarded-Proto") + + switch { + case len(forwardedProto) > 0: + scheme = forwardedProto + case r.TLS != nil: scheme = "https" - } else if len(r.URL.Scheme) > 0 { + case len(r.URL.Scheme) > 0: scheme = r.URL.Scheme + default: + scheme = "http" } - // Handle fowarded headers - // Prefer "Forwarded" header as defined by rfc7239 if given - // see https://tools.ietf.org/html/rfc7239 - if forwarded := r.Header.Get("Forwarded"); len(forwarded) > 0 { - forwardedHeader, _, err := parseForwardedHeader(forwarded) - if err == nil { - if fproto := forwardedHeader["proto"]; len(fproto) > 0 { - scheme = fproto - } - if fhost := forwardedHeader["host"]; len(fhost) > 0 { - host = fhost - } - } - } else { - if forwardedProto := r.Header.Get("X-Forwarded-Proto"); len(forwardedProto) > 0 { - scheme = forwardedProto - } - if forwardedHost := r.Header.Get("X-Forwarded-Host"); len(forwardedHost) > 0 { - // According to the Apache mod_proxy docs, X-Forwarded-Host can be a - // comma-separated list of hosts, to which each proxy appends the - // requested host. We want to grab the first from this comma-separated - // list. - hosts := strings.SplitN(forwardedHost, ",", 2) - host = strings.TrimSpace(hosts[0]) - } + host := r.Host + forwardedHost := r.Header.Get("X-Forwarded-Host") + if len(forwardedHost) > 0 { + // According to the Apache mod_proxy docs, X-Forwarded-Host can be a + // comma-separated list of hosts, to which each proxy appends the + // requested host. We want to grab the first from this comma-separated + // list. 
+ hosts := strings.SplitN(forwardedHost, ",", 2) + host = strings.TrimSpace(hosts[0]) } basePath := routeDescriptorsMap[RouteNameBase].Path @@ -150,8 +137,6 @@ func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) { tagOrDigest = v.Tag() case reference.Digested: tagOrDigest = v.Digest().String() - default: - return "", fmt.Errorf("reference must have a tag or digest") } manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest) diff --git a/vendor/github.com/docker/distribution/registry/api/v2/urls_test.go b/vendor/github.com/docker/distribution/registry/api/v2/urls_test.go index 4f854b23b..10aadd52e 100644 --- a/vendor/github.com/docker/distribution/registry/api/v2/urls_test.go +++ b/vendor/github.com/docker/distribution/registry/api/v2/urls_test.go @@ -1,10 +1,8 @@ package v2 import ( - "fmt" "net/http" "net/url" - "reflect" "testing" "github.com/docker/distribution/reference" @@ -13,48 +11,35 @@ import ( type urlBuilderTestCase struct { description string expectedPath string - expectedErr error build func() (string, error) } func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { - fooBarRef, _ := reference.WithName("foo/bar") + fooBarRef, _ := reference.ParseNamed("foo/bar") return []urlBuilderTestCase{ { description: "test base url", expectedPath: "/v2/", - expectedErr: nil, build: urlBuilder.BuildBaseURL, }, { description: "test tags url", expectedPath: "/v2/foo/bar/tags/list", - expectedErr: nil, build: func() (string, error) { return urlBuilder.BuildTagsURL(fooBarRef) }, }, { - description: "test manifest url tagged ref", + description: "test manifest url", expectedPath: "/v2/foo/bar/manifests/tag", - expectedErr: nil, build: func() (string, error) { ref, _ := reference.WithTag(fooBarRef, "tag") return urlBuilder.BuildManifestURL(ref) }, }, - { - description: "test manifest url bare ref", - expectedPath: "", - expectedErr: fmt.Errorf("reference must have a tag or digest"), - build: func() (string, 
error) { - return urlBuilder.BuildManifestURL(fooBarRef) - }, - }, { description: "build blob url", expectedPath: "/v2/foo/bar/blobs/sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5", - expectedErr: nil, build: func() (string, error) { ref, _ := reference.WithDigest(fooBarRef, "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5") return urlBuilder.BuildBlobURL(ref) @@ -63,7 +48,6 @@ func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { { description: "build blob upload url", expectedPath: "/v2/foo/bar/blobs/uploads/", - expectedErr: nil, build: func() (string, error) { return urlBuilder.BuildBlobUploadURL(fooBarRef) }, @@ -71,7 +55,6 @@ func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { { description: "build blob upload url with digest and size", expectedPath: "/v2/foo/bar/blobs/uploads/?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000", - expectedErr: nil, build: func() (string, error) { return urlBuilder.BuildBlobUploadURL(fooBarRef, url.Values{ "size": []string{"10000"}, @@ -82,7 +65,6 @@ func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { { description: "build blob upload chunk url", expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part", - expectedErr: nil, build: func() (string, error) { return urlBuilder.BuildBlobUploadChunkURL(fooBarRef, "uuid-part") }, @@ -90,7 +72,6 @@ func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { { description: "build blob upload chunk url with digest and size", expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000", - expectedErr: nil, build: func() (string, error) { return urlBuilder.BuildBlobUploadChunkURL(fooBarRef, "uuid-part", url.Values{ "size": []string{"10000"}, @@ -120,14 +101,9 @@ func TestURLBuilder(t *testing.T) { for _, testCase := range 
makeURLBuilderTestCases(urlBuilder) { url, err := testCase.build() - expectedErr := testCase.expectedErr - if !reflect.DeepEqual(expectedErr, err) { - t.Fatalf("%s: Expecting %v but got error %v", testCase.description, expectedErr, err) + if err != nil { + t.Fatalf("%s: error building url: %v", testCase.description, err) } - if expectedErr != nil { - continue - } - expectedURL := testCase.expectedPath if !relative { expectedURL = root + expectedURL @@ -160,12 +136,8 @@ func TestURLBuilderWithPrefix(t *testing.T) { for _, testCase := range makeURLBuilderTestCases(urlBuilder) { url, err := testCase.build() - expectedErr := testCase.expectedErr - if !reflect.DeepEqual(expectedErr, err) { - t.Fatalf("%s: Expecting %v but got error %v", testCase.description, expectedErr, err) - } - if expectedErr != nil { - continue + if err != nil { + t.Fatalf("%s: error building url: %v", testCase.description, err) } expectedURL := testCase.expectedPath @@ -193,213 +165,50 @@ func TestBuilderFromRequest(t *testing.T) { t.Fatal(err) } + forwardedProtoHeader := make(http.Header, 1) + forwardedProtoHeader.Set("X-Forwarded-Proto", "https") + + forwardedHostHeader1 := make(http.Header, 1) + forwardedHostHeader1.Set("X-Forwarded-Host", "first.example.com") + + forwardedHostHeader2 := make(http.Header, 1) + forwardedHostHeader2.Set("X-Forwarded-Host", "first.example.com, proxy1.example.com") + testRequests := []struct { - name string request *http.Request base string configHost url.URL }{ { - name: "no forwarded header", request: &http.Request{URL: u, Host: u.Host}, base: "http://example.com", }, + { - name: "https protocol forwarded with a non-standard header", - request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "X-Custom-Forwarded-Proto": []string{"https"}, - }}, - base: "http://example.com", + request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, + base: "http://example.com", }, { - name: "forwarded protocol is the same", - request: 
&http.Request{URL: u, Host: u.Host, Header: http.Header{ - "X-Forwarded-Proto": []string{"https"}, - }}, - base: "https://example.com", + request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, + base: "https://example.com", }, { - name: "forwarded host with a non-standard header", - request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "X-Forwarded-Host": []string{"first.example.com"}, - }}, - base: "http://first.example.com", + request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader1}, + base: "http://first.example.com", }, { - name: "forwarded multiple hosts a with non-standard header", - request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "X-Forwarded-Host": []string{"first.example.com, proxy1.example.com"}, - }}, - base: "http://first.example.com", + request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader2}, + base: "http://first.example.com", }, { - name: "host configured in config file takes priority", - request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "X-Forwarded-Host": []string{"first.example.com, proxy1.example.com"}, - }}, - base: "https://third.example.com:5000", + request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader2}, + base: "https://third.example.com:5000", configHost: url.URL{ Scheme: "https", Host: "third.example.com:5000", }, }, - { - name: "forwarded host and port with just one non-standard header", - request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "X-Forwarded-Host": []string{"first.example.com:443"}, - }}, - base: "http://first.example.com:443", - }, - { - name: "forwarded port with a non-standard header", - request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "X-Forwarded-Host": []string{"example.com:5000"}, - "X-Forwarded-Port": []string{"5000"}, - }}, - base: "http://example.com:5000", - }, - { - name: "forwarded multiple ports with a non-standard header", - request: 
&http.Request{URL: u, Host: u.Host, Header: http.Header{ - "X-Forwarded-Port": []string{"443 , 5001"}, - }}, - base: "http://example.com", - }, - { - name: "forwarded standard port with non-standard headers", - request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "X-Forwarded-Proto": []string{"https"}, - "X-Forwarded-Host": []string{"example.com"}, - "X-Forwarded-Port": []string{"443"}, - }}, - base: "https://example.com", - }, - { - name: "forwarded standard port with non-standard headers and explicit port", - request: &http.Request{URL: u, Host: u.Host + ":443", Header: http.Header{ - "X-Forwarded-Proto": []string{"https"}, - "X-Forwarded-Host": []string{u.Host + ":443"}, - "X-Forwarded-Port": []string{"443"}, - }}, - base: "https://example.com:443", - }, - { - name: "several non-standard headers", - request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "X-Forwarded-Proto": []string{"https"}, - "X-Forwarded-Host": []string{" first.example.com:12345 "}, - }}, - base: "https://first.example.com:12345", - }, - { - name: "forwarded host with port supplied takes priority", - request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "X-Forwarded-Host": []string{"first.example.com:5000"}, - "X-Forwarded-Port": []string{"80"}, - }}, - base: "http://first.example.com:5000", - }, - { - name: "malformed forwarded port", - request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "X-Forwarded-Host": []string{"first.example.com"}, - "X-Forwarded-Port": []string{"abcd"}, - }}, - base: "http://first.example.com", - }, - { - name: "forwarded protocol and addr using standard header", - request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "Forwarded": []string{`proto=https;host="192.168.22.30:80"`}, - }}, - base: "https://192.168.22.30:80", - }, - { - name: "forwarded host takes priority over for", - request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "Forwarded": 
[]string{`host="reg.example.com:5000";for="192.168.22.30"`}, - }}, - base: "http://reg.example.com:5000", - }, - { - name: "forwarded host and protocol using standard header", - request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "Forwarded": []string{`host=reg.example.com;proto=https`}, - }}, - base: "https://reg.example.com", - }, - { - name: "process just the first standard forwarded header", - request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "Forwarded": []string{`host="reg.example.com:88";proto=http`, `host=reg.example.com;proto=https`}, - }}, - base: "http://reg.example.com:88", - }, - { - name: "process just the first list element of standard header", - request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "Forwarded": []string{`host="reg.example.com:443";proto=https, host="reg.example.com:80";proto=http`}, - }}, - base: "https://reg.example.com:443", - }, - { - name: "IPv6 address use host", - request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "Forwarded": []string{`for="2607:f0d0:1002:51::4";host="[2607:f0d0:1002:51::4]:5001"`}, - "X-Forwarded-Port": []string{"5002"}, - }}, - base: "http://[2607:f0d0:1002:51::4]:5001", - }, - { - name: "IPv6 address with port", - request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "Forwarded": []string{`host="[2607:f0d0:1002:51::4]:4000"`}, - "X-Forwarded-Port": []string{"5001"}, - }}, - base: "http://[2607:f0d0:1002:51::4]:4000", - }, - { - name: "non-standard and standard forward headers", - request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "X-Forwarded-Proto": []string{`https`}, - "X-Forwarded-Host": []string{`first.example.com`}, - "X-Forwarded-Port": []string{``}, - "Forwarded": []string{`host=first.example.com; proto=https`}, - }}, - base: "https://first.example.com", - }, - { - name: "standard header takes precedence over non-standard headers", - request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - 
"X-Forwarded-Proto": []string{`http`}, - "Forwarded": []string{`host=second.example.com; proto=https`}, - "X-Forwarded-Host": []string{`first.example.com`}, - "X-Forwarded-Port": []string{`4000`}, - }}, - base: "https://second.example.com", - }, - { - name: "incomplete standard header uses default", - request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "X-Forwarded-Proto": []string{`https`}, - "Forwarded": []string{`for=127.0.0.1`}, - "X-Forwarded-Host": []string{`first.example.com`}, - "X-Forwarded-Port": []string{`4000`}, - }}, - base: "http://" + u.Host, - }, - { - name: "standard with just proto", - request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "X-Forwarded-Proto": []string{`https`}, - "Forwarded": []string{`proto=https`}, - "X-Forwarded-Host": []string{`first.example.com`}, - "X-Forwarded-Port": []string{`4000`}, - }}, - base: "https://" + u.Host, - }, } - doTest := func(relative bool) { for _, tr := range testRequests { var builder *URLBuilder @@ -411,26 +220,35 @@ func TestBuilderFromRequest(t *testing.T) { for _, testCase := range makeURLBuilderTestCases(builder) { buildURL, err := testCase.build() - expectedErr := testCase.expectedErr - if !reflect.DeepEqual(expectedErr, err) { - t.Fatalf("%s: Expecting %v but got error %v", testCase.description, expectedErr, err) - } - if expectedErr != nil { - continue + if err != nil { + t.Fatalf("%s: error building url: %v", testCase.description, err) } - expectedURL := testCase.expectedPath - if !relative { - expectedURL = tr.base + expectedURL + var expectedURL string + proto, ok := tr.request.Header["X-Forwarded-Proto"] + if !ok { + expectedURL = testCase.expectedPath + if !relative { + expectedURL = tr.base + expectedURL + } + } else { + urlBase, err := url.Parse(tr.base) + if err != nil { + t.Fatal(err) + } + urlBase.Scheme = proto[0] + expectedURL = testCase.expectedPath + if !relative { + expectedURL = urlBase.String() + expectedURL + } } if buildURL != expectedURL { - 
t.Errorf("[relative=%t, request=%q, case=%q]: %q != %q", relative, tr.name, testCase.description, buildURL, expectedURL) + t.Fatalf("%s: %q != %q", testCase.description, buildURL, expectedURL) } } } } - doTest(true) doTest(false) } @@ -484,12 +302,8 @@ func TestBuilderFromRequestWithPrefix(t *testing.T) { for _, testCase := range makeURLBuilderTestCases(builder) { buildURL, err := testCase.build() - expectedErr := testCase.expectedErr - if !reflect.DeepEqual(expectedErr, err) { - t.Fatalf("%s: Expecting %v but got error %v", testCase.description, expectedErr, err) - } - if expectedErr != nil { - continue + if err != nil { + t.Fatalf("%s: error building url: %v", testCase.description, err) } var expectedURL string diff --git a/vendor/github.com/docker/distribution/registry/auth/auth.go b/vendor/github.com/docker/distribution/registry/auth/auth.go index 1c9af8821..0cb37235b 100644 --- a/vendor/github.com/docker/distribution/registry/auth/auth.go +++ b/vendor/github.com/docker/distribution/registry/auth/auth.go @@ -66,9 +66,8 @@ type UserInfo struct { // Resource describes a resource by type and name. type Resource struct { - Type string - Class string - Name string + Type string + Name string } // Access describes a specific action that is @@ -136,39 +135,6 @@ func (uic userInfoContext) Value(key interface{}) interface{} { return uic.Context.Value(key) } -// WithResources returns a context with the authorized resources. -func WithResources(ctx context.Context, resources []Resource) context.Context { - return resourceContext{ - Context: ctx, - resources: resources, - } -} - -type resourceContext struct { - context.Context - resources []Resource -} - -type resourceKey struct{} - -func (rc resourceContext) Value(key interface{}) interface{} { - if key == (resourceKey{}) { - return rc.resources - } - - return rc.Context.Value(key) -} - -// AuthorizedResources returns the list of resources which have -// been authorized for this request. 
-func AuthorizedResources(ctx context.Context) []Resource { - if resources, ok := ctx.Value(resourceKey{}).([]Resource); ok { - return resources - } - - return nil -} - // InitFunc is the type of an AccessController factory function and is used // to register the constructor for different AccesController backends. type InitFunc func(options map[string]interface{}) (AccessController, error) diff --git a/vendor/github.com/docker/distribution/registry/auth/silly/access_test.go b/vendor/github.com/docker/distribution/registry/auth/silly/access_test.go index 0a5103e6c..a7c14cb9d 100644 --- a/vendor/github.com/docker/distribution/registry/auth/silly/access_test.go +++ b/vendor/github.com/docker/distribution/registry/auth/silly/access_test.go @@ -16,7 +16,7 @@ func TestSillyAccessController(t *testing.T) { } server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx := context.WithRequest(context.Background(), r) + ctx := context.WithValue(nil, "http.request", r) authCtx, err := ac.Authorized(ctx) if err != nil { switch err := err.(type) { diff --git a/vendor/github.com/docker/distribution/registry/auth/token/accesscontroller.go b/vendor/github.com/docker/distribution/registry/auth/token/accesscontroller.go index 4e8b7f1ce..52b7f3692 100644 --- a/vendor/github.com/docker/distribution/registry/auth/token/accesscontroller.go +++ b/vendor/github.com/docker/distribution/registry/auth/token/accesscontroller.go @@ -261,8 +261,6 @@ func (ac *accessController) Authorized(ctx context.Context, accessItems ...auth. 
} } - ctx = auth.WithResources(ctx, token.resources()) - return auth.WithUser(ctx, auth.UserInfo{Name: token.Claims.Subject}), nil } diff --git a/vendor/github.com/docker/distribution/registry/auth/token/token.go b/vendor/github.com/docker/distribution/registry/auth/token/token.go index 850f5813f..634eb75b2 100644 --- a/vendor/github.com/docker/distribution/registry/auth/token/token.go +++ b/vendor/github.com/docker/distribution/registry/auth/token/token.go @@ -34,7 +34,6 @@ var ( // ResourceActions stores allowed actions on a named and typed resource. type ResourceActions struct { Type string `json:"type"` - Class string `json:"class,omitempty"` Name string `json:"name"` Actions []string `json:"actions"` } @@ -350,29 +349,6 @@ func (t *Token) accessSet() accessSet { return accessSet } -func (t *Token) resources() []auth.Resource { - if t.Claims == nil { - return nil - } - - resourceSet := map[auth.Resource]struct{}{} - for _, resourceActions := range t.Claims.Access { - resource := auth.Resource{ - Type: resourceActions.Type, - Class: resourceActions.Class, - Name: resourceActions.Name, - } - resourceSet[resource] = struct{}{} - } - - resources := make([]auth.Resource, 0, len(resourceSet)) - for resource := range resourceSet { - resources = append(resources, resource) - } - - return resources -} - func (t *Token) compactRaw() string { return fmt.Sprintf("%s.%s", t.Raw, joseBase64UrlEncode(t.Signature)) } diff --git a/vendor/github.com/docker/distribution/registry/auth/token/token_test.go b/vendor/github.com/docker/distribution/registry/auth/token/token_test.go index 03dce6fa6..27206f9b4 100644 --- a/vendor/github.com/docker/distribution/registry/auth/token/token_test.go +++ b/vendor/github.com/docker/distribution/registry/auth/token/token_test.go @@ -354,7 +354,7 @@ func TestAccessController(t *testing.T) { Action: "baz", } - ctx := context.WithRequest(context.Background(), req) + ctx := context.WithValue(nil, "http.request", req) authCtx, err := 
accessController.Authorized(ctx, testAccess) challenge, ok := err.(auth.Challenge) if !ok { @@ -454,27 +454,6 @@ func TestAccessController(t *testing.T) { if userInfo.Name != "foo" { t.Fatalf("expected user name %q, got %q", "foo", userInfo.Name) } - - // 5. Supply a token with full admin rights, which is represented as "*". - token, err = makeTestToken( - issuer, service, - []*ResourceActions{{ - Type: testAccess.Type, - Name: testAccess.Name, - Actions: []string{"*"}, - }}, - rootKeys[0], 1, time.Now(), time.Now().Add(5*time.Minute), - ) - if err != nil { - t.Fatal(err) - } - - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) - - _, err = accessController.Authorized(ctx, testAccess) - if err != nil { - t.Fatalf("accessController returned unexpected error: %s", err) - } } // This tests that newAccessController can handle PEM blocks in the certificate diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go b/vendor/github.com/docker/distribution/registry/client/auth/addr.go similarity index 97% rename from vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go rename to vendor/github.com/docker/distribution/registry/client/auth/addr.go index 2c3ebe165..6e7775288 100644 --- a/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go +++ b/vendor/github.com/docker/distribution/registry/client/auth/addr.go @@ -1,4 +1,4 @@ -package challenge +package auth import ( "net/url" diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/authchallenge.go similarity index 91% rename from vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go rename to vendor/github.com/docker/distribution/registry/client/auth/authchallenge.go index c9bdfc355..69d9d6fe0 100644 --- 
a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go +++ b/vendor/github.com/docker/distribution/registry/client/auth/authchallenge.go @@ -1,4 +1,4 @@ -package challenge +package auth import ( "fmt" @@ -18,12 +18,12 @@ type Challenge struct { Parameters map[string]string } -// Manager manages the challenges for endpoints. +// ChallengeManager manages the challenges for endpoints. // The challenges are pulled out of HTTP responses. Only // responses which expect challenges should be added to // the manager, since a non-unauthorized request will be // viewed as not requiring challenges. -type Manager interface { +type ChallengeManager interface { // GetChallenges returns the challenges for the given // endpoint URL. GetChallenges(endpoint url.URL) ([]Challenge, error) @@ -37,19 +37,19 @@ type Manager interface { AddResponse(resp *http.Response) error } -// NewSimpleManager returns an instance of -// Manger which only maps endpoints to challenges +// NewSimpleChallengeManager returns an instance of +// ChallengeManger which only maps endpoints to challenges // based on the responses which have been added the // manager. The simple manager will make no attempt to // perform requests on the endpoints or cache the responses // to a backend. 
-func NewSimpleManager() Manager { - return &simpleManager{ +func NewSimpleChallengeManager() ChallengeManager { + return &simpleChallengeManager{ Challanges: make(map[string][]Challenge), } } -type simpleManager struct { +type simpleChallengeManager struct { sync.RWMutex Challanges map[string][]Challenge } @@ -59,7 +59,7 @@ func normalizeURL(endpoint *url.URL) { endpoint.Host = canonicalAddr(endpoint) } -func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) { +func (m *simpleChallengeManager) GetChallenges(endpoint url.URL) ([]Challenge, error) { normalizeURL(&endpoint) m.RLock() @@ -68,7 +68,7 @@ func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) { return challenges, nil } -func (m *simpleManager) AddResponse(resp *http.Response) error { +func (m *simpleChallengeManager) AddResponse(resp *http.Response) error { challenges := ResponseChallenges(resp) if resp.Request == nil { return fmt.Errorf("missing request reference") diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge_test.go b/vendor/github.com/docker/distribution/registry/client/auth/authchallenge_test.go similarity index 97% rename from vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge_test.go rename to vendor/github.com/docker/distribution/registry/client/auth/authchallenge_test.go index d4986b39e..2716fba52 100644 --- a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge_test.go +++ b/vendor/github.com/docker/distribution/registry/client/auth/authchallenge_test.go @@ -1,4 +1,4 @@ -package challenge +package auth import ( "fmt" @@ -50,7 +50,7 @@ func TestAuthChallengeNormalization(t *testing.T) { func testAuthChallengeNormalization(t *testing.T, host string) { - scm := NewSimpleManager() + scm := NewSimpleChallengeManager() url, err := url.Parse(fmt.Sprintf("http://%s/v2/", host)) if err != nil { @@ -86,7 +86,7 @@ func testAuthChallengeNormalization(t 
*testing.T, host string) { func testAuthChallengeConcurrent(t *testing.T, host string) { - scm := NewSimpleManager() + scm := NewSimpleChallengeManager() url, err := url.Parse(fmt.Sprintf("http://%s/v2/", host)) if err != nil { diff --git a/vendor/github.com/docker/distribution/registry/client/auth/session.go b/vendor/github.com/docker/distribution/registry/client/auth/session.go index 3ca5e8b3e..d03d8ff0e 100644 --- a/vendor/github.com/docker/distribution/registry/client/auth/session.go +++ b/vendor/github.com/docker/distribution/registry/client/auth/session.go @@ -12,7 +12,6 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/distribution/registry/client" - "github.com/docker/distribution/registry/client/auth/challenge" "github.com/docker/distribution/registry/client/transport" ) @@ -59,7 +58,7 @@ type CredentialStore interface { // schemes. The handlers are tried in order, the higher priority authentication // methods should be first. The challengeMap holds a list of challenges for // a given root API endpoint (for example "https://registry-1.docker.io/v2/"). 
-func NewAuthorizer(manager challenge.Manager, handlers ...AuthenticationHandler) transport.RequestModifier { +func NewAuthorizer(manager ChallengeManager, handlers ...AuthenticationHandler) transport.RequestModifier { return &endpointAuthorizer{ challenges: manager, handlers: handlers, @@ -67,7 +66,7 @@ func NewAuthorizer(manager challenge.Manager, handlers ...AuthenticationHandler) } type endpointAuthorizer struct { - challenges challenge.Manager + challenges ChallengeManager handlers []AuthenticationHandler transport http.RoundTripper } @@ -95,11 +94,11 @@ func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error { if len(challenges) > 0 { for _, handler := range ea.handlers { - for _, c := range challenges { - if c.Scheme != handler.Scheme() { + for _, challenge := range challenges { + if challenge.Scheme != handler.Scheme() { continue } - if err := handler.AuthorizeRequest(req, c.Parameters); err != nil { + if err := handler.AuthorizeRequest(req, challenge.Parameters); err != nil { return err } } @@ -147,20 +146,13 @@ type Scope interface { // to a repository. type RepositoryScope struct { Repository string - Class string Actions []string } // String returns the string representation of the repository // using the scope grammar func (rs RepositoryScope) String() string { - repoType := "repository" - // Keep existing format for image class to maintain backwards compatibility - // with authorization servers which do not support the expanded grammar. 
- if rs.Class != "" && rs.Class != "image" { - repoType = fmt.Sprintf("%s(%s)", repoType, rs.Class) - } - return fmt.Sprintf("%s:%s:%s", repoType, rs.Repository, strings.Join(rs.Actions, ",")) + return fmt.Sprintf("repository:%s:%s", rs.Repository, strings.Join(rs.Actions, ",")) } // RegistryScope represents a token scope for access diff --git a/vendor/github.com/docker/distribution/registry/client/auth/session_test.go b/vendor/github.com/docker/distribution/registry/client/auth/session_test.go index 4f54c75cc..cfae4f978 100644 --- a/vendor/github.com/docker/distribution/registry/client/auth/session_test.go +++ b/vendor/github.com/docker/distribution/registry/client/auth/session_test.go @@ -9,7 +9,6 @@ import ( "testing" "time" - "github.com/docker/distribution/registry/client/auth/challenge" "github.com/docker/distribution/registry/client/transport" "github.com/docker/distribution/testutil" ) @@ -66,7 +65,7 @@ func testServerWithAuth(rrm testutil.RequestResponseMap, authenticate string, au // ping pings the provided endpoint to determine its required authorization challenges. // If a version header is provided, the versions will be returned. 
-func ping(manager challenge.Manager, endpoint, versionHeader string) ([]APIVersion, error) { +func ping(manager ChallengeManager, endpoint, versionHeader string) ([]APIVersion, error) { resp, err := http.Get(endpoint) if err != nil { return nil, err @@ -150,7 +149,7 @@ func TestEndpointAuthorizeToken(t *testing.T) { e, c := testServerWithAuth(m, authenicate, validCheck) defer c() - challengeManager1 := challenge.NewSimpleManager() + challengeManager1 := NewSimpleChallengeManager() versions, err := ping(challengeManager1, e+"/v2/", "x-api-version") if err != nil { t.Fatal(err) @@ -177,7 +176,7 @@ func TestEndpointAuthorizeToken(t *testing.T) { e2, c2 := testServerWithAuth(m, authenicate, validCheck) defer c2() - challengeManager2 := challenge.NewSimpleManager() + challengeManager2 := NewSimpleChallengeManager() versions, err = ping(challengeManager2, e2+"/v2/", "x-multi-api-version") if err != nil { t.Fatal(err) @@ -274,7 +273,7 @@ func TestEndpointAuthorizeRefreshToken(t *testing.T) { e, c := testServerWithAuth(m, authenicate, validCheck) defer c() - challengeManager1 := challenge.NewSimpleManager() + challengeManager1 := NewSimpleChallengeManager() versions, err := ping(challengeManager1, e+"/v2/", "x-api-version") if err != nil { t.Fatal(err) @@ -307,7 +306,7 @@ func TestEndpointAuthorizeRefreshToken(t *testing.T) { e2, c2 := testServerWithAuth(m, authenicate, validCheck) defer c2() - challengeManager2 := challenge.NewSimpleManager() + challengeManager2 := NewSimpleChallengeManager() versions, err = ping(challengeManager2, e2+"/v2/", "x-api-version") if err != nil { t.Fatal(err) @@ -340,7 +339,7 @@ func TestEndpointAuthorizeRefreshToken(t *testing.T) { e3, c3 := testServerWithAuth(m, authenicate, validCheck) defer c3() - challengeManager3 := challenge.NewSimpleManager() + challengeManager3 := NewSimpleChallengeManager() versions, err = ping(challengeManager3, e3+"/v2/", "x-api-version") if err != nil { t.Fatal(err) @@ -402,7 +401,7 @@ func 
TestEndpointAuthorizeV2RefreshToken(t *testing.T) { e, c := testServerWithAuth(m, authenicate, validCheck) defer c() - challengeManager1 := challenge.NewSimpleManager() + challengeManager1 := NewSimpleChallengeManager() versions, err := ping(challengeManager1, e+"/v2/", "x-api-version") if err != nil { t.Fatal(err) @@ -497,7 +496,7 @@ func TestEndpointAuthorizeTokenBasic(t *testing.T) { password: password, } - challengeManager := challenge.NewSimpleManager() + challengeManager := NewSimpleChallengeManager() _, err := ping(challengeManager, e+"/v2/", "") if err != nil { t.Fatal(err) @@ -615,7 +614,7 @@ func TestEndpointAuthorizeTokenBasicWithExpiresIn(t *testing.T) { password: password, } - challengeManager := challenge.NewSimpleManager() + challengeManager := NewSimpleChallengeManager() _, err := ping(challengeManager, e+"/v2/", "") if err != nil { t.Fatal(err) @@ -766,7 +765,7 @@ func TestEndpointAuthorizeTokenBasicWithExpiresInAndIssuedAt(t *testing.T) { password: password, } - challengeManager := challenge.NewSimpleManager() + challengeManager := NewSimpleChallengeManager() _, err := ping(challengeManager, e+"/v2/", "") if err != nil { t.Fatal(err) @@ -846,7 +845,7 @@ func TestEndpointAuthorizeBasic(t *testing.T) { password: password, } - challengeManager := challenge.NewSimpleManager() + challengeManager := NewSimpleChallengeManager() _, err := ping(challengeManager, e+"/v2/", "") if err != nil { t.Fatal(err) diff --git a/vendor/github.com/docker/distribution/registry/client/errors.go b/vendor/github.com/docker/distribution/registry/client/errors.go index 52d49d5d2..f73e3c230 100644 --- a/vendor/github.com/docker/distribution/registry/client/errors.go +++ b/vendor/github.com/docker/distribution/registry/client/errors.go @@ -9,7 +9,6 @@ import ( "net/http" "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/client/auth/challenge" ) // ErrNoErrorsInBody is returned when an HTTP response body parses to an empty @@ 
-83,52 +82,21 @@ func parseHTTPErrorResponse(statusCode int, r io.Reader) error { return errors } -func makeErrorList(err error) []error { - if errL, ok := err.(errcode.Errors); ok { - return []error(errL) - } - return []error{err} -} - -func mergeErrors(err1, err2 error) error { - return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...)) -} - // HandleErrorResponse returns error parsed from HTTP response for an // unsuccessful HTTP response code (in the range 400 - 499 inclusive). An // UnexpectedHTTPStatusError returned for response code outside of expected // range. func HandleErrorResponse(resp *http.Response) error { - if resp.StatusCode >= 400 && resp.StatusCode < 500 { - // Check for OAuth errors within the `WWW-Authenticate` header first - // See https://tools.ietf.org/html/rfc6750#section-3 - for _, c := range challenge.ResponseChallenges(resp) { - if c.Scheme == "bearer" { - var err errcode.Error - // codes defined at https://tools.ietf.org/html/rfc6750#section-3.1 - switch c.Parameters["error"] { - case "invalid_token": - err.Code = errcode.ErrorCodeUnauthorized - case "insufficient_scope": - err.Code = errcode.ErrorCodeDenied - default: - continue - } - if description := c.Parameters["error_description"]; description != "" { - err.Message = description - } else { - err.Message = err.Code.Message() - } - - return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body)) - } - } + if resp.StatusCode == 401 { err := parseHTTPErrorResponse(resp.StatusCode, resp.Body) - if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 { + if uErr, ok := err.(*UnexpectedHTTPResponseError); ok { return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) } return err } + if resp.StatusCode >= 400 && resp.StatusCode < 500 { + return parseHTTPErrorResponse(resp.StatusCode, resp.Body) + } return &UnexpectedHTTPStatusError{Status: resp.Status} } diff --git 
a/vendor/github.com/docker/distribution/registry/client/repository.go b/vendor/github.com/docker/distribution/registry/client/repository.go index b82a968e2..1ebd0b183 100644 --- a/vendor/github.com/docker/distribution/registry/client/repository.go +++ b/vendor/github.com/docker/distribution/registry/client/repository.go @@ -15,12 +15,12 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/client/transport" "github.com/docker/distribution/registry/storage/cache" "github.com/docker/distribution/registry/storage/cache/memory" - "github.com/opencontainers/go-digest" ) // Registry provides an interface for calling Repositories, which returns a catalog of repositories. @@ -268,7 +268,7 @@ func descriptorFromResponse(response *http.Response) (distribution.Descriptor, e return desc, nil } - dgst, err := digest.Parse(digestHeader) + dgst, err := digest.ParseDigest(digestHeader) if err != nil { return distribution.Descriptor{}, err } @@ -475,7 +475,7 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis return nil, distribution.ErrManifestNotModified } else if SuccessStatus(resp.StatusCode) { if contentDgst != nil { - dgst, err := digest.Parse(resp.Header.Get("Docker-Content-Digest")) + dgst, err := digest.ParseDigest(resp.Header.Get("Docker-Content-Digest")) if err == nil { *contentDgst = dgst } @@ -553,7 +553,7 @@ func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options . 
if SuccessStatus(resp.StatusCode) { dgstHeader := resp.Header.Get("Docker-Content-Digest") - dgst, err := digest.Parse(dgstHeader) + dgst, err := digest.ParseDigest(dgstHeader) if err != nil { return "", err } @@ -661,7 +661,7 @@ func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribut if err != nil { return distribution.Descriptor{}, err } - dgstr := digest.Canonical.Digester() + dgstr := digest.Canonical.New() n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash())) if err != nil { return distribution.Descriptor{}, err diff --git a/vendor/github.com/docker/distribution/registry/client/repository_test.go b/vendor/github.com/docker/distribution/registry/client/repository_test.go index f22fa33d4..a232e03ec 100644 --- a/vendor/github.com/docker/distribution/registry/client/repository_test.go +++ b/vendor/github.com/docker/distribution/registry/client/repository_test.go @@ -16,6 +16,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/reference" @@ -24,7 +25,6 @@ import ( "github.com/docker/distribution/testutil" "github.com/docker/distribution/uuid" "github.com/docker/libtrust" - "github.com/opencontainers/go-digest" ) func testServer(rrm testutil.RequestResponseMap) (string, func()) { @@ -100,7 +100,7 @@ func addTestCatalog(route string, content []byte, link string, m *testutil.Reque func TestBlobDelete(t *testing.T) { dgst, _ := newRandomBlob(1024) var m testutil.RequestResponseMap - repo, _ := reference.WithName("test.example.com/repo1") + repo, _ := reference.ParseNamed("test.example.com/repo1") m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "DELETE", @@ -139,7 +139,7 @@ func TestBlobFetch(t *testing.T) { defer c() ctx := context.Background() - repo, _ := 
reference.WithName("test.example.com/repo1") + repo, _ := reference.ParseNamed("test.example.com/repo1") r, err := NewRepository(ctx, repo, e, nil) if err != nil { t.Fatal(err) @@ -160,7 +160,7 @@ func TestBlobFetch(t *testing.T) { func TestBlobExistsNoContentLength(t *testing.T) { var m testutil.RequestResponseMap - repo, _ := reference.WithName("biff") + repo, _ := reference.ParseNamed("biff") dgst, content := newRandomBlob(1024) m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ @@ -219,7 +219,7 @@ func TestBlobExists(t *testing.T) { defer c() ctx := context.Background() - repo, _ := reference.WithName("test.example.com/repo1") + repo, _ := reference.ParseNamed("test.example.com/repo1") r, err := NewRepository(ctx, repo, e, nil) if err != nil { t.Fatal(err) @@ -251,7 +251,7 @@ func TestBlobUploadChunked(t *testing.T) { b1[512:513], b1[513:1024], } - repo, _ := reference.WithName("test.example.com/uploadrepo") + repo, _ := reference.ParseNamed("test.example.com/uploadrepo") uuids := []string{uuid.Generate().String()} m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ @@ -366,7 +366,7 @@ func TestBlobUploadChunked(t *testing.T) { func TestBlobUploadMonolithic(t *testing.T) { dgst, b1 := newRandomBlob(1024) var m testutil.RequestResponseMap - repo, _ := reference.WithName("test.example.com/uploadrepo") + repo, _ := reference.ParseNamed("test.example.com/uploadrepo") uploadID := uuid.Generate().String() m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ @@ -474,9 +474,9 @@ func TestBlobUploadMonolithic(t *testing.T) { func TestBlobMount(t *testing.T) { dgst, content := newRandomBlob(1024) var m testutil.RequestResponseMap - repo, _ := reference.WithName("test.example.com/uploadrepo") + repo, _ := reference.ParseNamed("test.example.com/uploadrepo") - sourceRepo, _ := reference.WithName("test.example.com/sourcerepo") + sourceRepo, _ := reference.ParseNamed("test.example.com/sourcerepo") canonicalRef, 
_ := reference.WithDigest(sourceRepo, dgst) m = append(m, testutil.RequestResponseMapping{ @@ -678,7 +678,7 @@ func checkEqualManifest(m1, m2 *schema1.SignedManifest) error { func TestV1ManifestFetch(t *testing.T) { ctx := context.Background() - repo, _ := reference.WithName("test.example.com/repo") + repo, _ := reference.ParseNamed("test.example.com/repo") m1, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap _, pl, err := m1.Payload() @@ -755,7 +755,7 @@ func TestV1ManifestFetch(t *testing.T) { } func TestManifestFetchWithEtag(t *testing.T) { - repo, _ := reference.WithName("test.example.com/repo/by/tag") + repo, _ := reference.ParseNamed("test.example.com/repo/by/tag") _, d1, p1 := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap addTestManifestWithEtag(repo, "latest", p1, &m, d1.String()) @@ -785,7 +785,7 @@ func TestManifestFetchWithEtag(t *testing.T) { } func TestManifestDelete(t *testing.T) { - repo, _ := reference.WithName("test.example.com/repo/delete") + repo, _ := reference.ParseNamed("test.example.com/repo/delete") _, dgst1, _ := newRandomSchemaV1Manifest(repo, "latest", 6) _, dgst2, _ := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap @@ -825,7 +825,7 @@ func TestManifestDelete(t *testing.T) { } func TestManifestPut(t *testing.T) { - repo, _ := reference.WithName("test.example.com/repo/delete") + repo, _ := reference.ParseNamed("test.example.com/repo/delete") m1, dgst, _ := newRandomSchemaV1Manifest(repo, "other", 6) _, payload, err := m1.Payload() @@ -890,7 +890,7 @@ func TestManifestPut(t *testing.T) { } func TestManifestTags(t *testing.T) { - repo, _ := reference.WithName("test.example.com/repo/tags/list") + repo, _ := reference.ParseNamed("test.example.com/repo/tags/list") tagsList := []byte(strings.TrimSpace(` { "name": "test.example.com/repo/tags/list", @@ -952,7 +952,7 @@ func TestManifestTags(t *testing.T) { } func 
TestObtainsErrorForMissingTag(t *testing.T) { - repo, _ := reference.WithName("test.example.com/repo") + repo, _ := reference.ParseNamed("test.example.com/repo") var m testutil.RequestResponseMap var errors errcode.Errors @@ -998,7 +998,7 @@ func TestManifestTagsPaginated(t *testing.T) { s := httptest.NewServer(http.NotFoundHandler()) defer s.Close() - repo, _ := reference.WithName("test.example.com/repo/tags/list") + repo, _ := reference.ParseNamed("test.example.com/repo/tags/list") tagsList := []string{"tag1", "tag2", "funtag"} var m testutil.RequestResponseMap for i := 0; i < 3; i++ { @@ -1067,7 +1067,7 @@ func TestManifestTagsPaginated(t *testing.T) { } func TestManifestUnauthorized(t *testing.T) { - repo, _ := reference.WithName("test.example.com/repo") + repo, _ := reference.ParseNamed("test.example.com/repo") _, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap diff --git a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go index e5ff09d75..e1b17a03a 100644 --- a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go +++ b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go @@ -181,7 +181,6 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) { // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range")) } - req.Header.Add("Accept-Encoding", "identity") resp, err := hrs.client.Do(req) if err != nil { return nil, err diff --git a/vendor/github.com/docker/distribution/registry/handlers/api_test.go b/vendor/github.com/docker/distribution/registry/handlers/api_test.go index c8e756de6..9d64fbbf4 100644 --- a/vendor/github.com/docker/distribution/registry/handlers/api_test.go +++ b/vendor/github.com/docker/distribution/registry/handlers/api_test.go @@ -3,7 +3,6 @@ package handlers import ( "bytes" "encoding/json" - "errors" "fmt" "io" "io/ioutil" 
@@ -22,6 +21,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/configuration" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/schema1" @@ -29,24 +29,16 @@ import ( "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/factory" _ "github.com/docker/distribution/registry/storage/driver/testdriver" "github.com/docker/distribution/testutil" "github.com/docker/libtrust" "github.com/gorilla/handlers" - "github.com/opencontainers/go-digest" ) var headerConfig = http.Header{ "X-Content-Type-Options": []string{"nosniff"}, } -const ( - // digestSha256EmptyTar is the canonical sha256 digest of empty data - digestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" -) - // TestCheckAPI hits the base endpoint (/v2/) ensures we return the specified // 200 OK response. 
func TestCheckAPI(t *testing.T) { @@ -280,7 +272,7 @@ func makeBlobArgs(t *testing.T) blobArgs { layerFile: layerFile, layerDigest: layerDigest, } - args.imageName, _ = reference.WithName("foo/bar") + args.imageName, _ = reference.ParseNamed("foo/bar") return args } @@ -522,7 +514,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { // Now, push just a chunk layerFile.Seek(0, 0) - canonicalDigester := digest.Canonical.Digester() + canonicalDigester := digest.Canonical.New() if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil { t.Fatalf("error copying to digest: %v", err) } @@ -560,7 +552,10 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { }) // Verify the body - verifier := layerDigest.Verifier() + verifier, err := digest.NewDigestVerifier(layerDigest) + if err != nil { + t.Fatalf("unexpected error getting digest verifier: %s", err) + } io.Copy(verifier, resp.Body) if !verifier.Verified() { @@ -680,7 +675,7 @@ func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) { pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) layerFile.Seek(0, os.SEEK_SET) - canonicalDigester := digest.Canonical.Digester() + canonicalDigester := digest.Canonical.New() if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil { t.Fatalf("error copying to digest: %v", err) } @@ -705,7 +700,7 @@ func TestDeleteDisabled(t *testing.T) { env := newTestEnv(t, false) defer env.Shutdown() - imageName, _ := reference.WithName("foo/bar") + imageName, _ := reference.ParseNamed("foo/bar") // "build" our layer file layerFile, layerDigest, err := testutil.CreateRandomTarFile() if err != nil { @@ -732,7 +727,7 @@ func TestDeleteReadOnly(t *testing.T) { env := newTestEnv(t, true) defer env.Shutdown() - imageName, _ := reference.WithName("foo/bar") + imageName, _ := reference.ParseNamed("foo/bar") // "build" our layer file layerFile, layerDigest, err := testutil.CreateRandomTarFile() if err != nil { 
@@ -762,7 +757,7 @@ func TestStartPushReadOnly(t *testing.T) { defer env.Shutdown() env.app.readOnly = true - imageName, _ := reference.WithName("foo/bar") + imageName, _ := reference.ParseNamed("foo/bar") layerUploadURL, err := env.builder.BuildBlobUploadURL(imageName) if err != nil { @@ -800,8 +795,8 @@ type manifestArgs struct { } func TestManifestAPI(t *testing.T) { - schema1Repo, _ := reference.WithName("foo/schema1") - schema2Repo, _ := reference.WithName("foo/schema2") + schema1Repo, _ := reference.ParseNamed("foo/schema1") + schema2Repo, _ := reference.ParseNamed("foo/schema2") deleteEnabled := false env1 := newTestEnv(t, deleteEnabled) @@ -818,96 +813,9 @@ func TestManifestAPI(t *testing.T) { testManifestAPIManifestList(t, env2, schema2Args) } -// storageManifestErrDriverFactory implements the factory.StorageDriverFactory interface. -type storageManifestErrDriverFactory struct{} - -const ( - repositoryWithManifestNotFound = "manifesttagnotfound" - repositoryWithManifestInvalidPath = "manifestinvalidpath" - repositoryWithManifestBadLink = "manifestbadlink" - repositoryWithGenericStorageError = "genericstorageerr" -) - -func (factory *storageManifestErrDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - // Initialize the mock driver - var errGenericStorage = errors.New("generic storage error") - return &mockErrorDriver{ - returnErrs: []mockErrorMapping{ - { - pathMatch: fmt.Sprintf("%s/_manifests/tags", repositoryWithManifestNotFound), - content: nil, - err: storagedriver.PathNotFoundError{}, - }, - { - pathMatch: fmt.Sprintf("%s/_manifests/tags", repositoryWithManifestInvalidPath), - content: nil, - err: storagedriver.InvalidPathError{}, - }, - { - pathMatch: fmt.Sprintf("%s/_manifests/tags", repositoryWithManifestBadLink), - content: []byte("this is a bad sha"), - err: nil, - }, - { - pathMatch: fmt.Sprintf("%s/_manifests/tags", repositoryWithGenericStorageError), - content: nil, - err: errGenericStorage, - }, 
- }, - }, nil -} - -type mockErrorMapping struct { - pathMatch string - content []byte - err error -} - -// mockErrorDriver implements StorageDriver to force storage error on manifest request -type mockErrorDriver struct { - storagedriver.StorageDriver - returnErrs []mockErrorMapping -} - -func (dr *mockErrorDriver) GetContent(ctx context.Context, path string) ([]byte, error) { - for _, returns := range dr.returnErrs { - if strings.Contains(path, returns.pathMatch) { - return returns.content, returns.err - } - } - return nil, errors.New("Unknown storage error") -} - -func TestGetManifestWithStorageError(t *testing.T) { - factory.Register("storagemanifesterror", &storageManifestErrDriverFactory{}) - config := configuration.Configuration{ - Storage: configuration.Storage{ - "storagemanifesterror": configuration.Parameters{}, - "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ - "enabled": false, - }}, - }, - } - config.HTTP.Headers = headerConfig - env1 := newTestEnvWithConfig(t, &config) - defer env1.Shutdown() - - repo, _ := reference.WithName(repositoryWithManifestNotFound) - testManifestWithStorageError(t, env1, repo, http.StatusNotFound, v2.ErrorCodeManifestUnknown) - - repo, _ = reference.WithName(repositoryWithGenericStorageError) - testManifestWithStorageError(t, env1, repo, http.StatusInternalServerError, errcode.ErrorCodeUnknown) - - repo, _ = reference.WithName(repositoryWithManifestInvalidPath) - testManifestWithStorageError(t, env1, repo, http.StatusInternalServerError, errcode.ErrorCodeUnknown) - - repo, _ = reference.WithName(repositoryWithManifestBadLink) - testManifestWithStorageError(t, env1, repo, http.StatusInternalServerError, errcode.ErrorCodeUnknown) -} - func TestManifestDelete(t *testing.T) { - schema1Repo, _ := reference.WithName("foo/schema1") - schema2Repo, _ := reference.WithName("foo/schema2") + schema1Repo, _ := reference.ParseNamed("foo/schema1") + schema2Repo, _ := 
reference.ParseNamed("foo/schema2") deleteEnabled := true env := newTestEnv(t, deleteEnabled) @@ -919,7 +827,7 @@ func TestManifestDelete(t *testing.T) { } func TestManifestDeleteDisabled(t *testing.T) { - schema1Repo, _ := reference.WithName("foo/schema1") + schema1Repo, _ := reference.ParseNamed("foo/schema1") deleteEnabled := false env := newTestEnv(t, deleteEnabled) defer env.Shutdown() @@ -927,7 +835,7 @@ func TestManifestDeleteDisabled(t *testing.T) { } func testManifestDeleteDisabled(t *testing.T, env *testEnv, imageName reference.Named) { - ref, _ := reference.WithDigest(imageName, digestSha256EmptyTar) + ref, _ := reference.WithDigest(imageName, digest.DigestSha256EmptyTar) manifestURL, err := env.builder.BuildManifestURL(ref) if err != nil { t.Fatalf("unexpected error getting manifest url: %v", err) @@ -942,26 +850,6 @@ func testManifestDeleteDisabled(t *testing.T, env *testEnv, imageName reference. checkResponse(t, "status of disabled delete of manifest", resp, http.StatusMethodNotAllowed) } -func testManifestWithStorageError(t *testing.T, env *testEnv, imageName reference.Named, expectedStatusCode int, expectedErrorCode errcode.ErrorCode) { - tag := "latest" - tagRef, _ := reference.WithTag(imageName, tag) - manifestURL, err := env.builder.BuildManifestURL(tagRef) - if err != nil { - t.Fatalf("unexpected error getting manifest url: %v", err) - } - - // ----------------------------- - // Attempt to fetch the manifest - resp, err := http.Get(manifestURL) - if err != nil { - t.Fatalf("unexpected error getting manifest: %v", err) - } - defer resp.Body.Close() - checkResponse(t, "getting non-existent manifest", resp, expectedStatusCode) - checkBodyHasErrorCodes(t, "getting non-existent manifest", resp, expectedErrorCode) - return -} - func testManifestAPISchema1(t *testing.T, env *testEnv, imageName reference.Named) manifestArgs { tag := "thetag" args := manifestArgs{imageName: imageName} @@ -1330,7 +1218,7 @@ func testManifestAPISchema2(t *testing.T, env 
*testEnv, imageName reference.Name Config: distribution.Descriptor{ Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", Size: 3253, - MediaType: schema2.MediaTypeImageConfig, + MediaType: schema2.MediaTypeConfig, }, Layers: []distribution.Descriptor{ { @@ -2187,7 +2075,7 @@ func doPushLayer(t *testing.T, ub *v2.URLBuilder, name reference.Named, dgst dig // pushLayer pushes the layer content returning the url on success. func pushLayer(t *testing.T, ub *v2.URLBuilder, name reference.Named, dgst digest.Digest, uploadURLBase string, body io.Reader) string { - digester := digest.Canonical.Digester() + digester := digest.Canonical.New() resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, io.TeeReader(body, digester.Hash())) if err != nil { @@ -2254,7 +2142,7 @@ func doPushChunk(t *testing.T, uploadURLBase string, body io.Reader) (*http.Resp uploadURL := u.String() - digester := digest.Canonical.Digester() + digester := digest.Canonical.New() req, err := http.NewRequest("PATCH", uploadURL, io.TeeReader(body, digester.Hash())) if err != nil { @@ -2403,7 +2291,7 @@ func checkErr(t *testing.T, err error, msg string) { } func createRepository(env *testEnv, t *testing.T, imageName string, tag string) digest.Digest { - imageNameRef, err := reference.WithName(imageName) + imageNameRef, err := reference.ParseNamed(imageName) if err != nil { t.Fatalf("unable to parse reference: %v", err) } @@ -2474,7 +2362,7 @@ func TestRegistryAsCacheMutationAPIs(t *testing.T) { env := newTestEnvMirror(t, deleteEnabled) defer env.Shutdown() - imageName, _ := reference.WithName("foo/bar") + imageName, _ := reference.ParseNamed("foo/bar") tag := "latest" tagRef, _ := reference.WithTag(imageName, tag) manifestURL, err := env.builder.BuildManifestURL(tagRef) @@ -2520,7 +2408,7 @@ func TestRegistryAsCacheMutationAPIs(t *testing.T) { checkResponse(t, fmt.Sprintf("starting layer push to cache %v", imageName), resp, 
errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) // Blob Delete - ref, _ := reference.WithDigest(imageName, digestSha256EmptyTar) + ref, _ := reference.WithDigest(imageName, digest.DigestSha256EmptyTar) blobURL, err := env.builder.BuildBlobURL(ref) resp, err = httpDelete(blobURL) checkResponse(t, "deleting blob from cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) @@ -2567,7 +2455,7 @@ func TestProxyManifestGetByTag(t *testing.T) { } truthConfig.HTTP.Headers = headerConfig - imageName, _ := reference.WithName("foo/bar") + imageName, _ := reference.ParseNamed("foo/bar") tag := "latest" truthEnv := newTestEnvWithConfig(t, &truthConfig) diff --git a/vendor/github.com/docker/distribution/registry/handlers/app.go b/vendor/github.com/docker/distribution/registry/handlers/app.go index 5b4c9f37d..4df15ae6e 100644 --- a/vendor/github.com/docker/distribution/registry/handlers/app.go +++ b/vendor/github.com/docker/distribution/registry/handlers/app.go @@ -100,7 +100,7 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App { app.register(v2.RouteNameBase, func(ctx *Context, r *http.Request) http.Handler { return http.HandlerFunc(apiBase) }) - app.register(v2.RouteNameManifest, manifestDispatcher) + app.register(v2.RouteNameManifest, imageManifestDispatcher) app.register(v2.RouteNameCatalog, catalogDispatcher) app.register(v2.RouteNameTags, tagsDispatcher) app.register(v2.RouteNameBlob, blobDispatcher) @@ -213,10 +213,6 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App { options = append(options, storage.EnableRedirect) } - if !config.Validation.Enabled { - config.Validation.Enabled = !config.Validation.Disabled - } - // configure validation if config.Validation.Enabled { if len(config.Validation.Manifests.URLs.Allow) == 0 && len(config.Validation.Manifests.URLs.Deny) == 0 { @@ -345,7 +341,7 @@ func (app *App) RegisterHealthChecks(healthRegistries ...*health.Registry) { } storageDriverCheck := 
func() error { - _, err := app.driver.Stat(app, "/") // "/" should always exist + _, err := app.driver.List(app, "/") // "/" should always exist return err // any error will be treated as failure } @@ -465,8 +461,6 @@ func (app *App) configureEvents(configuration *configuration.Configuration) { } } -type redisStartAtKey struct{} - func (app *App) configureRedis(configuration *configuration.Configuration) { if configuration.Redis.Addr == "" { ctxu.GetLogger(app).Infof("redis not configured") @@ -476,11 +470,11 @@ func (app *App) configureRedis(configuration *configuration.Configuration) { pool := &redis.Pool{ Dial: func() (redis.Conn, error) { // TODO(stevvooe): Yet another use case for contextual timing. - ctx := context.WithValue(app, redisStartAtKey{}, time.Now()) + ctx := context.WithValue(app, "redis.connect.startedat", time.Now()) done := func(err error) { logger := ctxu.GetLoggerWithField(ctx, "redis.connect.duration", - ctxu.Since(ctx, redisStartAtKey{})) + ctxu.Since(ctx, "redis.connect.startedat")) if err != nil { logger.Errorf("redis: error connecting: %v", err) } else { @@ -596,19 +590,24 @@ func (app *App) configureSecret(configuration *configuration.Configuration) { func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() // ensure that request body is always closed. - // Prepare the context with our own little decorations. - ctx := r.Context() - ctx = ctxu.WithRequest(ctx, r) - ctx, w = ctxu.WithResponseWriter(ctx, w) - ctx = ctxu.WithLogger(ctx, ctxu.GetRequestLogger(ctx)) - r = r.WithContext(ctx) + // Instantiate an http context here so we can track the error codes + // returned by the request router. 
+ ctx := defaultContextManager.context(app, w, r) defer func() { status, ok := ctx.Value("http.response.status").(int) if ok && status >= 200 && status <= 399 { - ctxu.GetResponseLogger(r.Context()).Infof("response completed") + ctxu.GetResponseLogger(ctx).Infof("response completed") } }() + defer defaultContextManager.release(ctx) + + // NOTE(stevvooe): Total hack to get instrumented responsewriter from context. + var err error + w, err = ctxu.GetResponseWriter(ctx) + if err != nil { + ctxu.GetLogger(ctx).Warnf("response writer not found in context") + } // Set a header with the Docker Distribution API Version for all responses. w.Header().Add("Docker-Distribution-API-Version", "registry/2.0") @@ -644,11 +643,8 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { // Add username to request logging context.Context = ctxu.WithLogger(context.Context, ctxu.GetLogger(context.Context, auth.UserNameKey)) - // sync up context on the request. - r = r.WithContext(context) - if app.nameRequired(r) { - nameRef, err := reference.WithName(getName(context)) + nameRef, err := reference.ParseNamed(getName(context)) if err != nil { ctxu.GetLogger(context).Errorf("error parsing reference from context: %v", err) context.Errors = append(context.Errors, distribution.ErrRepositoryNameInvalid{ @@ -711,18 +707,6 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { }) } -type errCodeKey struct{} - -func (errCodeKey) String() string { return "err.code" } - -type errMessageKey struct{} - -func (errMessageKey) String() string { return "err.message" } - -type errDetailKey struct{} - -func (errDetailKey) String() string { return "err.detail" } - func (app *App) logError(context context.Context, errors errcode.Errors) { for _, e1 := range errors { var c ctxu.Context @@ -730,23 +714,23 @@ func (app *App) logError(context context.Context, errors errcode.Errors) { switch e1.(type) { case errcode.Error: e, _ := e1.(errcode.Error) - c = ctxu.WithValue(context, 
errCodeKey{}, e.Code) - c = ctxu.WithValue(c, errMessageKey{}, e.Code.Message()) - c = ctxu.WithValue(c, errDetailKey{}, e.Detail) + c = ctxu.WithValue(context, "err.code", e.Code) + c = ctxu.WithValue(c, "err.message", e.Code.Message()) + c = ctxu.WithValue(c, "err.detail", e.Detail) case errcode.ErrorCode: e, _ := e1.(errcode.ErrorCode) - c = ctxu.WithValue(context, errCodeKey{}, e) - c = ctxu.WithValue(c, errMessageKey{}, e.Message()) + c = ctxu.WithValue(context, "err.code", e) + c = ctxu.WithValue(c, "err.message", e.Message()) default: // just normal go 'error' - c = ctxu.WithValue(context, errCodeKey{}, errcode.ErrorCodeUnknown) - c = ctxu.WithValue(c, errMessageKey{}, e1.Error()) + c = ctxu.WithValue(context, "err.code", errcode.ErrorCodeUnknown) + c = ctxu.WithValue(c, "err.message", e1.Error()) } c = ctxu.WithLogger(c, ctxu.GetLogger(c, - errCodeKey{}, - errMessageKey{}, - errDetailKey{})) + "err.code", + "err.message", + "err.detail")) ctxu.GetResponseLogger(c).Errorf("response completed with error") } } @@ -754,7 +738,7 @@ func (app *App) logError(context context.Context, errors errcode.Errors) { // context constructs the context object for the application. This only be // called once per request. func (app *App) context(w http.ResponseWriter, r *http.Request) *Context { - ctx := r.Context() + ctx := defaultContextManager.context(app, w, r) ctx = ctxu.WithVars(ctx, r) ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx, "vars.name", @@ -859,11 +843,8 @@ func (app *App) eventBridge(ctx *Context, r *http.Request) notifications.Listene // nameRequired returns true if the route requires a name. 
func (app *App) nameRequired(r *http.Request) bool { route := mux.CurrentRoute(r) - if route == nil { - return true - } routeName := route.GetName() - return routeName != v2.RouteNameBase && routeName != v2.RouteNameCatalog + return route == nil || (routeName != v2.RouteNameBase && routeName != v2.RouteNameCatalog) } // apiBase implements a simple yes-man for doing overall checks against the @@ -902,10 +883,12 @@ func appendAccessRecords(records []auth.Access, method string, repo string) []au Action: "push", }) case "DELETE": + // DELETE access requires full admin rights, which is represented + // as "*". This may not be ideal. records = append(records, auth.Access{ Resource: resource, - Action: "delete", + Action: "*", }) } return records diff --git a/vendor/github.com/docker/distribution/registry/handlers/app_test.go b/vendor/github.com/docker/distribution/registry/handlers/app_test.go index 12c0b61c1..385fa4c6b 100644 --- a/vendor/github.com/docker/distribution/registry/handlers/app_test.go +++ b/vendor/github.com/docker/distribution/registry/handlers/app_test.go @@ -229,9 +229,9 @@ func TestAppendAccessRecords(t *testing.T) { Resource: expectedResource, Action: "push", } - expectedDeleteRecord := auth.Access{ + expectedAllRecord := auth.Access{ Resource: expectedResource, - Action: "delete", + Action: "*", } records := []auth.Access{} @@ -271,7 +271,7 @@ func TestAppendAccessRecords(t *testing.T) { records = []auth.Access{} result = appendAccessRecords(records, "DELETE", repo) - expectedResult = []auth.Access{expectedDeleteRecord} + expectedResult = []auth.Access{expectedAllRecord} if ok := reflect.DeepEqual(result, expectedResult); !ok { t.Fatalf("Actual access record differs from expected") } diff --git a/vendor/github.com/docker/distribution/registry/handlers/blob.go b/vendor/github.com/docker/distribution/registry/handlers/blob.go index 5c31cc767..fb250acd2 100644 --- a/vendor/github.com/docker/distribution/registry/handlers/blob.go +++ 
b/vendor/github.com/docker/distribution/registry/handlers/blob.go @@ -5,10 +5,10 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/gorilla/handlers" - "github.com/opencontainers/go-digest" ) // blobDispatcher uses the request context to build a blobHandler. diff --git a/vendor/github.com/docker/distribution/registry/handlers/blobupload.go b/vendor/github.com/docker/distribution/registry/handlers/blobupload.go index 963fe4e7d..3afb47398 100644 --- a/vendor/github.com/docker/distribution/registry/handlers/blobupload.go +++ b/vendor/github.com/docker/distribution/registry/handlers/blobupload.go @@ -7,12 +7,12 @@ import ( "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/storage" "github.com/gorilla/handlers" - "github.com/opencontainers/go-digest" ) // blobUploadDispatcher constructs and returns the blob upload handler for the @@ -211,7 +211,7 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht return } - dgst, err := digest.Parse(dgstStr) + dgst, err := digest.ParseDigest(dgstStr) if err != nil { // no digest? return error, but allow retry. buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail("digest parsing failed")) @@ -329,12 +329,12 @@ func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http. // successful, the blob is linked into the blob store and 201 Created is // returned with the canonical url of the blob. 
func (buh *blobUploadHandler) createBlobMountOption(fromRepo, mountDigest string) (distribution.BlobCreateOption, error) { - dgst, err := digest.Parse(mountDigest) + dgst, err := digest.ParseDigest(mountDigest) if err != nil { return nil, err } - ref, err := reference.WithName(fromRepo) + ref, err := reference.ParseNamed(fromRepo) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/distribution/registry/handlers/context.go b/vendor/github.com/docker/distribution/registry/handlers/context.go index 6c1be5b31..552db2df6 100644 --- a/vendor/github.com/docker/distribution/registry/handlers/context.go +++ b/vendor/github.com/docker/distribution/registry/handlers/context.go @@ -3,13 +3,14 @@ package handlers import ( "fmt" "net/http" + "sync" "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/auth" - "github.com/opencontainers/go-digest" "golang.org/x/net/context" ) @@ -61,7 +62,7 @@ func getDigest(ctx context.Context) (dgst digest.Digest, err error) { return "", errDigestNotAvailable } - d, err := digest.Parse(dgstStr) + d, err := digest.ParseDigest(dgstStr) if err != nil { ctxu.GetLogger(ctx).Errorf("error parsing digest=%q: %v", dgstStr, err) return "", err @@ -90,3 +91,62 @@ func getUserName(ctx context.Context, r *http.Request) string { return username } + +// contextManager allows us to associate net/context.Context instances with a +// request, based on the memory identity of http.Request. This prepares http- +// level context, which is not application specific. If this is called, +// (*contextManager).release must be called on the context when the request is +// completed. +// +// Providing this circumvents a lot of necessity for dispatchers with the +// benefit of instantiating the request context much earlier. 
+// +// TODO(stevvooe): Consider making this facility a part of the context package. +type contextManager struct { + contexts map[*http.Request]context.Context + mu sync.Mutex +} + +// defaultContextManager is just a global instance to register request contexts. +var defaultContextManager = newContextManager() + +func newContextManager() *contextManager { + return &contextManager{ + contexts: make(map[*http.Request]context.Context), + } +} + +// context either returns a new context or looks it up in the manager. +func (cm *contextManager) context(parent context.Context, w http.ResponseWriter, r *http.Request) context.Context { + cm.mu.Lock() + defer cm.mu.Unlock() + + ctx, ok := cm.contexts[r] + if ok { + return ctx + } + + if parent == nil { + parent = ctxu.Background() + } + + ctx = ctxu.WithRequest(parent, r) + ctx, w = ctxu.WithResponseWriter(ctx, w) + ctx = ctxu.WithLogger(ctx, ctxu.GetRequestLogger(ctx)) + cm.contexts[r] = ctx + + return ctx +} + +// releases frees any associated with resources from request. 
+func (cm *contextManager) release(ctx context.Context) { + cm.mu.Lock() + defer cm.mu.Unlock() + + r, err := ctxu.GetRequest(ctx) + if err != nil { + ctxu.GetLogger(ctx).Errorf("no request found in context during release") + return + } + delete(cm.contexts, r) +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/manifests.go b/vendor/github.com/docker/distribution/registry/handlers/images.go similarity index 72% rename from vendor/github.com/docker/distribution/registry/handlers/manifests.go rename to vendor/github.com/docker/distribution/registry/handlers/images.go index f406903fa..df7f869be 100644 --- a/vendor/github.com/docker/distribution/registry/handlers/manifests.go +++ b/vendor/github.com/docker/distribution/registry/handlers/images.go @@ -8,15 +8,14 @@ import ( "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/auth" "github.com/gorilla/handlers" - "github.com/opencontainers/go-digest" ) // These constants determine which architecture and OS to choose from a @@ -26,36 +25,36 @@ const ( defaultOS = "linux" ) -// manifestDispatcher takes the request context and builds the -// appropriate handler for handling manifest requests. -func manifestDispatcher(ctx *Context, r *http.Request) http.Handler { - manifestHandler := &manifestHandler{ +// imageManifestDispatcher takes the request context and builds the +// appropriate handler for handling image manifest requests. 
+func imageManifestDispatcher(ctx *Context, r *http.Request) http.Handler { + imageManifestHandler := &imageManifestHandler{ Context: ctx, } reference := getReference(ctx) - dgst, err := digest.Parse(reference) + dgst, err := digest.ParseDigest(reference) if err != nil { // We just have a tag - manifestHandler.Tag = reference + imageManifestHandler.Tag = reference } else { - manifestHandler.Digest = dgst + imageManifestHandler.Digest = dgst } mhandler := handlers.MethodHandler{ - "GET": http.HandlerFunc(manifestHandler.GetManifest), - "HEAD": http.HandlerFunc(manifestHandler.GetManifest), + "GET": http.HandlerFunc(imageManifestHandler.GetImageManifest), + "HEAD": http.HandlerFunc(imageManifestHandler.GetImageManifest), } if !ctx.readOnly { - mhandler["PUT"] = http.HandlerFunc(manifestHandler.PutManifest) - mhandler["DELETE"] = http.HandlerFunc(manifestHandler.DeleteManifest) + mhandler["PUT"] = http.HandlerFunc(imageManifestHandler.PutImageManifest) + mhandler["DELETE"] = http.HandlerFunc(imageManifestHandler.DeleteImageManifest) } return mhandler } -// manifestHandler handles http operations on image manifests. -type manifestHandler struct { +// imageManifestHandler handles http operations on image manifests. +type imageManifestHandler struct { *Context // One of tag or digest gets set, depending on what is present in context. @@ -63,8 +62,8 @@ type manifestHandler struct { Digest digest.Digest } -// GetManifest fetches the image manifest from the storage backend, if it exists. -func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) { +// GetImageManifest fetches the image manifest from the storage backend, if it exists. 
+func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(imh).Debug("GetImageManifest") manifests, err := imh.Repository.Manifests(imh) if err != nil { @@ -77,11 +76,7 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) tags := imh.Repository.Tags(imh) desc, err := tags.Get(imh, imh.Tag) if err != nil { - if _, ok := err.(distribution.ErrTagUnknown); ok { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) - } else { - imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - } + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) return } imh.Digest = desc.Digest @@ -98,11 +93,7 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) } manifest, err = manifests.Get(imh, imh.Digest, options...) if err != nil { - if _, ok := err.(distribution.ErrManifestUnknownRevision); ok { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) - } else { - imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - } + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) return } @@ -169,11 +160,7 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) manifest, err = manifests.Get(imh, manifestDigest) if err != nil { - if _, ok := err.(distribution.ErrManifestUnknownRevision); ok { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) - } else { - imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - } + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) return } @@ -183,8 +170,6 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) if err != nil { return } - } else { - imh.Digest = manifestDigest } } @@ -200,16 +185,12 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) w.Write(p) } 
-func (imh *manifestHandler) convertSchema2Manifest(schema2Manifest *schema2.DeserializedManifest) (distribution.Manifest, error) { +func (imh *imageManifestHandler) convertSchema2Manifest(schema2Manifest *schema2.DeserializedManifest) (distribution.Manifest, error) { targetDescriptor := schema2Manifest.Target() blobs := imh.Repository.Blobs(imh) configJSON, err := blobs.Get(imh, targetDescriptor.Digest) if err != nil { - if err == distribution.ErrBlobUnknown { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) - } else { - imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - } + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) return nil, err } @@ -224,7 +205,7 @@ func (imh *manifestHandler) convertSchema2Manifest(schema2Manifest *schema2.Dese } builder := schema1.NewConfigManifestBuilder(imh.Repository.Blobs(imh), imh.Context.App.trustKey, ref, configJSON) - for _, d := range schema2Manifest.Layers { + for _, d := range schema2Manifest.References() { if err := builder.AppendReference(d); err != nil { imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) return nil, err @@ -249,8 +230,8 @@ func etagMatch(r *http.Request, etag string) bool { return false } -// PutManifest validates and stores a manifest in the registry. -func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) { +// PutImageManifest validates and stores an image in the registry. 
+func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(imh).Debug("PutImageManifest") manifests, err := imh.Repository.Manifests(imh) if err != nil { @@ -288,12 +269,6 @@ func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) if imh.Tag != "" { options = append(options, distribution.WithTag(imh.Tag)) } - - if err := imh.applyResourcePolicy(manifest); err != nil { - imh.Errors = append(imh.Errors, err) - return - } - _, err = manifests.Put(imh, manifest, options...) if err != nil { // TODO(stevvooe): These error handling switches really need to be @@ -364,75 +339,8 @@ func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) w.WriteHeader(http.StatusCreated) } -// applyResourcePolicy checks whether the resource class matches what has -// been authorized and allowed by the policy configuration. -func (imh *manifestHandler) applyResourcePolicy(manifest distribution.Manifest) error { - allowedClasses := imh.App.Config.Policy.Repository.Classes - if len(allowedClasses) == 0 { - return nil - } - - var class string - switch m := manifest.(type) { - case *schema1.SignedManifest: - class = "image" - case *schema2.DeserializedManifest: - switch m.Config.MediaType { - case schema2.MediaTypeImageConfig: - class = "image" - case schema2.MediaTypePluginConfig: - class = "plugin" - default: - message := fmt.Sprintf("unknown manifest class for %s", m.Config.MediaType) - return errcode.ErrorCodeDenied.WithMessage(message) - } - } - - if class == "" { - return nil - } - - // Check to see if class is allowed in registry - var allowedClass bool - for _, c := range allowedClasses { - if class == c { - allowedClass = true - break - } - } - if !allowedClass { - message := fmt.Sprintf("registry does not allow %s manifest", class) - return errcode.ErrorCodeDenied.WithMessage(message) - } - - resources := auth.AuthorizedResources(imh) - n := imh.Repository.Named().Name() - - var foundResource 
bool - for _, r := range resources { - if r.Name == n { - if r.Class == "" { - r.Class = "image" - } - if r.Class == class { - return nil - } - foundResource = true - } - } - - // resource was found but no matching class was found - if foundResource { - message := fmt.Sprintf("repository not authorized for %s manifest", class) - return errcode.ErrorCodeDenied.WithMessage(message) - } - - return nil - -} - -// DeleteManifest removes the manifest with the given digest from the registry. -func (imh *manifestHandler) DeleteManifest(w http.ResponseWriter, r *http.Request) { +// DeleteImageManifest removes the manifest with the given digest from the registry. +func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(imh).Debug("DeleteImageManifest") manifests, err := imh.Repository.Manifests(imh) diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxyauth.go b/vendor/github.com/docker/distribution/registry/proxy/proxyauth.go index 7b405afcf..a9cc43a61 100644 --- a/vendor/github.com/docker/distribution/registry/proxy/proxyauth.go +++ b/vendor/github.com/docker/distribution/registry/proxy/proxyauth.go @@ -3,13 +3,11 @@ package proxy import ( "net/http" "net/url" - "strings" - "github.com/docker/distribution/context" "github.com/docker/distribution/registry/client/auth" - "github.com/docker/distribution/registry/client/auth/challenge" ) +const tokenURL = "https://auth.docker.io/token" const challengeHeader = "Docker-Distribution-Api-Version" type userpass struct { @@ -35,44 +33,17 @@ func (c credentials) SetRefreshToken(u *url.URL, service, token string) { } // configureAuth stores credentials for challenge responses -func configureAuth(username, password, remoteURL string) (auth.CredentialStore, error) { - creds := map[string]userpass{} - - authURLs, err := getAuthURLs(remoteURL) - if err != nil { - return nil, err - } - - for _, url := range authURLs { - 
context.GetLogger(context.Background()).Infof("Discovered token authentication URL: %s", url) - creds[url] = userpass{ +func configureAuth(username, password string) (auth.CredentialStore, error) { + creds := map[string]userpass{ + tokenURL: { username: username, password: password, - } + }, } - return credentials{creds: creds}, nil } -func getAuthURLs(remoteURL string) ([]string, error) { - authURLs := []string{} - - resp, err := http.Get(remoteURL + "/v2/") - if err != nil { - return nil, err - } - defer resp.Body.Close() - - for _, c := range challenge.ResponseChallenges(resp) { - if strings.EqualFold(c.Scheme, "bearer") { - authURLs = append(authURLs, c.Parameters["realm"]) - } - } - - return authURLs, nil -} - -func ping(manager challenge.Manager, endpoint, versionHeader string) error { +func ping(manager auth.ChallengeManager, endpoint, versionHeader string) error { resp, err := http.Get(endpoint) if err != nil { return err diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxyblobstore.go b/vendor/github.com/docker/distribution/registry/proxy/proxyblobstore.go index c3f1b92f1..6cd572133 100644 --- a/vendor/github.com/docker/distribution/registry/proxy/proxyblobstore.go +++ b/vendor/github.com/docker/distribution/registry/proxy/proxyblobstore.go @@ -9,9 +9,9 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/proxy/scheduler" - "github.com/opencontainers/go-digest" ) // todo(richardscothern): from cache control header or config file diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxyblobstore_test.go b/vendor/github.com/docker/distribution/registry/proxy/proxyblobstore_test.go index 0bba01a6f..8e3a06920 100644 --- a/vendor/github.com/docker/distribution/registry/proxy/proxyblobstore_test.go +++ 
b/vendor/github.com/docker/distribution/registry/proxy/proxyblobstore_test.go @@ -11,13 +11,13 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/proxy/scheduler" "github.com/docker/distribution/registry/storage" "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver/filesystem" "github.com/docker/distribution/registry/storage/driver/inmemory" - "github.com/opencontainers/go-digest" ) var sbsMu sync.Mutex @@ -115,7 +115,7 @@ func (te *testEnv) RemoteStats() *map[string]int { // Populate remote store and record the digests func makeTestEnv(t *testing.T, name string) *testEnv { - nameRef, err := reference.WithName(name) + nameRef, err := reference.ParseNamed(name) if err != nil { t.Fatalf("unable to parse reference: %s", err) } diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore.go b/vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore.go index e0a9f7d3f..f08e285db 100644 --- a/vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore.go +++ b/vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore.go @@ -5,9 +5,9 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/proxy/scheduler" - "github.com/opencontainers/go-digest" ) // todo(richardscothern): from cache control header or config diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore_test.go b/vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore_test.go index ca0845b90..0d6b7171f 100644 --- a/vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore_test.go +++ 
b/vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore_test.go @@ -7,18 +7,17 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/client/auth" - "github.com/docker/distribution/registry/client/auth/challenge" "github.com/docker/distribution/registry/proxy/scheduler" "github.com/docker/distribution/registry/storage" "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" "github.com/docker/libtrust" - "github.com/opencontainers/go-digest" ) type statsManifest struct { @@ -78,12 +77,12 @@ func (m *mockChallenger) credentialStore() auth.CredentialStore { return nil } -func (m *mockChallenger) challengeManager() challenge.Manager { +func (m *mockChallenger) challengeManager() auth.ChallengeManager { return nil } func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { - nameRef, err := reference.WithName(name) + nameRef, err := reference.ParseNamed(name) if err != nil { t.Fatalf("unable to parse reference: %s", err) } @@ -112,7 +111,7 @@ func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestE stats: make(map[string]int), } - manifestDigest, err := populateRepo(ctx, t, truthRepo, name, tag) + manifestDigest, err := populateRepo(t, ctx, truthRepo, name, tag) if err != nil { t.Fatalf(err.Error()) } @@ -149,7 +148,7 @@ func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestE } } -func populateRepo(ctx context.Context, t *testing.T, repository distribution.Repository, name, tag string) (digest.Digest, error) { +func populateRepo(t *testing.T, ctx context.Context, repository distribution.Repository, name, tag string) 
(digest.Digest, error) { m := schema1.Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 1, diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxyregistry.go b/vendor/github.com/docker/distribution/registry/proxy/proxyregistry.go index d64dcbb95..c63bc619f 100644 --- a/vendor/github.com/docker/distribution/registry/proxy/proxyregistry.go +++ b/vendor/github.com/docker/distribution/registry/proxy/proxyregistry.go @@ -12,7 +12,6 @@ import ( "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/client" "github.com/docker/distribution/registry/client/auth" - "github.com/docker/distribution/registry/client/auth/challenge" "github.com/docker/distribution/registry/client/transport" "github.com/docker/distribution/registry/proxy/scheduler" "github.com/docker/distribution/registry/storage" @@ -92,7 +91,7 @@ func NewRegistryPullThroughCache(ctx context.Context, registry distribution.Name return nil, err } - cs, err := configureAuth(config.Username, config.Password, config.RemoteURL) + cs, err := configureAuth(config.Username, config.Password) if err != nil { return nil, err } @@ -103,7 +102,7 @@ func NewRegistryPullThroughCache(ctx context.Context, registry distribution.Name remoteURL: *remoteURL, authChallenger: &remoteAuthChallenger{ remoteURL: *remoteURL, - cm: challenge.NewSimpleManager(), + cm: auth.NewSimpleChallengeManager(), cs: cs, }, }, nil @@ -178,14 +177,14 @@ func (pr *proxyingRegistry) BlobStatter() distribution.BlobStatter { // authChallenger encapsulates a request to the upstream to establish credential challenges type authChallenger interface { tryEstablishChallenges(context.Context) error - challengeManager() challenge.Manager + challengeManager() auth.ChallengeManager credentialStore() auth.CredentialStore } type remoteAuthChallenger struct { remoteURL url.URL sync.Mutex - cm challenge.Manager + cm auth.ChallengeManager cs auth.CredentialStore } @@ -193,7 +192,7 @@ func (r *remoteAuthChallenger) 
credentialStore() auth.CredentialStore { return r.cs } -func (r *remoteAuthChallenger) challengeManager() challenge.Manager { +func (r *remoteAuthChallenger) challengeManager() auth.ChallengeManager { return r.cm } diff --git a/vendor/github.com/docker/distribution/registry/registry.go b/vendor/github.com/docker/distribution/registry/registry.go index ee3d6b0bd..2adcb1e3e 100644 --- a/vendor/github.com/docker/distribution/registry/registry.go +++ b/vendor/github.com/docker/distribution/registry/registry.go @@ -12,7 +12,7 @@ import ( "rsc.io/letsencrypt" log "github.com/Sirupsen/logrus" - logstash "github.com/bshuster-repo/logrus-logstash-hook" + "github.com/Sirupsen/logrus/formatters/logstash" "github.com/bugsnag/bugsnag-go" "github.com/docker/distribution/configuration" "github.com/docker/distribution/context" diff --git a/vendor/github.com/docker/distribution/registry/storage/blob_test.go b/vendor/github.com/docker/distribution/registry/storage/blob_test.go index a263dd6cd..767526bb2 100644 --- a/vendor/github.com/docker/distribution/registry/storage/blob_test.go +++ b/vendor/github.com/docker/distribution/registry/storage/blob_test.go @@ -13,18 +13,18 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver/testdriver" "github.com/docker/distribution/testutil" - "github.com/opencontainers/go-digest" ) // TestWriteSeek tests that the current file size can be // obtained using Seek func TestWriteSeek(t *testing.T) { ctx := context.Background() - imageName, _ := reference.WithName("foo/bar") + imageName, _ := reference.ParseNamed("foo/bar") driver := testdriver.New() registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) if err != nil { @@ 
-60,7 +60,7 @@ func TestSimpleBlobUpload(t *testing.T) { } ctx := context.Background() - imageName, _ := reference.WithName("foo/bar") + imageName, _ := reference.ParseNamed("foo/bar") driver := testdriver.New() registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) if err != nil { @@ -255,7 +255,7 @@ func TestSimpleBlobUpload(t *testing.T) { // other tests. func TestSimpleBlobRead(t *testing.T) { ctx := context.Background() - imageName, _ := reference.WithName("foo/bar") + imageName, _ := reference.ParseNamed("foo/bar") driver := testdriver.New() registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) if err != nil { @@ -366,8 +366,8 @@ func TestBlobMount(t *testing.T) { } ctx := context.Background() - imageName, _ := reference.WithName("foo/bar") - sourceImageName, _ := reference.WithName("foo/source") + imageName, _ := reference.ParseNamed("foo/bar") + sourceImageName, _ := reference.ParseNamed("foo/source") driver := testdriver.New() registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) if err != nil { @@ -518,7 +518,7 @@ func TestBlobMount(t *testing.T) { // TestLayerUploadZeroLength uploads zero-length func TestLayerUploadZeroLength(t *testing.T) { ctx := context.Background() - imageName, _ := reference.WithName("foo/bar") + imageName, _ := reference.ParseNamed("foo/bar") driver := testdriver.New() registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) if err != nil { @@ -530,7 +530,7 @@ func TestLayerUploadZeroLength(t *testing.T) { } bs := repository.Blobs(ctx) - simpleUpload(t, bs, []byte{}, digestSha256Empty) + simpleUpload(t, bs, []byte{}, digest.DigestSha256EmptyTar) } func 
simpleUpload(t *testing.T, bs distribution.BlobIngester, blob []byte, expectedDigest digest.Digest) { diff --git a/vendor/github.com/docker/distribution/registry/storage/blobserver.go b/vendor/github.com/docker/distribution/registry/storage/blobserver.go index 739bf3cb3..2655e0113 100644 --- a/vendor/github.com/docker/distribution/registry/storage/blobserver.go +++ b/vendor/github.com/docker/distribution/registry/storage/blobserver.go @@ -7,8 +7,8 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/registry/storage/driver" - "github.com/opencontainers/go-digest" ) // TODO(stevvooe): This should configurable in the future. diff --git a/vendor/github.com/docker/distribution/registry/storage/blobstore.go b/vendor/github.com/docker/distribution/registry/storage/blobstore.go index 9f9071ca6..4274cc9e8 100644 --- a/vendor/github.com/docker/distribution/registry/storage/blobstore.go +++ b/vendor/github.com/docker/distribution/registry/storage/blobstore.go @@ -5,8 +5,8 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/registry/storage/driver" - "github.com/opencontainers/go-digest" ) // blobStore implements the read side of the blob store interface over a @@ -145,7 +145,7 @@ func (bs *blobStore) readlink(ctx context.Context, path string) (digest.Digest, return "", err } - linked, err := digest.Parse(string(content)) + linked, err := digest.ParseDigest(string(content)) if err != nil { return "", err } diff --git a/vendor/github.com/docker/distribution/registry/storage/blobwriter.go b/vendor/github.com/docker/distribution/registry/storage/blobwriter.go index d51e27ad3..668a6fc9b 100644 --- a/vendor/github.com/docker/distribution/registry/storage/blobwriter.go +++ b/vendor/github.com/docker/distribution/registry/storage/blobwriter.go @@ -10,19 +10,14 @@ import ( 
"github.com/Sirupsen/logrus" "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/opencontainers/go-digest" ) var ( errResumableDigestNotAvailable = errors.New("resumable digest not available") ) -const ( - // digestSha256Empty is the canonical sha256 digest of empty data - digestSha256Empty = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" -) - // blobWriter is used to control the various aspects of resumable // blob upload. type blobWriter struct { @@ -239,8 +234,12 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri // paths. We may be able to make the size-based check a stronger // guarantee, so this may be defensive. if !verified { - digester := digest.Canonical.Digester() - verifier := desc.Digest.Verifier() + digester := digest.Canonical.New() + + digestVerifier, err := digest.NewDigestVerifier(desc.Digest) + if err != nil { + return distribution.Descriptor{}, err + } // Read the file from the backend driver and validate it. fr, err := newFileReader(ctx, bw.driver, bw.path, desc.Size) @@ -251,12 +250,12 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri tr := io.TeeReader(fr, digester.Hash()) - if _, err := io.Copy(verifier, tr); err != nil { + if _, err := io.Copy(digestVerifier, tr); err != nil { return distribution.Descriptor{}, err } canonical = digester.Digest() - verified = verifier.Verified() + verified = digestVerifier.Verified() } } @@ -314,7 +313,7 @@ func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor // If no data was received, we may not actually have a file on disk. Check // the size here and write a zero-length file to blobPath if this is the // case. For the most part, this should only ever happen with zero-length - // blobs. + // tars. 
if _, err := bw.blobStore.driver.Stat(ctx, bw.path); err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: @@ -322,8 +321,8 @@ func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor // get a hash, then the underlying file is deleted, we risk moving // a zero-length blob into a nonzero-length blob location. To // prevent this horrid thing, we employ the hack of only allowing - // to this happen for the digest of an empty blob. - if desc.Digest == digestSha256Empty { + // to this happen for the digest of an empty tar. + if desc.Digest == digest.DigestSha256EmptyTar { return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{}) } diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cachecheck/suite.go b/vendor/github.com/docker/distribution/registry/storage/cache/cachecheck/suite.go index 0bbd52957..cba5addd3 100644 --- a/vendor/github.com/docker/distribution/registry/storage/cache/cachecheck/suite.go +++ b/vendor/github.com/docker/distribution/registry/storage/cache/cachecheck/suite.go @@ -6,8 +6,8 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/registry/storage/cache" - "github.com/opencontainers/go-digest" ) // CheckBlobDescriptorCache takes a cache implementation through a common set @@ -16,12 +16,12 @@ import ( func CheckBlobDescriptorCache(t *testing.T, provider cache.BlobDescriptorCacheProvider) { ctx := context.Background() - checkBlobDescriptorCacheEmptyRepository(ctx, t, provider) - checkBlobDescriptorCacheSetAndRead(ctx, t, provider) - checkBlobDescriptorCacheClear(ctx, t, provider) + checkBlobDescriptorCacheEmptyRepository(t, ctx, provider) + checkBlobDescriptorCacheSetAndRead(t, ctx, provider) + checkBlobDescriptorCacheClear(t, ctx, provider) } -func checkBlobDescriptorCacheEmptyRepository(ctx context.Context, t *testing.T, provider cache.BlobDescriptorCacheProvider) { 
+func checkBlobDescriptorCacheEmptyRepository(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) { if _, err := provider.Stat(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); err != distribution.ErrBlobUnknown { t.Fatalf("expected unknown blob error with empty store: %v", err) } @@ -59,7 +59,7 @@ func checkBlobDescriptorCacheEmptyRepository(ctx context.Context, t *testing.T, } } -func checkBlobDescriptorCacheSetAndRead(ctx context.Context, t *testing.T, provider cache.BlobDescriptorCacheProvider) { +func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) { localDigest := digest.Digest("sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111") expected := distribution.Descriptor{ Digest: "sha256:abc1111111111111111111111111111111111111111111111111111111111111", @@ -143,7 +143,7 @@ func checkBlobDescriptorCacheSetAndRead(ctx context.Context, t *testing.T, provi } } -func checkBlobDescriptorCacheClear(ctx context.Context, t *testing.T, provider cache.BlobDescriptorCacheProvider) { +func checkBlobDescriptorCacheClear(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) { localDigest := digest.Digest("sha384:def111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111") expected := distribution.Descriptor{ Digest: "sha256:def1111111111111111111111111111111111111111111111111111111111111", diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go index f647616bc..94ca8a90c 100644 --- a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go +++ b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go @@ -2,7 
+2,7 @@ package cache import ( "github.com/docker/distribution/context" - "github.com/opencontainers/go-digest" + "github.com/docker/distribution/digest" "github.com/docker/distribution" ) diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go index b2fcaf4e8..cf125e187 100644 --- a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go +++ b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go @@ -5,9 +5,9 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/cache" - "github.com/opencontainers/go-digest" ) type inMemoryBlobDescriptorCacheProvider struct { @@ -26,7 +26,7 @@ func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider } func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { - if _, err := reference.ParseNormalizedNamed(repo); err != nil { + if _, err := reference.ParseNamed(repo); err != nil { return nil, err } diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/redis/redis.go b/vendor/github.com/docker/distribution/registry/storage/cache/redis/redis.go index 5a5819ac7..cb264b098 100644 --- a/vendor/github.com/docker/distribution/registry/storage/cache/redis/redis.go +++ b/vendor/github.com/docker/distribution/registry/storage/cache/redis/redis.go @@ -5,10 +5,10 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/cache" "github.com/garyburd/redigo/redis" - "github.com/opencontainers/go-digest" ) // redisBlobStatService provides an implementation of 
@@ -41,7 +41,7 @@ func NewRedisBlobDescriptorCacheProvider(pool *redis.Pool) cache.BlobDescriptorC // RepositoryScoped returns the scoped cache. func (rbds *redisBlobDescriptorService) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { - if _, err := reference.ParseNormalizedNamed(repo); err != nil { + if _, err := reference.ParseNamed(repo); err != nil { return nil, err } diff --git a/vendor/github.com/docker/distribution/registry/storage/catalog_test.go b/vendor/github.com/docker/distribution/registry/storage/catalog_test.go index 90dc12cec..de96407dd 100644 --- a/vendor/github.com/docker/distribution/registry/storage/catalog_test.go +++ b/vendor/github.com/docker/distribution/registry/storage/catalog_test.go @@ -8,12 +8,12 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" - "github.com/opencontainers/go-digest" ) type setupEnv struct { @@ -44,7 +44,7 @@ func setupFS(t *testing.T) *setupEnv { } for _, repo := range repos { - makeRepo(ctx, t, repo, registry) + makeRepo(t, ctx, repo, registry) } expected := []string{ @@ -67,8 +67,8 @@ func setupFS(t *testing.T) *setupEnv { } } -func makeRepo(ctx context.Context, t *testing.T, name string, reg distribution.Namespace) { - named, err := reference.WithName(name) +func makeRepo(t *testing.T, ctx context.Context, name string, reg distribution.Namespace) { + named, err := reference.ParseNamed(name) if err != nil { t.Fatal(err) } diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/azure/azure.go b/vendor/github.com/docker/distribution/registry/storage/driver/azure/azure.go index 45d2b1e5c..b06b08764 100644 --- 
a/vendor/github.com/docker/distribution/registry/storage/driver/azure/azure.go +++ b/vendor/github.com/docker/distribution/registry/storage/driver/azure/azure.go @@ -86,8 +86,7 @@ func New(accountName, accountKey, container, realm string) (*Driver, error) { blobClient := api.GetBlobService() // Create registry container - containerRef := blobClient.GetContainerReference(container) - if _, err = containerRef.CreateIfNotExists(); err != nil { + if _, err = blobClient.CreateContainerIfNotExists(container, azure.ContainerAccessTypePrivate); err != nil { return nil, err } @@ -112,39 +111,24 @@ func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { return nil, err } - defer blob.Close() return ioutil.ReadAll(blob) } // PutContent stores the []byte content at a location designated by "path". func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { - if limit := 64 * 1024 * 1024; len(contents) > limit { // max size for block blobs uploaded via single "Put Blob" - return fmt.Errorf("uploading %d bytes with PutContent is not supported; limit: %d bytes", len(contents), limit) + if _, err := d.client.DeleteBlobIfExists(d.container, path); err != nil { + return err } - - // Historically, blobs uploaded via PutContent used to be of type AppendBlob - // (https://github.com/docker/distribution/pull/1438). We can't replace - // these blobs atomically via a single "Put Blob" operation without - // deleting them first. Once we detect they are BlockBlob type, we can - // overwrite them with an atomically "Put Blob" operation. - // - // While we delete the blob and create a new one, there will be a small - // window of inconsistency and if the Put Blob fails, we may end up with - // losing the existing data while migrating it to BlockBlob type. However, - // expectation is the clients pushing will be retrying when they get an error - // response. 
- props, err := d.client.GetBlobProperties(d.container, path) - if err != nil && !is404(err) { - return fmt.Errorf("failed to get blob properties: %v", err) + writer, err := d.Writer(ctx, path, false) + if err != nil { + return err } - if err == nil && props.BlobType != azure.BlobTypeBlock { - if err := d.client.DeleteBlob(d.container, path, nil); err != nil { - return fmt.Errorf("failed to delete legacy blob (%s): %v", props.BlobType, err) - } + defer writer.Close() + _, err = writer.Write(contents) + if err != nil { + return err } - - r := bytes.NewReader(contents) - return d.client.CreateBlockBlobFromReader(d.container, path, uint64(len(contents)), r, nil) + return writer.Commit() } // Reader retrieves an io.ReadCloser for the content stored at "path" with a @@ -167,7 +151,7 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read } bytesRange := fmt.Sprintf("%v-", offset) - resp, err := d.client.GetBlobRange(d.container, path, bytesRange, nil) + resp, err := d.client.GetBlobRange(d.container, path, bytesRange) if err != nil { return nil, err } @@ -190,7 +174,7 @@ func (d *driver) Writer(ctx context.Context, path string, append bool) (storaged } size = blobProperties.ContentLength } else { - err := d.client.DeleteBlob(d.container, path, nil) + err := d.client.DeleteBlob(d.container, path) if err != nil { return nil, err } @@ -238,9 +222,7 @@ func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, if !strings.HasSuffix(virtContainerPath, "/") { virtContainerPath += "/" } - - containerRef := d.client.GetContainerReference(d.container) - blobs, err := containerRef.ListBlobs(azure.ListBlobsParameters{ + blobs, err := d.client.ListBlobs(d.container, azure.ListBlobsParameters{ Prefix: virtContainerPath, MaxResults: 1, }) @@ -290,12 +272,12 @@ func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) e return err } - return d.client.DeleteBlob(d.container, sourcePath, nil) + return 
d.client.DeleteBlob(d.container, sourcePath) } // Delete recursively deletes all objects stored at "path" and its subpaths. func (d *driver) Delete(ctx context.Context, path string) error { - ok, err := d.client.DeleteBlobIfExists(d.container, path, nil) + ok, err := d.client.DeleteBlobIfExists(d.container, path) if err != nil { return err } @@ -310,7 +292,7 @@ func (d *driver) Delete(ctx context.Context, path string) error { } for _, b := range blobs { - if err = d.client.DeleteBlob(d.container, b, nil); err != nil { + if err = d.client.DeleteBlob(d.container, b); err != nil { return err } } @@ -377,9 +359,8 @@ func (d *driver) listBlobs(container, virtPath string) ([]string, error) { out := []string{} marker := "" - containerRef := d.client.GetContainerReference(d.container) for { - resp, err := containerRef.ListBlobs(azure.ListBlobsParameters{ + resp, err := d.client.ListBlobs(d.container, azure.ListBlobsParameters{ Marker: marker, Prefix: virtPath, }) @@ -461,7 +442,7 @@ func (w *writer) Cancel() error { return fmt.Errorf("already committed") } w.cancelled = true - return w.driver.client.DeleteBlob(w.driver.container, w.path, nil) + return w.driver.client.DeleteBlob(w.driver.container, w.path) } func (w *writer) Commit() error { @@ -489,7 +470,7 @@ func (bw *blockWriter) Write(p []byte) (int, error) { if offset+chunkSize > len(p) { chunkSize = len(p) - offset } - err := bw.client.AppendBlock(bw.container, bw.path, p[offset:offset+chunkSize], nil) + err := bw.client.AppendBlock(bw.container, bw.path, p[offset:offset+chunkSize]) if err != nil { return n, err } diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/base/base.go b/vendor/github.com/docker/distribution/registry/storage/driver/base/base.go index e14f7edb1..064bda60f 100644 --- a/vendor/github.com/docker/distribution/registry/storage/driver/base/base.go +++ b/vendor/github.com/docker/distribution/registry/storage/driver/base/base.go @@ -137,7 +137,7 @@ func (base *Base) Stat(ctx 
context.Context, path string) (storagedriver.FileInfo ctx, done := context.WithTrace(ctx) defer done("%s.Stat(%q)", base.Name(), path) - if !storagedriver.PathRegexp.MatchString(path) && path != "/" { + if !storagedriver.PathRegexp.MatchString(path) { return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} } diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/base/regulator.go b/vendor/github.com/docker/distribution/registry/storage/driver/base/regulator.go index 1e929f836..185160a4b 100644 --- a/vendor/github.com/docker/distribution/registry/storage/driver/base/regulator.go +++ b/vendor/github.com/docker/distribution/registry/storage/driver/base/regulator.go @@ -38,7 +38,11 @@ func (r *regulator) enter() { func (r *regulator) exit() { r.L.Lock() - r.Signal() + // We only need to signal to a waiting FS operation if we're already at the + // limit of threads used + if r.available == 0 { + r.Signal() + } r.available++ r.L.Unlock() } diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/base/regulator_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/base/regulator_test.go deleted file mode 100644 index e4c0ad586..000000000 --- a/vendor/github.com/docker/distribution/registry/storage/driver/base/regulator_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package base - -import ( - "sync" - "testing" - "time" -) - -func TestRegulatorEnterExit(t *testing.T) { - const limit = 500 - - r := NewRegulator(nil, limit).(*regulator) - - for try := 0; try < 50; try++ { - run := make(chan struct{}) - - var firstGroupReady sync.WaitGroup - var firstGroupDone sync.WaitGroup - firstGroupReady.Add(limit) - firstGroupDone.Add(limit) - for i := 0; i < limit; i++ { - go func() { - r.enter() - firstGroupReady.Done() - <-run - r.exit() - firstGroupDone.Done() - }() - } - firstGroupReady.Wait() - - // now we exhausted all the limit, let's run a little bit more - var secondGroupReady sync.WaitGroup 
- var secondGroupDone sync.WaitGroup - for i := 0; i < 50; i++ { - secondGroupReady.Add(1) - secondGroupDone.Add(1) - go func() { - secondGroupReady.Done() - r.enter() - r.exit() - secondGroupDone.Done() - }() - } - secondGroupReady.Wait() - - // allow the first group to return resources - close(run) - - done := make(chan struct{}) - go func() { - secondGroupDone.Wait() - close(done) - }() - select { - case <-done: - case <-time.After(5 * time.Second): - t.Fatal("some r.enter() are still locked") - } - - firstGroupDone.Wait() - - if r.available != limit { - t.Fatalf("r.available: got %d, want %d", r.available, limit) - } - } -} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/oss/oss.go b/vendor/github.com/docker/distribution/registry/storage/driver/oss/oss.go index 9797f42d0..7ae703346 100644 --- a/vendor/github.com/docker/distribution/registry/storage/driver/oss/oss.go +++ b/vendor/github.com/docker/distribution/registry/storage/driver/oss/oss.go @@ -351,8 +351,7 @@ func (d *driver) List(ctx context.Context, opath string) ([]string, error) { prefix = "/" } - ossPath := d.ossPath(path) - listResponse, err := d.Bucket.List(ossPath, "/", "", listMax) + listResponse, err := d.Bucket.List(d.ossPath(path), "/", "", listMax) if err != nil { return nil, parseError(opath, err) } @@ -370,7 +369,7 @@ func (d *driver) List(ctx context.Context, opath string) ([]string, error) { } if listResponse.IsTruncated { - listResponse, err = d.Bucket.List(ossPath, "/", listResponse.NextMarker, listMax) + listResponse, err = d.Bucket.List(d.ossPath(path), "/", listResponse.NextMarker, listMax) if err != nil { return nil, err } @@ -379,11 +378,6 @@ func (d *driver) List(ctx context.Context, opath string) ([]string, error) { } } - // This is to cover for the cases when the first key equal to ossPath. 
- if len(files) > 0 && files[0] == strings.Replace(ossPath, d.ossPath(""), prefix, 1) { - files = files[1:] - } - if opath != "/" { if len(files) == 0 && len(directories) == 0 { // Treat empty response as missing directory, since we don't actually @@ -395,17 +389,15 @@ func (d *driver) List(ctx context.Context, opath string) ([]string, error) { return append(files, directories...), nil } -const maxConcurrency = 10 - // Move moves an object stored at sourcePath to destPath, removing the original // object. func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { logrus.Infof("Move from %s to %s", d.ossPath(sourcePath), d.ossPath(destPath)) - err := d.Bucket.CopyLargeFileInParallel(d.ossPath(sourcePath), d.ossPath(destPath), + + err := d.Bucket.CopyLargeFile(d.ossPath(sourcePath), d.ossPath(destPath), d.getContentType(), getPermissions(), - oss.Options{}, - maxConcurrency) + oss.Options{}) if err != nil { logrus.Errorf("Failed for move from %s to %s: %v", d.ossPath(sourcePath), d.ossPath(destPath), err) return parseError(sourcePath, err) @@ -416,8 +408,7 @@ func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) e // Delete recursively deletes all objects stored at "path" and its subpaths. func (d *driver) Delete(ctx context.Context, path string) error { - ossPath := d.ossPath(path) - listResponse, err := d.Bucket.List(ossPath, "", "", listMax) + listResponse, err := d.Bucket.List(d.ossPath(path), "", "", listMax) if err != nil || len(listResponse.Contents) == 0 { return storagedriver.PathNotFoundError{Path: path} } @@ -425,25 +416,15 @@ func (d *driver) Delete(ctx context.Context, path string) error { ossObjects := make([]oss.Object, listMax) for len(listResponse.Contents) > 0 { - numOssObjects := len(listResponse.Contents) for index, key := range listResponse.Contents { - // Stop if we encounter a key that is not a subpath (so that deleting "/a" does not delete "/ab"). 
- if len(key.Key) > len(ossPath) && (key.Key)[len(ossPath)] != '/' { - numOssObjects = index - break - } ossObjects[index].Key = key.Key } - err := d.Bucket.DelMulti(oss.Delete{Quiet: false, Objects: ossObjects[0:numOssObjects]}) + err := d.Bucket.DelMulti(oss.Delete{Quiet: false, Objects: ossObjects[0:len(listResponse.Contents)]}) if err != nil { return nil } - if numOssObjects < len(listResponse.Contents) { - return nil - } - listResponse, err = d.Bucket.List(d.ossPath(path), "", "", listMax) if err != nil { return err diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3.go b/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3.go index 19407d807..e808f7606 100644 --- a/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3.go +++ b/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3.go @@ -76,8 +76,8 @@ const noStorageClass = "NONE" // validRegions maps known s3 region identifiers to region descriptors var validRegions = map[string]struct{}{} -// validObjectACLs contains known s3 object Acls -var validObjectACLs = map[string]struct{}{} +// validObjectAcls contains known s3 object Acls +var validObjectAcls = map[string]struct{}{} //DriverParameters A struct that encapsulates all of the driver parameters after all values have been set type DriverParameters struct { @@ -97,20 +97,16 @@ type DriverParameters struct { RootDirectory string StorageClass string UserAgent string - ObjectACL string - SessionToken string + ObjectAcl string } func init() { for _, region := range []string{ "us-east-1", - "us-east-2", "us-west-1", "us-west-2", "eu-west-1", - "eu-west-2", "eu-central-1", - "ap-south-1", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", @@ -118,12 +114,11 @@ func init() { "sa-east-1", "cn-north-1", "us-gov-west-1", - "ca-central-1", } { validRegions[region] = struct{}{} } - for _, objectACL := range []string{ + for _, objectAcl := range []string{ 
s3.ObjectCannedACLPrivate, s3.ObjectCannedACLPublicRead, s3.ObjectCannedACLPublicReadWrite, @@ -132,7 +127,7 @@ func init() { s3.ObjectCannedACLBucketOwnerRead, s3.ObjectCannedACLBucketOwnerFullControl, } { - validObjectACLs[objectACL] = struct{}{} + validObjectAcls[objectAcl] = struct{}{} } // Register this as the default s3 driver in addition to s3aws @@ -158,7 +153,7 @@ type driver struct { MultipartCopyThresholdSize int64 RootDirectory string StorageClass string - ObjectACL string + ObjectAcl string } type baseEmbed struct { @@ -318,22 +313,20 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { userAgent = "" } - objectACL := s3.ObjectCannedACLPrivate - objectACLParam := parameters["objectacl"] - if objectACLParam != nil { - objectACLString, ok := objectACLParam.(string) + objectAcl := s3.ObjectCannedACLPrivate + objectAclParam := parameters["objectacl"] + if objectAclParam != nil { + objectAclString, ok := objectAclParam.(string) if !ok { - return nil, fmt.Errorf("Invalid value for objectacl parameter: %v", objectACLParam) + return nil, fmt.Errorf("Invalid value for objectacl parameter: %v", objectAclParam) } - if _, ok = validObjectACLs[objectACLString]; !ok { - return nil, fmt.Errorf("Invalid value for objectacl parameter: %v", objectACLParam) + if _, ok = validObjectAcls[objectAclString]; !ok { + return nil, fmt.Errorf("Invalid value for objectacl parameter: %v", objectAclParam) } - objectACL = objectACLString + objectAcl = objectAclString } - sessionToken := "" - params := DriverParameters{ fmt.Sprint(accessKey), fmt.Sprint(secretKey), @@ -351,8 +344,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { fmt.Sprint(rootDirectory), storageClass, fmt.Sprint(userAgent), - objectACL, - fmt.Sprint(sessionToken), + objectAcl, } return New(params) @@ -397,20 +389,29 @@ func New(params DriverParameters) (*Driver, error) { } awsConfig := aws.NewConfig() - creds := 
credentials.NewChainCredentials([]credentials.Provider{ - &credentials.StaticProvider{ - Value: credentials.Value{ - AccessKeyID: params.AccessKey, - SecretAccessKey: params.SecretKey, - SessionToken: params.SessionToken, + var creds *credentials.Credentials + if params.RegionEndpoint == "" { + creds = credentials.NewChainCredentials([]credentials.Provider{ + &credentials.StaticProvider{ + Value: credentials.Value{ + AccessKeyID: params.AccessKey, + SecretAccessKey: params.SecretKey, + }, }, - }, - &credentials.EnvProvider{}, - &credentials.SharedCredentialsProvider{}, - &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())}, - }) - - if params.RegionEndpoint != "" { + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{}, + &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())}, + }) + } else { + creds = credentials.NewChainCredentials([]credentials.Provider{ + &credentials.StaticProvider{ + Value: credentials.Value{ + AccessKeyID: params.AccessKey, + SecretAccessKey: params.SecretKey, + }, + }, + &credentials.EnvProvider{}, + }) awsConfig.WithS3ForcePathStyle(true) awsConfig.WithEndpoint(params.RegionEndpoint) } @@ -458,7 +459,7 @@ func New(params DriverParameters) (*Driver, error) { MultipartCopyThresholdSize: params.MultipartCopyThresholdSize, RootDirectory: params.RootDirectory, StorageClass: params.StorageClass, - ObjectACL: params.ObjectACL, + ObjectAcl: params.ObjectAcl, } return &Driver{ @@ -706,11 +707,15 @@ func (d *driver) copy(ctx context.Context, sourcePath string, destPath string) e return nil } + // Even in the worst case, a multipart copy should take no more + // than a few minutes, so 30 minutes is very conservative. 
+ expires := time.Now().Add(time.Duration(30) * time.Minute) createResp, err := d.S3.CreateMultipartUpload(&s3.CreateMultipartUploadInput{ Bucket: aws.String(d.Bucket), Key: aws.String(d.s3Path(destPath)), ContentType: d.getContentType(), ACL: d.getACL(), + Expires: aws.Time(expires), SSEKMSKeyId: d.getSSEKMSKeyID(), ServerSideEncryption: d.getEncryptionMode(), StorageClass: d.getStorageClass(), @@ -779,12 +784,10 @@ func min(a, b int) int { // We must be careful since S3 does not guarantee read after delete consistency func (d *driver) Delete(ctx context.Context, path string) error { s3Objects := make([]*s3.ObjectIdentifier, 0, listMax) - s3Path := d.s3Path(path) listObjectsInput := &s3.ListObjectsInput{ Bucket: aws.String(d.Bucket), - Prefix: aws.String(s3Path), + Prefix: aws.String(d.s3Path(path)), } -ListLoop: for { // list all the objects resp, err := d.S3.ListObjects(listObjectsInput) @@ -797,10 +800,6 @@ ListLoop: } for _, key := range resp.Contents { - // Stop if we encounter a key that is not a subpath (so that deleting "/a" does not delete "/ab"). 
- if len(*key.Key) > len(s3Path) && (*key.Key)[len(s3Path)] != '/' { - break ListLoop - } s3Objects = append(s3Objects, &s3.ObjectIdentifier{ Key: key.Key, }) @@ -913,7 +912,7 @@ func (d *driver) getContentType() *string { } func (d *driver) getACL() *string { - return aws.String(d.ObjectACL) + return aws.String(d.ObjectAcl) } func (d *driver) getStorageClass() *string { diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3_test.go index 363a22eb4..16c579cbb 100644 --- a/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3_test.go +++ b/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3_test.go @@ -33,10 +33,9 @@ func init() { secure := os.Getenv("S3_SECURE") v4Auth := os.Getenv("S3_V4_AUTH") region := os.Getenv("AWS_REGION") - objectACL := os.Getenv("S3_OBJECT_ACL") + objectAcl := os.Getenv("S3_OBJECT_ACL") root, err := ioutil.TempDir("", "driver-") regionEndpoint := os.Getenv("REGION_ENDPOINT") - sessionToken := os.Getenv("AWS_SESSION_TOKEN") if err != nil { panic(err) } @@ -84,8 +83,7 @@ func init() { rootDirectory, storageClass, driverName + "-test", - objectACL, - sessionToken, + objectAcl, } return New(parameters) diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3_v2_signer.go b/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3_v2_signer.go index cb8010874..6950f1bc1 100644 --- a/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3_v2_signer.go +++ b/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3_v2_signer.go @@ -124,8 +124,6 @@ func (v2 *signer) Sign() error { md5, ctype, date, xamz string xamzDate bool sarray []string - smap map[string]string - sharray []string ) headers := v2.Request.Header @@ -137,11 +135,7 @@ func (v2 *signer) Sign() error { host, canonicalPath := parsedURL.Host, parsedURL.Path 
v2.Request.Header["Host"] = []string{host} v2.Request.Header["date"] = []string{v2.Time.In(time.UTC).Format(time.RFC1123)} - if credValue.SessionToken != "" { - v2.Request.Header["x-amz-security-token"] = []string{credValue.SessionToken} - } - smap = make(map[string]string) for k, v := range headers { k = strings.ToLower(k) switch k { @@ -156,20 +150,16 @@ func (v2 *signer) Sign() error { default: if strings.HasPrefix(k, "x-amz-") { vall := strings.Join(v, ",") - smap[k] = k + ":" + vall + sarray = append(sarray, k+":"+vall) if k == "x-amz-date" { xamzDate = true date = "" } - sharray = append(sharray, k) } } } - if len(sharray) > 0 { - sort.StringSlice(sharray).Sort() - for _, h := range sharray { - sarray = append(sarray, smap[h]) - } + if len(sarray) > 0 { + sort.StringSlice(sarray).Sort() xamz = strings.Join(sarray, "\n") + "\n" } diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/s3-goamz/s3.go b/vendor/github.com/docker/distribution/registry/storage/driver/s3-goamz/s3.go index b16ca49a7..aa2d31b71 100644 --- a/vendor/github.com/docker/distribution/registry/storage/driver/s3-goamz/s3.go +++ b/vendor/github.com/docker/distribution/registry/storage/driver/s3-goamz/s3.go @@ -266,8 +266,10 @@ func New(params DriverParameters) (*Driver, error) { if params.V4Auth { s3obj.Signature = aws.V4Signature - } else if mustV4Auth(params.Region.Name) { - return nil, fmt.Errorf("The %s region only works with v4 authentication", params.Region.Name) + } else { + if params.Region.Name == "eu-central-1" { + return nil, fmt.Errorf("The eu-central-1 region only works with v4 authentication") + } } bucket := s3obj.Bucket(params.Bucket) @@ -441,14 +443,14 @@ func (d *driver) List(ctx context.Context, opath string) ([]string, error) { directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), prefix, 1)) } - if !listResponse.IsTruncated { + if listResponse.IsTruncated { + listResponse, err = d.Bucket.List(d.s3Path(path), 
"/", listResponse.NextMarker, listMax) + if err != nil { + return nil, err + } + } else { break } - - listResponse, err = d.Bucket.List(d.s3Path(path), "/", listResponse.NextMarker, listMax) - if err != nil { - return nil, err - } } if opath != "/" { @@ -477,8 +479,7 @@ func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) e // Delete recursively deletes all objects stored at "path" and its subpaths. func (d *driver) Delete(ctx context.Context, path string) error { - s3Path := d.s3Path(path) - listResponse, err := d.Bucket.List(s3Path, "", "", listMax) + listResponse, err := d.Bucket.List(d.s3Path(path), "", "", listMax) if err != nil || len(listResponse.Contents) == 0 { return storagedriver.PathNotFoundError{Path: path} } @@ -486,25 +487,15 @@ func (d *driver) Delete(ctx context.Context, path string) error { s3Objects := make([]s3.Object, listMax) for len(listResponse.Contents) > 0 { - numS3Objects := len(listResponse.Contents) for index, key := range listResponse.Contents { - // Stop if we encounter a key that is not a subpath (so that deleting "/a" does not delete "/ab"). 
- if len(key.Key) > len(s3Path) && (key.Key)[len(s3Path)] != '/' { - numS3Objects = index - break - } s3Objects[index].Key = key.Key } - err := d.Bucket.DelMulti(s3.Delete{Quiet: false, Objects: s3Objects[0:numS3Objects]}) + err := d.Bucket.DelMulti(s3.Delete{Quiet: false, Objects: s3Objects[0:len(listResponse.Contents)]}) if err != nil { return nil } - if numS3Objects < len(listResponse.Contents) { - return nil - } - listResponse, err = d.Bucket.List(d.s3Path(path), "", "", listMax) if err != nil { return err @@ -555,6 +546,11 @@ func parseError(path string, err error) error { return err } +func hasCode(err error, code string) bool { + s3err, ok := err.(*aws.Error) + return ok && s3err.Code == code +} + func (d *driver) getOptions() s3.Options { return s3.Options{ SSE: d.Encrypt, @@ -566,17 +562,6 @@ func getPermissions() s3.ACL { return s3.Private } -// mustV4Auth checks whether must use v4 auth in specific region. -// Please see documentation at http://docs.aws.amazon.com/general/latest/gr/signature-version-2.html -func mustV4Auth(region string) bool { - switch region { - case "eu-central-1", "cn-north-1", "us-east-2", - "ca-central-1", "ap-south-1", "ap-northeast-2", "eu-west-2": - return true - } - return false -} - func (d *driver) getContentType() string { return "application/octet-stream" } diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/swift/swift.go b/vendor/github.com/docker/distribution/registry/storage/driver/swift/swift.go index 4b7aa4e9f..242f13102 100644 --- a/vendor/github.com/docker/distribution/registry/storage/driver/swift/swift.go +++ b/vendor/github.com/docker/distribution/registry/storage/driver/swift/swift.go @@ -108,7 +108,7 @@ func (factory *swiftDriverFactory) Create(parameters map[string]interface{}) (st } type driver struct { - Conn *swift.Connection + Conn swift.Connection Container string Prefix string BulkDeleteSupport bool @@ -177,7 +177,7 @@ func New(params Parameters) (*Driver, error) { TLSClientConfig: 
&tls.Config{InsecureSkipVerify: params.InsecureSkipVerify}, } - ct := &swift.Connection{ + ct := swift.Connection{ UserName: params.Username, ApiKey: params.Password, AuthUrl: params.AuthURL, @@ -888,7 +888,7 @@ func (w *writer) waitForSegmentsToShowUp() error { } type segmentWriter struct { - conn *swift.Connection + conn swift.Connection container string segmentsPath string segmentNumber int diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go b/vendor/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go index d8afe0c85..87f9c6ace 100644 --- a/vendor/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go +++ b/vendor/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go @@ -15,10 +15,9 @@ import ( "testing" "time" - "gopkg.in/check.v1" - "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" + "gopkg.in/check.v1" ) // Test hooks up gocheck into the "go test" runner. @@ -718,52 +717,6 @@ func (suite *DriverSuite) TestDeleteFolder(c *check.C) { c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) } -// TestDeleteOnlyDeletesSubpaths checks that deleting path A does not -// delete path B when A is a prefix of B but B is not a subpath of A (so that -// deleting "/a" does not delete "/ab"). This matters for services like S3 that -// do not implement directories. 
-func (suite *DriverSuite) TestDeleteOnlyDeletesSubpaths(c *check.C) { - dirname := randomPath(32) - filename := randomPath(32) - contents := randomContents(32) - - defer suite.deletePath(c, firstPart(dirname)) - - err := suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename), contents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename+"suffix"), contents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, dirname, filename), contents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, dirname+"suffix", filename), contents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.Delete(suite.ctx, path.Join(dirname, filename)) - c.Assert(err, check.IsNil) - - _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename)) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - - _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename+"suffix")) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.Delete(suite.ctx, path.Join(dirname, dirname)) - c.Assert(err, check.IsNil) - - _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, dirname, filename)) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - - _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, dirname+"suffix", filename)) - c.Assert(err, check.IsNil) -} - // TestStatCall runs verifies the implementation of the storagedriver's Stat call. 
func (suite *DriverSuite) TestStatCall(c *check.C) { content := randomContents(4096) diff --git a/vendor/github.com/docker/distribution/registry/storage/filereader_test.go b/vendor/github.com/docker/distribution/registry/storage/filereader_test.go index e522d6056..5926020cc 100644 --- a/vendor/github.com/docker/distribution/registry/storage/filereader_test.go +++ b/vendor/github.com/docker/distribution/registry/storage/filereader_test.go @@ -8,8 +8,8 @@ import ( "testing" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/registry/storage/driver/inmemory" - "github.com/opencontainers/go-digest" ) func TestSimpleRead(t *testing.T) { @@ -41,7 +41,11 @@ func TestSimpleRead(t *testing.T) { t.Fatalf("error allocating file reader: %v", err) } - verifier := dgst.Verifier() + verifier, err := digest.NewDigestVerifier(dgst) + if err != nil { + t.Fatalf("error getting digest verifier: %s", err) + } + io.Copy(verifier, fr) if !verifier.Verified() { diff --git a/vendor/github.com/docker/distribution/registry/storage/garbagecollect.go b/vendor/github.com/docker/distribution/registry/storage/garbagecollect.go index 392898933..bc3404169 100644 --- a/vendor/github.com/docker/distribution/registry/storage/garbagecollect.go +++ b/vendor/github.com/docker/distribution/registry/storage/garbagecollect.go @@ -5,9 +5,10 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema2" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/driver" - "github.com/opencontainers/go-digest" ) func emit(format string, a ...interface{}) { @@ -24,10 +25,12 @@ func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis // mark markSet := make(map[digest.Digest]struct{}) err := repositoryEnumerator.Enumerate(ctx, func(repoName string) error { - emit(repoName) + if 
dryRun { + emit(repoName) + } var err error - named, err := reference.WithName(repoName) + named, err := reference.ParseNamed(repoName) if err != nil { return fmt.Errorf("failed to parse repo name %s: %v", repoName, err) } @@ -48,7 +51,9 @@ func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis err = manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error { // Mark the manifest's blob - emit("%s: marking manifest %s ", repoName, dgst) + if dryRun { + emit("%s: marking manifest %s ", repoName, dgst) + } markSet[dgst] = struct{}{} manifest, err := manifestService.Get(ctx, dgst) @@ -59,7 +64,19 @@ func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis descriptors := manifest.References() for _, descriptor := range descriptors { markSet[descriptor.Digest] = struct{}{} - emit("%s: marking blob %s", repoName, descriptor.Digest) + if dryRun { + emit("%s: marking blob %s", repoName, descriptor.Digest) + } + } + + switch manifest.(type) { + case *schema2.DeserializedManifest: + config := manifest.(*schema2.DeserializedManifest).Config + if dryRun { + emit("%s: marking configuration %s", repoName, config.Digest) + } + markSet[config.Digest] = struct{}{} + break } return nil @@ -80,7 +97,7 @@ func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis }) if err != nil { - return fmt.Errorf("failed to mark: %v", err) + return fmt.Errorf("failed to mark: %v\n", err) } // sweep @@ -96,17 +113,19 @@ func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis if err != nil { return fmt.Errorf("error enumerating blobs: %v", err) } - emit("\n%d blobs marked, %d blobs eligible for deletion", len(markSet), len(deleteSet)) + if dryRun { + emit("\n%d blobs marked, %d blobs eligible for deletion", len(markSet), len(deleteSet)) + } // Construct vacuum vacuum := NewVacuum(ctx, storageDriver) for dgst := range deleteSet { - emit("blob eligible for deletion: %s", dgst) if dryRun { + 
emit("blob eligible for deletion: %s", dgst) continue } err = vacuum.RemoveBlob(string(dgst)) if err != nil { - return fmt.Errorf("failed to delete blob %s: %v", dgst, err) + return fmt.Errorf("failed to delete blob %s: %v\n", dgst, err) } } diff --git a/vendor/github.com/docker/distribution/registry/storage/garbagecollect_test.go b/vendor/github.com/docker/distribution/registry/storage/garbagecollect_test.go index 2e36fddb0..88492d812 100644 --- a/vendor/github.com/docker/distribution/registry/storage/garbagecollect_test.go +++ b/vendor/github.com/docker/distribution/registry/storage/garbagecollect_test.go @@ -7,12 +7,12 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" "github.com/docker/libtrust" - "github.com/opencontainers/go-digest" ) type image struct { @@ -39,7 +39,7 @@ func makeRepository(t *testing.T, registry distribution.Namespace, name string) ctx := context.Background() // Initialize a dummy repository - named, err := reference.WithName(name) + named, err := reference.ParseNamed(name) if err != nil { t.Fatalf("Failed to parse name %s: %v", name, err) } @@ -145,7 +145,7 @@ func TestNoDeletionNoEffect(t *testing.T) { ctx := context.Background() inmemoryDriver := inmemory.New() - registry := createRegistry(t, inmemoryDriver) + registry := createRegistry(t, inmemory.New()) repo := makeRepository(t, registry, "palailogos") manifestService, err := repo.Manifests(ctx) diff --git a/vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go b/vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go index a1929eed3..6a5e8d033 100644 --- a/vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go +++ 
b/vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go @@ -8,10 +8,10 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/uuid" - "github.com/opencontainers/go-digest" ) // linkPathFunc describes a function that can resolve a link based on the @@ -321,7 +321,7 @@ func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string blobStore: lbs, id: uuid, startedAt: startedAt, - digester: digest.Canonical.Digester(), + digester: digest.Canonical.New(), fileWriter: fw, driver: lbs.driver, path: path, diff --git a/vendor/github.com/docker/distribution/registry/storage/linkedblobstore_test.go b/vendor/github.com/docker/distribution/registry/storage/linkedblobstore_test.go index a059a7781..f0f63d87b 100644 --- a/vendor/github.com/docker/distribution/registry/storage/linkedblobstore_test.go +++ b/vendor/github.com/docker/distribution/registry/storage/linkedblobstore_test.go @@ -9,14 +9,14 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" - "github.com/opencontainers/go-digest" + "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" "github.com/docker/distribution/testutil" ) func TestLinkedBlobStoreCreateWithMountFrom(t *testing.T) { - fooRepoName, _ := reference.WithName("nm/foo") + fooRepoName, _ := reference.ParseNamed("nm/foo") fooEnv := newManifestStoreTestEnv(t, fooRepoName, "thetag") ctx := context.Background() stats, err := mockRegistry(t, fooEnv.registry) @@ -54,7 +54,7 @@ func TestLinkedBlobStoreCreateWithMountFrom(t *testing.T) { } // create another repository nm/bar - barRepoName, _ := reference.WithName("nm/bar") + barRepoName, _ := reference.ParseNamed("nm/bar") barRepo, err := fooEnv.registry.Repository(ctx, barRepoName) if err != nil { 
t.Fatalf("unexpected error getting repo: %v", err) @@ -94,7 +94,7 @@ func TestLinkedBlobStoreCreateWithMountFrom(t *testing.T) { clearStats(stats) // create yet another repository nm/baz - bazRepoName, _ := reference.WithName("nm/baz") + bazRepoName, _ := reference.ParseNamed("nm/baz") bazRepo, err := fooEnv.registry.Repository(ctx, bazRepoName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) diff --git a/vendor/github.com/docker/distribution/registry/storage/manifestlisthandler.go b/vendor/github.com/docker/distribution/registry/storage/manifestlisthandler.go index aee73b85f..e24062cd0 100644 --- a/vendor/github.com/docker/distribution/registry/storage/manifestlisthandler.go +++ b/vendor/github.com/docker/distribution/registry/storage/manifestlisthandler.go @@ -6,8 +6,8 @@ import ( "encoding/json" "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest/manifestlist" - "github.com/opencontainers/go-digest" ) // manifestListHandler is a ManifestHandler that covers schema2 manifest lists. diff --git a/vendor/github.com/docker/distribution/registry/storage/manifeststore.go b/vendor/github.com/docker/distribution/registry/storage/manifeststore.go index 4cca5157a..9e8065bb7 100644 --- a/vendor/github.com/docker/distribution/registry/storage/manifeststore.go +++ b/vendor/github.com/docker/distribution/registry/storage/manifeststore.go @@ -6,11 +6,11 @@ import ( "encoding/json" "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" - "github.com/opencontainers/go-digest" ) // A ManifestHandler gets and puts manifests of a particular type. 
diff --git a/vendor/github.com/docker/distribution/registry/storage/manifeststore_test.go b/vendor/github.com/docker/distribution/registry/storage/manifeststore_test.go index e398058be..cbd30c044 100644 --- a/vendor/github.com/docker/distribution/registry/storage/manifeststore_test.go +++ b/vendor/github.com/docker/distribution/registry/storage/manifeststore_test.go @@ -8,6 +8,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/reference" @@ -16,7 +17,6 @@ import ( "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" "github.com/docker/libtrust" - "github.com/opencontainers/go-digest" ) type manifestStoreTestEnv struct { @@ -60,7 +60,7 @@ func TestManifestStorage(t *testing.T) { } func testManifestStorage(t *testing.T, options ...RegistryOption) { - repoName, _ := reference.WithName("foo/bar") + repoName, _ := reference.ParseNamed("foo/bar") env := newManifestStoreTestEnv(t, repoName, "thetag", options...) 
ctx := context.Background() ms, err := env.repository.Manifests(ctx) diff --git a/vendor/github.com/docker/distribution/registry/storage/paths.go b/vendor/github.com/docker/distribution/registry/storage/paths.go index b6d9b9b56..1b142b88f 100644 --- a/vendor/github.com/docker/distribution/registry/storage/paths.go +++ b/vendor/github.com/docker/distribution/registry/storage/paths.go @@ -5,7 +5,7 @@ import ( "path" "strings" - "github.com/opencontainers/go-digest" + "github.com/docker/distribution/digest" ) const ( diff --git a/vendor/github.com/docker/distribution/registry/storage/paths_test.go b/vendor/github.com/docker/distribution/registry/storage/paths_test.go index 677a34b9f..f739552aa 100644 --- a/vendor/github.com/docker/distribution/registry/storage/paths_test.go +++ b/vendor/github.com/docker/distribution/registry/storage/paths_test.go @@ -3,7 +3,7 @@ package storage import ( "testing" - "github.com/opencontainers/go-digest" + "github.com/docker/distribution/digest" ) func TestPathMapper(t *testing.T) { diff --git a/vendor/github.com/docker/distribution/registry/storage/purgeuploads.go b/vendor/github.com/docker/distribution/registry/storage/purgeuploads.go index 925b1ae9b..7576b189c 100644 --- a/vendor/github.com/docker/distribution/registry/storage/purgeuploads.go +++ b/vendor/github.com/docker/distribution/registry/storage/purgeuploads.go @@ -80,7 +80,7 @@ func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriv } - uuid, isContainingDir := uuidFromPath(filePath) + uuid, isContainingDir := uUIDFromPath(filePath) if uuid == "" { // Cannot reliably delete return nil @@ -111,10 +111,10 @@ func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriv return uploads, errors } -// uuidFromPath extracts the upload UUID from a given path +// uUIDFromPath extracts the upload UUID from a given path // If the UUID is the last path component, this is the containing // directory for all upload files -func uuidFromPath(path 
string) (string, bool) { +func uUIDFromPath(path string) (string, bool) { components := strings.Split(path, "/") for i := len(components) - 1; i >= 0; i-- { if u, err := uuid.Parse(components[i]); err == nil { diff --git a/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler.go b/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler.go index 05c53254f..1d221410e 100644 --- a/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler.go +++ b/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler.go @@ -8,9 +8,8 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" - "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest/schema2" - "github.com/opencontainers/go-digest" ) var ( @@ -72,62 +71,53 @@ func (ms *schema2ManifestHandler) Put(ctx context.Context, manifest distribution func (ms *schema2ManifestHandler) verifyManifest(ctx context.Context, mnfst schema2.DeserializedManifest, skipDependencyVerification bool) error { var errs distribution.ErrManifestVerification - if skipDependencyVerification { - return nil - } - - manifestService, err := ms.repository.Manifests(ctx) - if err != nil { - return err - } - - blobsService := ms.repository.Blobs(ctx) - - for _, descriptor := range mnfst.References() { - var err error - - switch descriptor.MediaType { - case schema2.MediaTypeForeignLayer: - // Clients download this layer from an external URL, so do not check for - // its presense. 
- if len(descriptor.URLs) == 0 { - err = errMissingURL - } - allow := ms.manifestURLs.allow - deny := ms.manifestURLs.deny - for _, u := range descriptor.URLs { - var pu *url.URL - pu, err = url.Parse(u) - if err != nil || (pu.Scheme != "http" && pu.Scheme != "https") || pu.Fragment != "" || (allow != nil && !allow.MatchString(u)) || (deny != nil && deny.MatchString(u)) { - err = errInvalidURL - break - } - } - case schema2.MediaTypeManifest, schema1.MediaTypeManifest: - var exists bool - exists, err = manifestService.Exists(ctx, descriptor.Digest) - if err != nil || !exists { - err = distribution.ErrBlobUnknown // just coerce to unknown. - } - - fallthrough // double check the blob store. - default: - // forward all else to blob storage - if len(descriptor.URLs) == 0 { - _, err = blobsService.Stat(ctx, descriptor.Digest) - } - } - + if !skipDependencyVerification { + target := mnfst.Target() + _, err := ms.repository.Blobs(ctx).Stat(ctx, target.Digest) if err != nil { if err != distribution.ErrBlobUnknown { errs = append(errs, err) } // On error here, we always append unknown blob errors. - errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: descriptor.Digest}) + errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: target.Digest}) + } + + for _, fsLayer := range mnfst.References() { + var err error + if fsLayer.MediaType != schema2.MediaTypeForeignLayer { + if len(fsLayer.URLs) == 0 { + _, err = ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest) + } else { + err = errUnexpectedURL + } + } else { + // Clients download this layer from an external URL, so do not check for + // its presense. 
+ if len(fsLayer.URLs) == 0 { + err = errMissingURL + } + allow := ms.manifestURLs.allow + deny := ms.manifestURLs.deny + for _, u := range fsLayer.URLs { + var pu *url.URL + pu, err = url.Parse(u) + if err != nil || (pu.Scheme != "http" && pu.Scheme != "https") || pu.Fragment != "" || (allow != nil && !allow.MatchString(u)) || (deny != nil && deny.MatchString(u)) { + err = errInvalidURL + break + } + } + } + if err != nil { + if err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + + // On error here, we always append unknown blob errors. + errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.Digest}) + } } } - if len(errs) != 0 { return errs } diff --git a/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler_test.go b/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler_test.go index 6536f9d3c..73a7e336a 100644 --- a/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler_test.go +++ b/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler_test.go @@ -20,7 +20,7 @@ func TestVerifyManifestForeignLayer(t *testing.T) { repo := makeRepository(t, registry, "test") manifestService := makeManifestService(t, repo) - config, err := repo.Blobs(ctx).Put(ctx, schema2.MediaTypeImageConfig, nil) + config, err := repo.Blobs(ctx).Put(ctx, schema2.MediaTypeConfig, nil) if err != nil { t.Fatal(err) } @@ -57,10 +57,9 @@ func TestVerifyManifestForeignLayer(t *testing.T) { errMissingURL, }, { - // regular layers may have foreign urls layer, []string{"http://foo/bar"}, - nil, + errUnexpectedURL, }, { foreignLayer, diff --git a/vendor/github.com/docker/distribution/registry/storage/signedmanifesthandler.go b/vendor/github.com/docker/distribution/registry/storage/signedmanifesthandler.go index 6ca1c6c8c..30d330824 100644 --- a/vendor/github.com/docker/distribution/registry/storage/signedmanifesthandler.go +++ 
b/vendor/github.com/docker/distribution/registry/storage/signedmanifesthandler.go @@ -6,10 +6,10 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/reference" "github.com/docker/libtrust" - "github.com/opencontainers/go-digest" ) // signedManifestHandler is a ManifestHandler that covers schema1 manifests. It diff --git a/vendor/github.com/docker/distribution/registry/storage/tagstore.go b/vendor/github.com/docker/distribution/registry/storage/tagstore.go index d73278869..4386ffcac 100644 --- a/vendor/github.com/docker/distribution/registry/storage/tagstore.go +++ b/vendor/github.com/docker/distribution/registry/storage/tagstore.go @@ -5,8 +5,8 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/opencontainers/go-digest" ) var _ distribution.TagService = &tagStore{} diff --git a/vendor/github.com/docker/distribution/registry/storage/tagstore_test.go b/vendor/github.com/docker/distribution/registry/storage/tagstore_test.go index 396441eec..554a46bf7 100644 --- a/vendor/github.com/docker/distribution/registry/storage/tagstore_test.go +++ b/vendor/github.com/docker/distribution/registry/storage/tagstore_test.go @@ -22,7 +22,7 @@ func testTagStore(t *testing.T) *tagsTestEnv { t.Fatal(err) } - repoRef, _ := reference.WithName("a/b") + repoRef, _ := reference.ParseNamed("a/b") repo, err := reg.Repository(ctx, repoRef) if err != nil { t.Fatal(err) diff --git a/vendor/github.com/docker/distribution/registry/storage/vacuum.go b/vendor/github.com/docker/distribution/registry/storage/vacuum.go index 42c8ef605..3bdfebf27 100644 --- a/vendor/github.com/docker/distribution/registry/storage/vacuum.go +++ 
b/vendor/github.com/docker/distribution/registry/storage/vacuum.go @@ -4,8 +4,8 @@ import ( "path" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/registry/storage/driver" - "github.com/opencontainers/go-digest" ) // vacuum contains functions for cleaning up repositories and blobs @@ -29,7 +29,7 @@ type Vacuum struct { // RemoveBlob removes a blob from the filesystem func (v Vacuum) RemoveBlob(dgst string) error { - d, err := digest.Parse(dgst) + d, err := digest.ParseDigest(dgst) if err != nil { return err } diff --git a/vendor/github.com/docker/distribution/testutil/manifests.go b/vendor/github.com/docker/distribution/testutil/manifests.go index 8afe82e48..c4f9fef53 100644 --- a/vendor/github.com/docker/distribution/testutil/manifests.go +++ b/vendor/github.com/docker/distribution/testutil/manifests.go @@ -5,12 +5,12 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" "github.com/docker/libtrust" - "github.com/opencontainers/go-digest" ) // MakeManifestList constructs a manifest list out of a list of manifest digests @@ -73,7 +73,7 @@ func MakeSchema1Manifest(digests []digest.Digest) (distribution.Manifest, error) func MakeSchema2Manifest(repository distribution.Repository, digests []digest.Digest) (distribution.Manifest, error) { ctx := context.Background() blobStore := repository.Blobs(ctx) - builder := schema2.NewManifestBuilder(blobStore, schema2.MediaTypeImageConfig, []byte{}) + builder := schema2.NewManifestBuilder(blobStore, []byte{}) for _, digest := range digests { builder.AppendReference(distribution.Descriptor{Digest: digest}) } diff --git a/vendor/github.com/docker/distribution/testutil/tarfile.go 
b/vendor/github.com/docker/distribution/testutil/tarfile.go index cb93602f3..a8ba01553 100644 --- a/vendor/github.com/docker/distribution/testutil/tarfile.go +++ b/vendor/github.com/docker/distribution/testutil/tarfile.go @@ -10,7 +10,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" - "github.com/opencontainers/go-digest" + "github.com/docker/distribution/digest" ) // CreateRandomTarFile creates a random tarfile, returning it as an diff --git a/vendor/github.com/docker/distribution/vendor.conf b/vendor/github.com/docker/distribution/vendor.conf deleted file mode 100644 index 2af461111..000000000 --- a/vendor/github.com/docker/distribution/vendor.conf +++ /dev/null @@ -1,43 +0,0 @@ -github.com/Azure/azure-sdk-for-go 088007b3b08cc02b27f2eadfdcd870958460ce7e -github.com/Azure/go-autorest ec5f4903f77ed9927ac95b19ab8e44ada64c1356 -github.com/Sirupsen/logrus d26492970760ca5d33129d2d799e34be5c4782eb -github.com/aws/aws-sdk-go c6fc52983ea2375810aa38ddb5370e9cdf611716 -github.com/bshuster-repo/logrus-logstash-hook 5f729f2fb50a301153cae84ff5c58981d51c095a -github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274 -github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 -github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 -github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2 -github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04 -github.com/docker/goamz f0a21f5b2e12f83a505ecf79b633bb2035cf6f85 -github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21 -github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257 -github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c -github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3 -github.com/gorilla/context 14f550f51af52180c2eefed15e5fd18d63c0a64a -github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b -github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604 
-github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 -github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d -github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39 -github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef -github.com/ncw/swift b964f2ca856aac39885e258ad25aec08d5f64ee6 -github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064 -github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842 -github.com/stevvooe/resumable 2aaf90b2ceea5072cb503ef2a620b08ff3119870 -github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985 -github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e -github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128 -github.com/yvasiyarov/newrelic_platform_go b21fdbd4370f3717f3bbd2bf41c223bc273068e6 -golang.org/x/crypto c10c31b5e94b6f7a0283272dc2bb27163dcea24b -golang.org/x/net 4876518f9e71663000c348837735820161a42df7 -golang.org/x/oauth2 045497edb6234273d67dbc25da3f2ddbc4c4cacf -golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb -google.golang.org/api 9bf6e6e569ff057f75d9604a46c52928f17d2b54 -google.golang.org/appengine 12d5545dc1cfa6047a286d5e853841b6471f4c19 -google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2 -google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994 -gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673 -gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b -gopkg.in/yaml.v2 bef53efd0c76e49e6de55ead051f886bea7e9420 -rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git -github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb diff --git a/vendor/github.com/docker/distribution/version/version.go b/vendor/github.com/docker/distribution/version/version.go index 807bb4a2e..cafe23366 100644 --- a/vendor/github.com/docker/distribution/version/version.go +++ 
b/vendor/github.com/docker/distribution/version/version.go @@ -8,4 +8,4 @@ var Package = "github.com/docker/distribution" // the latest release tag by hand, always suffixed by "+unknown". During // build, it will be replaced by the actual version. The value here will be // used if the registry is run after a go get based install. -var Version = "v2.6.0+unknown" +var Version = "v2.4.1+unknown" diff --git a/vendor/github.com/gorilla/context/.travis.yml b/vendor/github.com/gorilla/context/.travis.yml index d87d46576..6f440f1e4 100644 --- a/vendor/github.com/gorilla/context/.travis.yml +++ b/vendor/github.com/gorilla/context/.travis.yml @@ -1,7 +1,19 @@ language: go +sudo: false -go: - - 1.0 - - 1.1 - - 1.2 - - tip +matrix: + include: + - go: 1.3 + - go: 1.4 + - go: 1.5 + - go: 1.6 + - go: 1.7 + - go: tip + allow_failures: + - go: tip + +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d .) + - go vet $(go list ./... | grep -v /vendor/) + - go test -v -race ./... diff --git a/vendor/github.com/gorilla/context/README.md b/vendor/github.com/gorilla/context/README.md index c60a31b05..08f86693b 100644 --- a/vendor/github.com/gorilla/context/README.md +++ b/vendor/github.com/gorilla/context/README.md @@ -4,4 +4,7 @@ context gorilla/context is a general purpose registry for global request variables. +> Note: gorilla/context, having been born well before `context.Context` existed, does not play well +> with the shallow copying of the request that [`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext) (added to net/http Go 1.7 onwards) performs. You should either use *just* gorilla/context, or moving forward, the new `http.Request.Context()`. 
+ Read the full documentation here: http://www.gorillatoolkit.org/pkg/context diff --git a/vendor/github.com/gorilla/context/context_test.go b/vendor/github.com/gorilla/context/context_test.go index 6ada8ec31..d70e91a23 100644 --- a/vendor/github.com/gorilla/context/context_test.go +++ b/vendor/github.com/gorilla/context/context_test.go @@ -69,7 +69,7 @@ func TestContext(t *testing.T) { // GetAllOk() for empty request values, ok = GetAllOk(emptyR) - assertEqual(value, nil) + assertEqual(len(values), 0) assertEqual(ok, false) // Delete() @@ -98,7 +98,7 @@ func parallelReader(r *http.Request, key string, iterations int, wait, done chan func parallelWriter(r *http.Request, key, value string, iterations int, wait, done chan struct{}) { <-wait for i := 0; i < iterations; i++ { - Get(r, key) + Set(r, key, value) } done <- struct{}{} diff --git a/vendor/github.com/gorilla/context/doc.go b/vendor/github.com/gorilla/context/doc.go index 73c740031..448d1bfca 100644 --- a/vendor/github.com/gorilla/context/doc.go +++ b/vendor/github.com/gorilla/context/doc.go @@ -5,6 +5,12 @@ /* Package context stores values shared during a request lifetime. +Note: gorilla/context, having been born well before `context.Context` existed, +does not play well > with the shallow copying of the request that +[`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext) +(added to net/http Go 1.7 onwards) performs. You should either use *just* +gorilla/context, or moving forward, the new `http.Request.Context()`. + For example, a router can set variables extracted from the URL and later application handlers can access those values, or it can be used to store sessions values to be saved at the end of a request. 
There are several diff --git a/vendor/github.com/gorilla/mux/.travis.yml b/vendor/github.com/gorilla/mux/.travis.yml index d87d46576..f93ce56d1 100644 --- a/vendor/github.com/gorilla/mux/.travis.yml +++ b/vendor/github.com/gorilla/mux/.travis.yml @@ -1,7 +1,21 @@ language: go +sudo: false -go: - - 1.0 - - 1.1 - - 1.2 - - tip +matrix: + include: + - go: 1.2 + - go: 1.3 + - go: 1.4 + - go: 1.5 + - go: 1.6 + - go: 1.7 + - go: tip + +install: + - # Skip + +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d .) + - go tool vet . + - go test -v -race ./... diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md index e60301b03..fa79a6bc3 100644 --- a/vendor/github.com/gorilla/mux/README.md +++ b/vendor/github.com/gorilla/mux/README.md @@ -1,7 +1,299 @@ -mux +gorilla/mux === -[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux) +[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) +[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux) -gorilla/mux is a powerful URL router and dispatcher. +![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png) -Read the full documentation here: http://www.gorillatoolkit.org/pkg/mux +http://www.gorillatoolkit.org/pkg/mux + +Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to +their respective handler. + +The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are: + +* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`. 
+* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers. +* URL hosts and paths can have variables with an optional regular expression. +* Registered URLs can be built, or "reversed", which helps maintaining references to resources. +* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching. + +--- + +* [Install](#install) +* [Examples](#examples) +* [Matching Routes](#matching-routes) +* [Static Files](#static-files) +* [Registered URLs](#registered-urls) +* [Full Example](#full-example) + +--- + +## Install + +With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain: + +```sh +go get -u github.com/gorilla/mux +``` + +## Examples + +Let's start registering a couple of URL paths and handlers: + +```go +func main() { + r := mux.NewRouter() + r.HandleFunc("/", HomeHandler) + r.HandleFunc("/products", ProductsHandler) + r.HandleFunc("/articles", ArticlesHandler) + http.Handle("/", r) +} +``` + +Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters. + +Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. 
For example: + +```go +r := mux.NewRouter() +r.HandleFunc("/products/{key}", ProductHandler) +r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) +r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) +``` + +The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`: + +```go +vars := mux.Vars(request) +category := vars["category"] +``` + +And this is all you need to know about the basic usage. More advanced options are explained below. + +### Matching Routes + +Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables: + +```go +r := mux.NewRouter() +// Only matches if domain is "www.example.com". +r.Host("www.example.com") +// Matches a dynamic subdomain. +r.Host("{subdomain:[a-z]+}.domain.com") +``` + +There are several other matchers that can be added. To match path prefixes: + +```go +r.PathPrefix("/products/") +``` + +...or HTTP methods: + +```go +r.Methods("GET", "POST") +``` + +...or URL schemes: + +```go +r.Schemes("https") +``` + +...or header values: + +```go +r.Headers("X-Requested-With", "XMLHttpRequest") +``` + +...or query values: + +```go +r.Queries("key", "value") +``` + +...or to use a custom matcher function: + +```go +r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { + return r.ProtoMajor == 0 +}) +``` + +...and finally, it is possible to combine several matchers in a single route: + +```go +r.HandleFunc("/products", ProductsHandler). + Host("www.example.com"). + Methods("GET"). + Schemes("http") +``` + +Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting". + +For example, let's say we have several URLs that should only match when the host is `www.example.com`. 
Create a route for that host and get a "subrouter" from it: + +```go +r := mux.NewRouter() +s := r.Host("www.example.com").Subrouter() +``` + +Then register routes in the subrouter: + +```go +s.HandleFunc("/products/", ProductsHandler) +s.HandleFunc("/products/{key}", ProductHandler) +s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) +``` + +The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route. + +Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register its paths relatively to a given subrouter. + +There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as base for their paths: + +```go +r := mux.NewRouter() +s := r.PathPrefix("/products").Subrouter() +// "/products/" +s.HandleFunc("/", ProductsHandler) +// "/products/{key}/" +s.HandleFunc("/{key}/", ProductHandler) +// "/products/{key}/details" +s.HandleFunc("/{key}/details", ProductDetailsHandler) +``` + +### Static Files + +Note that the path provided to `PathPrefix()` represents a "wildcard": calling +`PathPrefix("/static/").Handler(...)` means that the handler will be passed any +request that matches "/static/*". This makes it easy to serve static files with mux: + +```go +func main() { + var dir string + + flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir") + flag.Parse() + r := mux.NewRouter() + + // This will serve files under http://localhost:8000/static/ + r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir)))) + + srv := &http.Server{ + Handler: r, + Addr: "127.0.0.1:8000", + // Good practice: enforce timeouts for servers you create! 
+ WriteTimeout: 15 * time.Second, + ReadTimeout: 15 * time.Second, + } + + log.Fatal(srv.ListenAndServe()) +} +``` + +### Registered URLs + +Now let's see how to build registered URLs. + +Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example: + +```go +r := mux.NewRouter() +r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). + Name("article") +``` + +To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do: + +```go +url, err := r.Get("article").URL("category", "technology", "id", "42") +``` + +...and the result will be a `url.URL` with the following path: + +``` +"/articles/technology/42" +``` + +This also works for host variables: + +```go +r := mux.NewRouter() +r.Host("{subdomain}.domain.com"). + Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + +// url.String() will be "http://news.domain.com/articles/technology/42" +url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") +``` + +All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match. + +Regex support also exists for matching Headers within a route. For example, we could do: + +```go +r.HeadersRegexp("Content-Type", "application/(text|json)") +``` + +...and the route will match both requests with a Content-Type of `application/json` as well as `application/text` + +There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. 
For the previous route, we would do: + +```go +// "http://news.domain.com/" +host, err := r.Get("article").URLHost("subdomain", "news") + +// "/articles/technology/42" +path, err := r.Get("article").URLPath("category", "technology", "id", "42") +``` + +And if you use subrouters, host and path defined separately can be built as well: + +```go +r := mux.NewRouter() +s := r.Host("{subdomain}.domain.com").Subrouter() +s.Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + +// "http://news.domain.com/articles/technology/42" +url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") +``` + +## Full Example + +Here's a complete, runnable example of a small `mux` based server: + +```go +package main + +import ( + "net/http" + "log" + "github.com/gorilla/mux" +) + +func YourHandler(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("Gorilla!\n")) +} + +func main() { + r := mux.NewRouter() + // Routes consist of a path and a handler function. + r.HandleFunc("/", YourHandler) + + // Bind to a port and pass our router in + log.Fatal(http.ListenAndServe(":8000", r)) +} +``` + +## License + +BSD licensed. See the LICENSE file for details. 
diff --git a/vendor/github.com/gorilla/mux/bench_test.go b/vendor/github.com/gorilla/mux/bench_test.go index c5f97b2b2..946289b92 100644 --- a/vendor/github.com/gorilla/mux/bench_test.go +++ b/vendor/github.com/gorilla/mux/bench_test.go @@ -6,6 +6,7 @@ package mux import ( "net/http" + "net/http/httptest" "testing" ) @@ -19,3 +20,30 @@ func BenchmarkMux(b *testing.B) { router.ServeHTTP(nil, request) } } + +func BenchmarkMuxAlternativeInRegexp(b *testing.B) { + router := new(Router) + handler := func(w http.ResponseWriter, r *http.Request) {} + router.HandleFunc("/v1/{v1:(a|b)}", handler) + + requestA, _ := http.NewRequest("GET", "/v1/a", nil) + requestB, _ := http.NewRequest("GET", "/v1/b", nil) + for i := 0; i < b.N; i++ { + router.ServeHTTP(nil, requestA) + router.ServeHTTP(nil, requestB) + } +} + +func BenchmarkManyPathVariables(b *testing.B) { + router := new(Router) + handler := func(w http.ResponseWriter, r *http.Request) {} + router.HandleFunc("/v1/{v1}/{v2}/{v3}/{v4}/{v5}", handler) + + matchingRequest, _ := http.NewRequest("GET", "/v1/1/2/3/4/5", nil) + notMatchingRequest, _ := http.NewRequest("GET", "/v1/1/2/3/4", nil) + recorder := httptest.NewRecorder() + for i := 0; i < b.N; i++ { + router.ServeHTTP(nil, matchingRequest) + router.ServeHTTP(recorder, notMatchingRequest) + } +} diff --git a/vendor/github.com/gorilla/mux/context_gorilla.go b/vendor/github.com/gorilla/mux/context_gorilla.go new file mode 100644 index 000000000..d7adaa8fa --- /dev/null +++ b/vendor/github.com/gorilla/mux/context_gorilla.go @@ -0,0 +1,26 @@ +// +build !go1.7 + +package mux + +import ( + "net/http" + + "github.com/gorilla/context" +) + +func contextGet(r *http.Request, key interface{}) interface{} { + return context.Get(r, key) +} + +func contextSet(r *http.Request, key, val interface{}) *http.Request { + if val == nil { + return r + } + + context.Set(r, key, val) + return r +} + +func contextClear(r *http.Request) { + context.Clear(r) +} diff --git 
a/vendor/github.com/gorilla/mux/context_gorilla_test.go b/vendor/github.com/gorilla/mux/context_gorilla_test.go new file mode 100644 index 000000000..ffaf384c0 --- /dev/null +++ b/vendor/github.com/gorilla/mux/context_gorilla_test.go @@ -0,0 +1,40 @@ +// +build !go1.7 + +package mux + +import ( + "net/http" + "testing" + + "github.com/gorilla/context" +) + +// Tests that the context is cleared or not cleared properly depending on +// the configuration of the router +func TestKeepContext(t *testing.T) { + func1 := func(w http.ResponseWriter, r *http.Request) {} + + r := NewRouter() + r.HandleFunc("/", func1).Name("func1") + + req, _ := http.NewRequest("GET", "http://localhost/", nil) + context.Set(req, "t", 1) + + res := new(http.ResponseWriter) + r.ServeHTTP(*res, req) + + if _, ok := context.GetOk(req, "t"); ok { + t.Error("Context should have been cleared at end of request") + } + + r.KeepContext = true + + req, _ = http.NewRequest("GET", "http://localhost/", nil) + context.Set(req, "t", 1) + + r.ServeHTTP(*res, req) + if _, ok := context.GetOk(req, "t"); !ok { + t.Error("Context should NOT have been cleared at end of request") + } + +} diff --git a/vendor/github.com/gorilla/mux/context_native.go b/vendor/github.com/gorilla/mux/context_native.go new file mode 100644 index 000000000..209cbea7d --- /dev/null +++ b/vendor/github.com/gorilla/mux/context_native.go @@ -0,0 +1,24 @@ +// +build go1.7 + +package mux + +import ( + "context" + "net/http" +) + +func contextGet(r *http.Request, key interface{}) interface{} { + return r.Context().Value(key) +} + +func contextSet(r *http.Request, key, val interface{}) *http.Request { + if val == nil { + return r + } + + return r.WithContext(context.WithValue(r.Context(), key, val)) +} + +func contextClear(r *http.Request) { + return +} diff --git a/vendor/github.com/gorilla/mux/context_native_test.go b/vendor/github.com/gorilla/mux/context_native_test.go new file mode 100644 index 000000000..c150edf01 --- /dev/null +++ 
b/vendor/github.com/gorilla/mux/context_native_test.go @@ -0,0 +1,32 @@ +// +build go1.7 + +package mux + +import ( + "context" + "net/http" + "testing" + "time" +) + +func TestNativeContextMiddleware(t *testing.T) { + withTimeout := func(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx, cancel := context.WithTimeout(r.Context(), time.Minute) + defer cancel() + h.ServeHTTP(w, r.WithContext(ctx)) + }) + } + + r := NewRouter() + r.Handle("/path/{foo}", withTimeout(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + vars := Vars(r) + if vars["foo"] != "bar" { + t.Fatal("Expected foo var to be set") + } + }))) + + rec := NewRecorder() + req := newRequest("GET", "/path/bar") + r.ServeHTTP(rec, req) +} diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go index b2deed34c..291ef5e1c 100644 --- a/vendor/github.com/gorilla/mux/doc.go +++ b/vendor/github.com/gorilla/mux/doc.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. /* -Package gorilla/mux implements a request router and dispatcher. +Package mux implements a request router and dispatcher. The name mux stands for "HTTP request multiplexer". Like the standard http.ServeMux, mux.Router matches incoming requests against a list of @@ -60,8 +60,8 @@ Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables: r := mux.NewRouter() - // Only matches if domain is "www.domain.com". - r.Host("www.domain.com") + // Only matches if domain is "www.example.com". + r.Host("www.example.com") // Matches a dynamic subdomain. r.Host("{subdomain:[a-z]+}.domain.com") @@ -89,12 +89,12 @@ There are several other matchers that can be added. 
To match path prefixes: r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { return r.ProtoMajor == 0 - }) + }) ...and finally, it is possible to combine several matchers in a single route: r.HandleFunc("/products", ProductsHandler). - Host("www.domain.com"). + Host("www.example.com"). Methods("GET"). Schemes("http") @@ -103,11 +103,11 @@ a way to group several routes that share the same requirements. We call it "subrouting". For example, let's say we have several URLs that should only match when the -host is "www.domain.com". Create a route for that host and get a "subrouter" +host is "www.example.com". Create a route for that host and get a "subrouter" from it: r := mux.NewRouter() - s := r.Host("www.domain.com").Subrouter() + s := r.Host("www.example.com").Subrouter() Then register routes in the subrouter: @@ -116,7 +116,7 @@ Then register routes in the subrouter: s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) The three URL paths we registered above will only be tested if the domain is -"www.domain.com", because the subrouter is tested first. This is not +"www.example.com", because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route. @@ -136,6 +136,31 @@ the inner routes use it as base for their paths: // "/products/{key}/details" s.HandleFunc("/{key}/details", ProductDetailsHandler) +Note that the path provided to PathPrefix() represents a "wildcard": calling +PathPrefix("/static/").Handler(...) means that the handler will be passed any +request that matches "/static/*". This makes it easy to serve static files with mux: + + func main() { + var dir string + + flag.StringVar(&dir, "dir", ".", "the directory to serve files from. 
Defaults to the current dir") + flag.Parse() + r := mux.NewRouter() + + // This will serve files under http://localhost:8000/static/ + r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir)))) + + srv := &http.Server{ + Handler: r, + Addr: "127.0.0.1:8000", + // Good practice: enforce timeouts for servers you create! + WriteTimeout: 15 * time.Second, + ReadTimeout: 15 * time.Second, + } + + log.Fatal(srv.ListenAndServe()) + } + Now let's see how to build registered URLs. Routes can be named. All routes that define a name can have their URLs built, @@ -164,14 +189,21 @@ This also works for host variables: // url.String() will be "http://news.domain.com/articles/technology/42" url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") + "category", "technology", + "id", "42") All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match. +Regex support also exists for matching Headers within a route. For example, we could do: + + r.HeadersRegexp("Content-Type", "application/(text|json)") + +...and the route will match both requests with a Content-Type of `application/json` as well as +`application/text` + There's also a way to build only the URL host or path for a route: use the methods URLHost() or URLPath() instead. 
For the previous route, we would do: @@ -193,7 +225,7 @@ as well: // "http://news.domain.com/articles/technology/42" url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") + "category", "technology", + "id", "42") */ package mux diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go index 5b5f8e7db..d66ec3841 100644 --- a/vendor/github.com/gorilla/mux/mux.go +++ b/vendor/github.com/gorilla/mux/mux.go @@ -5,11 +5,12 @@ package mux import ( + "errors" "fmt" "net/http" "path" - - "github.com/gorilla/context" + "regexp" + "strings" ) // NewRouter returns a new router instance. @@ -46,8 +47,14 @@ type Router struct { namedRoutes map[string]*Route // See Router.StrictSlash(). This defines the flag for new routes. strictSlash bool - // If true, do not clear the request context after handling the request + // See Router.SkipClean(). This defines the flag for new routes. + skipClean bool + // If true, do not clear the request context after handling the request. + // This has no effect when go1.7+ is used, since the context is stored + // on the request itself. KeepContext bool + // see Router.UseEncodedPath(). This defines a flag for all routes. + useEncodedPath bool } // Match matches registered routes against the request. @@ -57,6 +64,12 @@ func (r *Router) Match(req *http.Request, match *RouteMatch) bool { return true } } + + // Closest match for a router (includes sub-routers) + if r.NotFoundHandler != nil { + match.Handler = r.NotFoundHandler + return true + } return false } @@ -65,35 +78,38 @@ func (r *Router) Match(req *http.Request, match *RouteMatch) bool { // When there is a match, the route variables can be retrieved calling // mux.Vars(request). func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { - // Clean path to canonical form and redirect. 
- if p := cleanPath(req.URL.Path); p != req.URL.Path { + if !r.skipClean { + path := req.URL.Path + if r.useEncodedPath { + path = getPath(req) + } + // Clean path to canonical form and redirect. + if p := cleanPath(path); p != path { - // Added 3 lines (Philip Schlump) - It was droping the query string and #whatever from query. - // This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue: - // http://code.google.com/p/go/issues/detail?id=5252 - url := *req.URL - url.Path = p - p = url.String() + // Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query. + // This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue: + // http://code.google.com/p/go/issues/detail?id=5252 + url := *req.URL + url.Path = p + p = url.String() - w.Header().Set("Location", p) - w.WriteHeader(http.StatusMovedPermanently) - return + w.Header().Set("Location", p) + w.WriteHeader(http.StatusMovedPermanently) + return + } } var match RouteMatch var handler http.Handler if r.Match(req, &match) { handler = match.Handler - setVars(req, match.Vars) - setCurrentRoute(req, match.Route) + req = setVars(req, match.Vars) + req = setCurrentRoute(req, match.Route) } if handler == nil { - handler = r.NotFoundHandler - if handler == nil { - handler = http.NotFoundHandler() - } + handler = http.NotFoundHandler() } if !r.KeepContext { - defer context.Clear(req) + defer contextClear(req) } handler.ServeHTTP(w, req) } @@ -128,6 +144,34 @@ func (r *Router) StrictSlash(value bool) *Router { return r } +// SkipClean defines the path cleaning behaviour for new routes. The initial +// value is false. Users should be careful about which routes are not cleaned +// +// When true, if the route path is "/path//to", it will remain with the double +// slash. 
This is helpful if you have a route like: /fetch/http://xkcd.com/534/ +// +// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will +// become /fetch/http/xkcd.com/534 +func (r *Router) SkipClean(value bool) *Router { + r.skipClean = value + return r +} + +// UseEncodedPath tells the router to match the encoded original path +// to the routes. +// For eg. "/path/foo%2Fbar/to" will match the path "/path/{var}/to". +// This behavior has the drawback of needing to match routes against +// r.RequestURI instead of r.URL.Path. Any modifications (such as http.StripPrefix) +// to r.URL.Path will not affect routing when this flag is on and thus may +// induce unintended behavior. +// +// If not called, the router will match the unencoded path to the routes. +// For eg. "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to" +func (r *Router) UseEncodedPath() *Router { + r.useEncodedPath = true + return r +} + // ---------------------------------------------------------------------------- // parentRoute // ---------------------------------------------------------------------------- @@ -152,13 +196,20 @@ func (r *Router) getRegexpGroup() *routeRegexpGroup { return nil } +func (r *Router) buildVars(m map[string]string) map[string]string { + if r.parent != nil { + m = r.parent.buildVars(m) + } + return m +} + // ---------------------------------------------------------------------------- // Route factories // ---------------------------------------------------------------------------- // NewRoute registers an empty route. func (r *Router) NewRoute() *Route { - route := &Route{parent: r, strictSlash: r.strictSlash} + route := &Route{parent: r, strictSlash: r.strictSlash, skipClean: r.skipClean, useEncodedPath: r.useEncodedPath} r.routes = append(r.routes, route) return route } @@ -224,6 +275,61 @@ func (r *Router) Schemes(schemes ...string) *Route { return r.NewRoute().Schemes(schemes...) 
} +// BuildVarsFunc registers a new route with a custom function for modifying +// route variables before building a URL. +func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route { + return r.NewRoute().BuildVarsFunc(f) +} + +// Walk walks the router and all its sub-routers, calling walkFn for each route +// in the tree. The routes are walked in the order they were added. Sub-routers +// are explored depth-first. +func (r *Router) Walk(walkFn WalkFunc) error { + return r.walk(walkFn, []*Route{}) +} + +// SkipRouter is used as a return value from WalkFuncs to indicate that the +// router that walk is about to descend down to should be skipped. +var SkipRouter = errors.New("skip this router") + +// WalkFunc is the type of the function called for each route visited by Walk. +// At every invocation, it is given the current route, and the current router, +// and a list of ancestor routes that lead to the current route. +type WalkFunc func(route *Route, router *Router, ancestors []*Route) error + +func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error { + for _, t := range r.routes { + if t.regexp == nil || t.regexp.path == nil || t.regexp.path.template == "" { + continue + } + + err := walkFn(t, r, ancestors) + if err == SkipRouter { + continue + } + if err != nil { + return err + } + for _, sr := range t.matchers { + if h, ok := sr.(*Router); ok { + err := h.walk(walkFn, ancestors) + if err != nil { + return err + } + } + } + if h, ok := t.handler.(*Router); ok { + ancestors = append(ancestors, t) + err := h.walk(walkFn, ancestors) + if err != nil { + return err + } + ancestors = ancestors[:len(ancestors)-1] + } + } + return nil +} + // ---------------------------------------------------------------------------- // Context // ---------------------------------------------------------------------------- @@ -244,32 +350,58 @@ const ( // Vars returns the route variables for the current request, if any. 
func Vars(r *http.Request) map[string]string { - if rv := context.Get(r, varsKey); rv != nil { + if rv := contextGet(r, varsKey); rv != nil { return rv.(map[string]string) } return nil } // CurrentRoute returns the matched route for the current request, if any. +// This only works when called inside the handler of the matched route +// because the matched route is stored in the request context which is cleared +// after the handler returns, unless the KeepContext option is set on the +// Router. func CurrentRoute(r *http.Request) *Route { - if rv := context.Get(r, routeKey); rv != nil { + if rv := contextGet(r, routeKey); rv != nil { return rv.(*Route) } return nil } -func setVars(r *http.Request, val interface{}) { - context.Set(r, varsKey, val) +func setVars(r *http.Request, val interface{}) *http.Request { + return contextSet(r, varsKey, val) } -func setCurrentRoute(r *http.Request, val interface{}) { - context.Set(r, routeKey, val) +func setCurrentRoute(r *http.Request, val interface{}) *http.Request { + return contextSet(r, routeKey, val) } // ---------------------------------------------------------------------------- // Helpers // ---------------------------------------------------------------------------- +// getPath returns the escaped path if possible; doing what URL.EscapedPath() +// which was added in go1.5 does +func getPath(req *http.Request) string { + if req.RequestURI != "" { + // Extract the path from RequestURI (which is escaped unlike URL.Path) + // as detailed here as detailed in https://golang.org/pkg/net/url/#URL + // for < 1.5 server side workaround + // http://localhost/path/here?v=1 -> /path/here + path := req.RequestURI + path = strings.TrimPrefix(path, req.URL.Scheme+`://`) + path = strings.TrimPrefix(path, req.URL.Host) + if i := strings.LastIndex(path, "?"); i > -1 { + path = path[:i] + } + if i := strings.LastIndex(path, "#"); i > -1 { + path = path[:i] + } + return path + } + return req.URL.Path +} + // cleanPath returns the 
canonical path for p, eliminating . and .. elements. // Borrowed from the net/http package. func cleanPath(p string) string { @@ -285,6 +417,7 @@ func cleanPath(p string) string { if p[len(p)-1] == '/' && np != "/" { np += "/" } + return np } @@ -300,13 +433,24 @@ func uniqueVars(s1, s2 []string) error { return nil } -// mapFromPairs converts variadic string parameters to a string map. -func mapFromPairs(pairs ...string) (map[string]string, error) { +// checkPairs returns the count of strings passed in, and an error if +// the count is not an even number. +func checkPairs(pairs ...string) (int, error) { length := len(pairs) if length%2 != 0 { - return nil, fmt.Errorf( + return length, fmt.Errorf( "mux: number of parameters must be multiple of 2, got %v", pairs) } + return length, nil +} + +// mapFromPairsToString converts variadic string parameters to a +// string to string map. +func mapFromPairsToString(pairs ...string) (map[string]string, error) { + length, err := checkPairs(pairs...) + if err != nil { + return nil, err + } m := make(map[string]string, length/2) for i := 0; i < length; i += 2 { m[pairs[i]] = pairs[i+1] @@ -314,6 +458,24 @@ func mapFromPairs(pairs ...string) (map[string]string, error) { return m, nil } +// mapFromPairsToRegex converts variadic string parameters to a +// string to regex map. +func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) { + length, err := checkPairs(pairs...) + if err != nil { + return nil, err + } + m := make(map[string]*regexp.Regexp, length/2) + for i := 0; i < length; i += 2 { + regex, err := regexp.Compile(pairs[i+1]) + if err != nil { + return nil, err + } + m[pairs[i]] = regex + } + return m, nil +} + // matchInArray returns true if the given string value is in the array. 
func matchInArray(arr []string, value string) bool { for _, v := range arr { @@ -324,9 +486,8 @@ func matchInArray(arr []string, value string) bool { return false } -// matchMap returns true if the given key/value pairs exist in a given map. -func matchMap(toCheck map[string]string, toMatch map[string][]string, - canonicalKey bool) bool { +// matchMapWithString returns true if the given key/value pairs exist in a given map. +func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool { for k, v := range toCheck { // Check if key exists. if canonicalKey { @@ -351,3 +512,31 @@ func matchMap(toCheck map[string]string, toMatch map[string][]string, } return true } + +// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against +// the given regex +func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool { + for k, v := range toCheck { + // Check if key exists. + if canonicalKey { + k = http.CanonicalHeaderKey(k) + } + if values := toMatch[k]; values == nil { + return false + } else if v != nil { + // If value was defined as an empty string we only check that the + // key exists. Otherwise we also check for equality. 
+ valueExists := false + for _, value := range values { + if v.MatchString(value) { + valueExists = true + break + } + } + if !valueExists { + return false + } + } + } + return true +} diff --git a/vendor/github.com/gorilla/mux/mux_test.go b/vendor/github.com/gorilla/mux/mux_test.go index e455bce8f..39a099c1e 100644 --- a/vendor/github.com/gorilla/mux/mux_test.go +++ b/vendor/github.com/gorilla/mux/mux_test.go @@ -5,13 +5,27 @@ package mux import ( + "bufio" + "bytes" + "errors" "fmt" "net/http" + "strings" "testing" - - "github.com/gorilla/context" ) +func (r *Route) GoString() string { + matchers := make([]string, len(r.matchers)) + for i, m := range r.matchers { + matchers[i] = fmt.Sprintf("%#v", m) + } + return fmt.Sprintf("&Route{matchers:[]matcher{%s}}", strings.Join(matchers, ", ")) +} + +func (r *routeRegexp) GoString() string { + return fmt.Sprintf("&routeRegexp{template: %q, matchHost: %t, matchQuery: %t, strictSlash: %t, regexp: regexp.MustCompile(%q), reverse: %q, varsN: %v, varsR: %v", r.template, r.matchHost, r.matchQuery, r.strictSlash, r.regexp.String(), r.reverse, r.varsN, r.varsR) +} + type routeTest struct { title string // title of the test route *Route // the route being tested @@ -19,6 +33,8 @@ type routeTest struct { vars map[string]string // the expected vars of the match host string // the expected host of the match path string // the expected path of the match + pathTemplate string // the expected path template to match + hostTemplate string // the expected host template to match shouldMatch bool // whether the request is expected to match the route at all shouldRedirect bool // whether the request should result in a redirect } @@ -100,44 +116,129 @@ func TestHost(t *testing.T) { shouldMatch: false, }, { - title: "Host route with pattern, match", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb"}, - host: "aaa.bbb.ccc", - path: "", - 
shouldMatch: true, + title: "Host route with pattern, match", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb"}, + host: "aaa.bbb.ccc", + path: "", + hostTemplate: `aaa.{v1:[a-z]{3}}.ccc`, + shouldMatch: true, }, { - title: "Host route with pattern, wrong host in request URL", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, + title: "Host route with pattern, additional capturing group, match", + route: new(Route).Host("aaa.{v1:[a-z]{2}(?:b|c)}.ccc"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb"}, + host: "aaa.bbb.ccc", + path: "", + hostTemplate: `aaa.{v1:[a-z]{2}(?:b|c)}.ccc`, + shouldMatch: true, }, { - title: "Host route with multiple patterns, match", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, + title: "Host route with pattern, wrong host in request URL", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb"}, + host: "aaa.bbb.ccc", + path: "", + hostTemplate: `aaa.{v1:[a-z]{3}}.ccc`, + shouldMatch: false, }, { - title: "Host route with multiple patterns, wrong host in request URL", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, + title: "Host route with multiple patterns, match", + route: 
new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, + host: "aaa.bbb.ccc", + path: "", + hostTemplate: `{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}`, + shouldMatch: true, + }, + { + title: "Host route with multiple patterns, wrong host in request URL", + route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, + host: "aaa.bbb.ccc", + path: "", + hostTemplate: `{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}`, + shouldMatch: false, + }, + { + title: "Host route with hyphenated name and pattern, match", + route: new(Route).Host("aaa.{v-1:[a-z]{3}}.ccc"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v-1": "bbb"}, + host: "aaa.bbb.ccc", + path: "", + hostTemplate: `aaa.{v-1:[a-z]{3}}.ccc`, + shouldMatch: true, + }, + { + title: "Host route with hyphenated name and pattern, additional capturing group, match", + route: new(Route).Host("aaa.{v-1:[a-z]{2}(?:b|c)}.ccc"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v-1": "bbb"}, + host: "aaa.bbb.ccc", + path: "", + hostTemplate: `aaa.{v-1:[a-z]{2}(?:b|c)}.ccc`, + shouldMatch: true, + }, + { + title: "Host route with multiple hyphenated names and patterns, match", + route: new(Route).Host("{v-1:[a-z]{3}}.{v-2:[a-z]{3}}.{v-3:[a-z]{3}}"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v-1": "aaa", "v-2": "bbb", "v-3": "ccc"}, + host: "aaa.bbb.ccc", + path: "", + hostTemplate: `{v-1:[a-z]{3}}.{v-2:[a-z]{3}}.{v-3:[a-z]{3}}`, + shouldMatch: true, + }, + { + title: "Path route with single pattern with pipe, match", + route: new(Route).Path("/{category:a|b/c}"), + request: newRequest("GET", "http://localhost/a"), + vars: 
map[string]string{"category": "a"}, + host: "", + path: "/a", + pathTemplate: `/{category:a|b/c}`, + shouldMatch: true, + }, + { + title: "Path route with single pattern with pipe, match", + route: new(Route).Path("/{category:a|b/c}"), + request: newRequest("GET", "http://localhost/b/c"), + vars: map[string]string{"category": "b/c"}, + host: "", + path: "/b/c", + pathTemplate: `/{category:a|b/c}`, + shouldMatch: true, + }, + { + title: "Path route with multiple patterns with pipe, match", + route: new(Route).Path("/{category:a|b/c}/{product}/{id:[0-9]+}"), + request: newRequest("GET", "http://localhost/a/product_name/1"), + vars: map[string]string{"category": "a", "product": "product_name", "id": "1"}, + host: "", + path: "/a/product_name/1", + pathTemplate: `/{category:a|b/c}/{product}/{id:[0-9]+}`, + shouldMatch: true, + }, + { + title: "Path route with multiple patterns with pipe, match", + route: new(Route).Path("/{category:a|b/c}/{product}/{id:[0-9]+}"), + request: newRequest("GET", "http://localhost/b/c/product_name/1"), + vars: map[string]string{"category": "b/c", "product": "product_name", "id": "1"}, + host: "", + path: "/b/c/product_name/1", + pathTemplate: `/{category:a|b/c}/{product}/{id:[0-9]+}`, + shouldMatch: true, }, } for _, test := range tests { testRoute(t, test) + testTemplate(t, test) } } @@ -162,22 +263,48 @@ func TestPath(t *testing.T) { shouldMatch: true, }, { - title: "Path route, do not match with trailing slash in path", - route: new(Route).Path("/111/"), - request: newRequest("GET", "http://localhost/111"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: false, + title: "Path route, do not match with trailing slash in path", + route: new(Route).Path("/111/"), + request: newRequest("GET", "http://localhost/111"), + vars: map[string]string{}, + host: "", + path: "/111", + pathTemplate: `/111/`, + shouldMatch: false, }, { - title: "Path route, do not match with trailing slash in request", - route: 
new(Route).Path("/111"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: false, + title: "Path route, do not match with trailing slash in request", + route: new(Route).Path("/111"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111/", + pathTemplate: `/111`, + shouldMatch: false, + }, + { + title: "Path route, match root with no host", + route: new(Route).Path("/"), + request: newRequest("GET", "/"), + vars: map[string]string{}, + host: "", + path: "/", + pathTemplate: `/`, + shouldMatch: true, + }, + { + title: "Path route, match root with no host, App Engine format", + route: new(Route).Path("/"), + request: func() *http.Request { + r := newRequest("GET", "http://localhost/") + r.RequestURI = "/" + return r + }(), + vars: map[string]string{}, + host: "", + path: "/", + pathTemplate: `/`, + shouldMatch: true, }, { title: "Path route, wrong path in request in request URL", @@ -189,45 +316,111 @@ func TestPath(t *testing.T) { shouldMatch: false, }, { - title: "Path route with pattern, match", - route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222/333", - shouldMatch: true, + title: "Path route with pattern, match", + route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222/333", + pathTemplate: `/111/{v1:[0-9]{3}}/333`, + shouldMatch: true, }, { - title: "Path route with pattern, URL in request does not match", - route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222/333", - shouldMatch: false, + title: "Path route with pattern, URL in request does not 
match", + route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222/333", + pathTemplate: `/111/{v1:[0-9]{3}}/333`, + shouldMatch: false, }, { - title: "Path route with multiple patterns, match", - route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, - host: "", - path: "/111/222/333", - shouldMatch: true, + title: "Path route with multiple patterns, match", + route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, + host: "", + path: "/111/222/333", + pathTemplate: `/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}`, + shouldMatch: true, }, { - title: "Path route with multiple patterns, URL in request does not match", - route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, - host: "", - path: "/111/222/333", - shouldMatch: false, + title: "Path route with multiple patterns, URL in request does not match", + route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, + host: "", + path: "/111/222/333", + pathTemplate: `/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}`, + shouldMatch: false, + }, + { + title: "Path route with multiple patterns with pipe, match", + route: new(Route).Path("/{category:a|(?:b/c)}/{product}/{id:[0-9]+}"), + request: newRequest("GET", "http://localhost/a/product_name/1"), + vars: map[string]string{"category": "a", "product": "product_name", "id": "1"}, + host: "", + path: 
"/a/product_name/1", + pathTemplate: `/{category:a|(?:b/c)}/{product}/{id:[0-9]+}`, + shouldMatch: true, + }, + { + title: "Path route with hyphenated name and pattern, match", + route: new(Route).Path("/111/{v-1:[0-9]{3}}/333"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v-1": "222"}, + host: "", + path: "/111/222/333", + pathTemplate: `/111/{v-1:[0-9]{3}}/333`, + shouldMatch: true, + }, + { + title: "Path route with multiple hyphenated names and patterns, match", + route: new(Route).Path("/{v-1:[0-9]{3}}/{v-2:[0-9]{3}}/{v-3:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v-1": "111", "v-2": "222", "v-3": "333"}, + host: "", + path: "/111/222/333", + pathTemplate: `/{v-1:[0-9]{3}}/{v-2:[0-9]{3}}/{v-3:[0-9]{3}}`, + shouldMatch: true, + }, + { + title: "Path route with multiple hyphenated names and patterns with pipe, match", + route: new(Route).Path("/{product-category:a|(?:b/c)}/{product-name}/{product-id:[0-9]+}"), + request: newRequest("GET", "http://localhost/a/product_name/1"), + vars: map[string]string{"product-category": "a", "product-name": "product_name", "product-id": "1"}, + host: "", + path: "/a/product_name/1", + pathTemplate: `/{product-category:a|(?:b/c)}/{product-name}/{product-id:[0-9]+}`, + shouldMatch: true, + }, + { + title: "Path route with multiple hyphenated names and patterns with pipe and case insensitive, match", + route: new(Route).Path("/{type:(?i:daily|mini|variety)}-{date:\\d{4,4}-\\d{2,2}-\\d{2,2}}"), + request: newRequest("GET", "http://localhost/daily-2016-01-01"), + vars: map[string]string{"type": "daily", "date": "2016-01-01"}, + host: "", + path: "/daily-2016-01-01", + pathTemplate: `/{type:(?i:daily|mini|variety)}-{date:\d{4,4}-\d{2,2}-\d{2,2}}`, + shouldMatch: true, + }, + { + title: "Path route with empty match right after other match", + route: new(Route).Path(`/{v1:[0-9]*}{v2:[a-z]*}/{v3:[0-9]*}`), + request: newRequest("GET", 
"http://localhost/111/222"), + vars: map[string]string{"v1": "111", "v2": "", "v3": "222"}, + host: "", + path: "/111/222", + pathTemplate: `/{v1:[0-9]*}{v2:[a-z]*}/{v3:[0-9]*}`, + shouldMatch: true, }, } for _, test := range tests { testRoute(t, test) + testTemplate(t, test) + testUseEscapedRoute(t, test) } } @@ -261,108 +454,128 @@ func TestPathPrefix(t *testing.T) { shouldMatch: false, }, { - title: "PathPrefix route with pattern, match", - route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222", - shouldMatch: true, + title: "PathPrefix route with pattern, match", + route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222", + pathTemplate: `/111/{v1:[0-9]{3}}`, + shouldMatch: true, }, { - title: "PathPrefix route with pattern, URL prefix in request does not match", - route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222", - shouldMatch: false, + title: "PathPrefix route with pattern, URL prefix in request does not match", + route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222", + pathTemplate: `/111/{v1:[0-9]{3}}`, + shouldMatch: false, }, { - title: "PathPrefix route with multiple patterns, match", - route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "111", "v2": "222"}, - host: "", - path: "/111/222", - shouldMatch: true, + title: "PathPrefix route with multiple patterns, match", + route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), + 
request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "111", "v2": "222"}, + host: "", + path: "/111/222", + pathTemplate: `/{v1:[0-9]{3}}/{v2:[0-9]{3}}`, + shouldMatch: true, }, { - title: "PathPrefix route with multiple patterns, URL prefix in request does not match", - route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "111", "v2": "222"}, - host: "", - path: "/111/222", - shouldMatch: false, + title: "PathPrefix route with multiple patterns, URL prefix in request does not match", + route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "111", "v2": "222"}, + host: "", + path: "/111/222", + pathTemplate: `/{v1:[0-9]{3}}/{v2:[0-9]{3}}`, + shouldMatch: false, }, } for _, test := range tests { testRoute(t, test) + testTemplate(t, test) + testUseEscapedRoute(t, test) } } func TestHostPath(t *testing.T) { tests := []routeTest{ { - title: "Host and Path route, match", - route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, + title: "Host and Path route, match", + route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{}, + host: "", + path: "", + pathTemplate: `/111/222/333`, + hostTemplate: `aaa.bbb.ccc`, + shouldMatch: true, }, { - title: "Host and Path route, wrong host in request URL", - route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, + title: "Host and Path route, wrong host in request URL", + route: 
new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{}, + host: "", + path: "", + pathTemplate: `/111/222/333`, + hostTemplate: `aaa.bbb.ccc`, + shouldMatch: false, }, { - title: "Host and Path route with pattern, match", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb", "v2": "222"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: true, + title: "Host and Path route with pattern, match", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb", "v2": "222"}, + host: "aaa.bbb.ccc", + path: "/111/222/333", + pathTemplate: `/111/{v2:[0-9]{3}}/333`, + hostTemplate: `aaa.{v1:[a-z]{3}}.ccc`, + shouldMatch: true, }, { - title: "Host and Path route with pattern, URL in request does not match", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb", "v2": "222"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: false, + title: "Host and Path route with pattern, URL in request does not match", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb", "v2": "222"}, + host: "aaa.bbb.ccc", + path: "/111/222/333", + pathTemplate: `/111/{v2:[0-9]{3}}/333`, + hostTemplate: `aaa.{v1:[a-z]{3}}.ccc`, + shouldMatch: false, }, { - title: "Host and Path route with multiple patterns, match", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), - request: newRequest("GET", 
"http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: true, + title: "Host and Path route with multiple patterns, match", + route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, + host: "aaa.bbb.ccc", + path: "/111/222/333", + pathTemplate: `/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}`, + hostTemplate: `{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}`, + shouldMatch: true, }, { - title: "Host and Path route with multiple patterns, URL in request does not match", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: false, + title: "Host and Path route with multiple patterns, URL in request does not match", + route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, + host: "aaa.bbb.ccc", + path: "/111/222/333", + pathTemplate: `/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}`, + hostTemplate: `{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}`, + shouldMatch: false, }, } for _, test := range tests { testRoute(t, test) + testTemplate(t, test) + testUseEscapedRoute(t, test) } } @@ -398,10 +611,29 @@ func TestHeaders(t *testing.T) { path: "", shouldMatch: false, }, + { + title: "Headers 
route, regex header values to match", + route: new(Route).Headers("foo", "ba[zr]"), + request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar"}), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Headers route, regex header values to match", + route: new(Route).HeadersRegexp("foo", "ba[zr]"), + request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "baz"}), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, } for _, test := range tests { testRoute(t, test) + testTemplate(t, test) } } @@ -439,6 +671,7 @@ func TestMethods(t *testing.T) { for _, test := range tests { testRoute(t, test) + testTemplate(t, test) } } @@ -454,22 +687,26 @@ func TestQueries(t *testing.T) { shouldMatch: true, }, { - title: "Queries route, match with a query string", - route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://www.example.com/api?foo=bar&baz=ding"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, + title: "Queries route, match with a query string", + route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://www.example.com/api?foo=bar&baz=ding"), + vars: map[string]string{}, + host: "", + path: "", + pathTemplate: `/api`, + hostTemplate: `www.example.com`, + shouldMatch: true, }, { - title: "Queries route, match with a query string out of order", - route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://www.example.com/api?baz=ding&foo=bar"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, + title: "Queries route, match with a query string out of order", + route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", 
"http://www.example.com/api?baz=ding&foo=bar"), + vars: map[string]string{}, + host: "", + path: "", + pathTemplate: `/api`, + hostTemplate: `www.example.com`, + shouldMatch: true, }, { title: "Queries route, bad query", @@ -516,10 +753,156 @@ func TestQueries(t *testing.T) { path: "", shouldMatch: false, }, + { + title: "Queries route with regexp pattern with quantifier, match", + route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), + request: newRequest("GET", "http://localhost?foo=1"), + vars: map[string]string{"v1": "1"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with regexp pattern with quantifier, additional variable in query string, match", + route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), + request: newRequest("GET", "http://localhost?bar=2&foo=1"), + vars: map[string]string{"v1": "1"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with regexp pattern with quantifier, regexp does not match", + route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), + request: newRequest("GET", "http://localhost?foo=12"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with regexp pattern with quantifier, additional capturing group", + route: new(Route).Queries("foo", "{v1:[0-9]{1}(?:a|b)}"), + request: newRequest("GET", "http://localhost?foo=1a"), + vars: map[string]string{"v1": "1a"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with regexp pattern with quantifier, additional variable in query string, regexp does not match", + route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), + request: newRequest("GET", "http://localhost?foo=12"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with hyphenated name, match", + route: new(Route).Queries("foo", "{v-1}"), + request: newRequest("GET", "http://localhost?foo=bar"), + vars: map[string]string{"v-1": "bar"}, + host: 
"", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with multiple hyphenated names, match", + route: new(Route).Queries("foo", "{v-1}", "baz", "{v-2}"), + request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), + vars: map[string]string{"v-1": "bar", "v-2": "ding"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with hyphenate name and pattern, match", + route: new(Route).Queries("foo", "{v-1:[0-9]+}"), + request: newRequest("GET", "http://localhost?foo=10"), + vars: map[string]string{"v-1": "10"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with hyphenated name and pattern with quantifier, additional capturing group", + route: new(Route).Queries("foo", "{v-1:[0-9]{1}(?:a|b)}"), + request: newRequest("GET", "http://localhost?foo=1a"), + vars: map[string]string{"v-1": "1a"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with empty value, should match", + route: new(Route).Queries("foo", ""), + request: newRequest("GET", "http://localhost?foo=bar"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with empty value and no parameter in request, should not match", + route: new(Route).Queries("foo", ""), + request: newRequest("GET", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with empty value and empty parameter in request, should match", + route: new(Route).Queries("foo", ""), + request: newRequest("GET", "http://localhost?foo="), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with overlapping value, should not match", + route: new(Route).Queries("foo", "bar"), + request: newRequest("GET", "http://localhost?foo=barfoo"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with no parameter in 
request, should not match", + route: new(Route).Queries("foo", "{bar}"), + request: newRequest("GET", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with empty parameter in request, should match", + route: new(Route).Queries("foo", "{bar}"), + request: newRequest("GET", "http://localhost?foo="), + vars: map[string]string{"foo": ""}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route, bad submatch", + route: new(Route).Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://localhost?fffoo=bar&baz=dingggg"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, } for _, test := range tests { testRoute(t, test) + testTemplate(t, test) + testUseEscapedRoute(t, test) } } @@ -556,6 +939,7 @@ func TestSchemes(t *testing.T) { } for _, test := range tests { testRoute(t, test) + testTemplate(t, test) } } @@ -590,6 +974,43 @@ func TestMatcherFunc(t *testing.T) { for _, test := range tests { testRoute(t, test) + testTemplate(t, test) + } +} + +func TestBuildVarsFunc(t *testing.T) { + tests := []routeTest{ + { + title: "BuildVarsFunc set on route", + route: new(Route).Path(`/111/{v1:\d}{v2:.*}`).BuildVarsFunc(func(vars map[string]string) map[string]string { + vars["v1"] = "3" + vars["v2"] = "a" + return vars + }), + request: newRequest("GET", "http://localhost/111/2"), + path: "/111/3a", + pathTemplate: `/111/{v1:\d}{v2:.*}`, + shouldMatch: true, + }, + { + title: "BuildVarsFunc set on route and parent route", + route: new(Route).PathPrefix(`/{v1:\d}`).BuildVarsFunc(func(vars map[string]string) map[string]string { + vars["v1"] = "2" + return vars + }).Subrouter().Path(`/{v2:\w}`).BuildVarsFunc(func(vars map[string]string) map[string]string { + vars["v2"] = "b" + return vars + }), + request: newRequest("GET", "http://localhost/1/a"), + path: "/2/b", + pathTemplate: `/{v1:\d}/{v2:\w}`, + shouldMatch: true, + }, + } + + for _, 
test := range tests { + testRoute(t, test) + testTemplate(t, test) } } @@ -599,41 +1020,49 @@ func TestSubRouter(t *testing.T) { tests := []routeTest{ { - route: subrouter1.Path("/{v2:[a-z]+}"), - request: newRequest("GET", "http://aaa.google.com/bbb"), - vars: map[string]string{"v1": "aaa", "v2": "bbb"}, - host: "aaa.google.com", - path: "/bbb", - shouldMatch: true, + route: subrouter1.Path("/{v2:[a-z]+}"), + request: newRequest("GET", "http://aaa.google.com/bbb"), + vars: map[string]string{"v1": "aaa", "v2": "bbb"}, + host: "aaa.google.com", + path: "/bbb", + pathTemplate: `/{v2:[a-z]+}`, + hostTemplate: `{v1:[a-z]+}.google.com`, + shouldMatch: true, }, { - route: subrouter1.Path("/{v2:[a-z]+}"), - request: newRequest("GET", "http://111.google.com/111"), - vars: map[string]string{"v1": "aaa", "v2": "bbb"}, - host: "aaa.google.com", - path: "/bbb", - shouldMatch: false, + route: subrouter1.Path("/{v2:[a-z]+}"), + request: newRequest("GET", "http://111.google.com/111"), + vars: map[string]string{"v1": "aaa", "v2": "bbb"}, + host: "aaa.google.com", + path: "/bbb", + pathTemplate: `/{v2:[a-z]+}`, + hostTemplate: `{v1:[a-z]+}.google.com`, + shouldMatch: false, }, { - route: subrouter2.Path("/baz/{v2}"), - request: newRequest("GET", "http://localhost/foo/bar/baz/ding"), - vars: map[string]string{"v1": "bar", "v2": "ding"}, - host: "", - path: "/foo/bar/baz/ding", - shouldMatch: true, + route: subrouter2.Path("/baz/{v2}"), + request: newRequest("GET", "http://localhost/foo/bar/baz/ding"), + vars: map[string]string{"v1": "bar", "v2": "ding"}, + host: "", + path: "/foo/bar/baz/ding", + pathTemplate: `/foo/{v1}/baz/{v2}`, + shouldMatch: true, }, { - route: subrouter2.Path("/baz/{v2}"), - request: newRequest("GET", "http://localhost/foo/bar"), - vars: map[string]string{"v1": "bar", "v2": "ding"}, - host: "", - path: "/foo/bar/baz/ding", - shouldMatch: false, + route: subrouter2.Path("/baz/{v2}"), + request: newRequest("GET", "http://localhost/foo/bar"), + vars: 
map[string]string{"v1": "bar", "v2": "ding"}, + host: "", + path: "/foo/bar/baz/ding", + pathTemplate: `/foo/{v1}/baz/{v2}`, + shouldMatch: false, }, } for _, test := range tests { testRoute(t, test) + testTemplate(t, test) + testUseEscapedRoute(t, test) } } @@ -729,6 +1158,176 @@ func TestStrictSlash(t *testing.T) { for _, test := range tests { testRoute(t, test) + testTemplate(t, test) + testUseEscapedRoute(t, test) + } +} + +func TestUseEncodedPath(t *testing.T) { + r := NewRouter() + r.UseEncodedPath() + + tests := []routeTest{ + { + title: "Router with useEncodedPath, URL with encoded slash does match", + route: r.NewRoute().Path("/v1/{v1}/v2"), + request: newRequest("GET", "http://localhost/v1/1%2F2/v2"), + vars: map[string]string{"v1": "1%2F2"}, + host: "", + path: "/v1/1%2F2/v2", + pathTemplate: `/v1/{v1}/v2`, + shouldMatch: true, + }, + { + title: "Router with useEncodedPath, URL with encoded slash doesn't match", + route: r.NewRoute().Path("/v1/1/2/v2"), + request: newRequest("GET", "http://localhost/v1/1%2F2/v2"), + vars: map[string]string{"v1": "1%2F2"}, + host: "", + path: "/v1/1%2F2/v2", + pathTemplate: `/v1/1/2/v2`, + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + testTemplate(t, test) + } +} + +func TestWalkSingleDepth(t *testing.T) { + r0 := NewRouter() + r1 := NewRouter() + r2 := NewRouter() + + r0.Path("/g") + r0.Path("/o") + r0.Path("/d").Handler(r1) + r0.Path("/r").Handler(r2) + r0.Path("/a") + + r1.Path("/z") + r1.Path("/i") + r1.Path("/l") + r1.Path("/l") + + r2.Path("/i") + r2.Path("/l") + r2.Path("/l") + + paths := []string{"g", "o", "r", "i", "l", "l", "a"} + depths := []int{0, 0, 0, 1, 1, 1, 0} + i := 0 + err := r0.Walk(func(route *Route, router *Router, ancestors []*Route) error { + matcher := route.matchers[0].(*routeRegexp) + if matcher.template == "/d" { + return SkipRouter + } + if len(ancestors) != depths[i] { + t.Errorf(`Expected depth of %d at i = %d; got "%d"`, depths[i], i, len(ancestors)) + 
} + if matcher.template != "/"+paths[i] { + t.Errorf(`Expected "/%s" at i = %d; got "%s"`, paths[i], i, matcher.template) + } + i++ + return nil + }) + if err != nil { + panic(err) + } + if i != len(paths) { + t.Errorf("Expected %d routes, found %d", len(paths), i) + } +} + +func TestWalkNested(t *testing.T) { + router := NewRouter() + + g := router.Path("/g").Subrouter() + o := g.PathPrefix("/o").Subrouter() + r := o.PathPrefix("/r").Subrouter() + i := r.PathPrefix("/i").Subrouter() + l1 := i.PathPrefix("/l").Subrouter() + l2 := l1.PathPrefix("/l").Subrouter() + l2.Path("/a") + + paths := []string{"/g", "/g/o", "/g/o/r", "/g/o/r/i", "/g/o/r/i/l", "/g/o/r/i/l/l", "/g/o/r/i/l/l/a"} + idx := 0 + err := router.Walk(func(route *Route, router *Router, ancestors []*Route) error { + path := paths[idx] + tpl := route.regexp.path.template + if tpl != path { + t.Errorf(`Expected %s got %s`, path, tpl) + } + idx++ + return nil + }) + if err != nil { + panic(err) + } + if idx != len(paths) { + t.Errorf("Expected %d routes, found %d", len(paths), idx) + } +} + +func TestWalkErrorRoute(t *testing.T) { + router := NewRouter() + router.Path("/g") + expectedError := errors.New("error") + err := router.Walk(func(route *Route, router *Router, ancestors []*Route) error { + return expectedError + }) + if err != expectedError { + t.Errorf("Expected %v routes, found %v", expectedError, err) + } +} + +func TestWalkErrorMatcher(t *testing.T) { + router := NewRouter() + expectedError := router.Path("/g").Subrouter().Path("").GetError() + err := router.Walk(func(route *Route, router *Router, ancestors []*Route) error { + return route.GetError() + }) + if err != expectedError { + t.Errorf("Expected %v routes, found %v", expectedError, err) + } +} + +func TestWalkErrorHandler(t *testing.T) { + handler := NewRouter() + expectedError := handler.Path("/path").Subrouter().Path("").GetError() + router := NewRouter() + router.Path("/g").Handler(handler) + err := router.Walk(func(route *Route, router 
*Router, ancestors []*Route) error { + return route.GetError() + }) + if err != expectedError { + t.Errorf("Expected %v routes, found %v", expectedError, err) + } +} + +func TestSubrouterErrorHandling(t *testing.T) { + superRouterCalled := false + subRouterCalled := false + + router := NewRouter() + router.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + superRouterCalled = true + }) + subRouter := router.PathPrefix("/bign8").Subrouter() + subRouter.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + subRouterCalled = true + }) + + req, _ := http.NewRequest("GET", "http://localhost/bign8/was/here", nil) + router.ServeHTTP(NewRecorder(), req) + + if superRouterCalled { + t.Error("Super router 404 handler called when sub-router 404 handler is available.") + } + if !subRouterCalled { + t.Error("Sub-router 404 handler was not called.") } } @@ -737,14 +1336,13 @@ func TestStrictSlash(t *testing.T) { // ---------------------------------------------------------------------------- func getRouteTemplate(route *Route) string { - host, path := "none", "none" - if route.regexp != nil { - if route.regexp.host != nil { - host = route.regexp.host.template - } - if route.regexp.path != nil { - path = route.regexp.path.template - } + host, err := route.GetHostTemplate() + if err != nil { + host = "none" + } + path, err := route.GetPathTemplate() + if err != nil { + path = "none" } return fmt.Sprintf("Host: %v, Path: %v", host, path) } @@ -806,34 +1404,31 @@ func testRoute(t *testing.T, test routeTest) { } } -// Tests that the context is cleared or not cleared properly depending on -// the configuration of the router -func TestKeepContext(t *testing.T) { - func1 := func(w http.ResponseWriter, r *http.Request) {} +func testUseEscapedRoute(t *testing.T, test routeTest) { + test.route.useEncodedPath = true + testRoute(t, test) +} - r := NewRouter() - r.HandleFunc("/", func1).Name("func1") - - req, _ := 
http.NewRequest("GET", "http://localhost/", nil) - context.Set(req, "t", 1) - - res := new(http.ResponseWriter) - r.ServeHTTP(*res, req) - - if _, ok := context.GetOk(req, "t"); ok { - t.Error("Context should have been cleared at end of request") +func testTemplate(t *testing.T, test routeTest) { + route := test.route + pathTemplate := test.pathTemplate + if len(pathTemplate) == 0 { + pathTemplate = test.path + } + hostTemplate := test.hostTemplate + if len(hostTemplate) == 0 { + hostTemplate = test.host } - r.KeepContext = true - - req, _ = http.NewRequest("GET", "http://localhost/", nil) - context.Set(req, "t", 1) - - r.ServeHTTP(*res, req) - if _, ok := context.GetOk(req, "t"); !ok { - t.Error("Context should NOT have been cleared at end of request") + routePathTemplate, pathErr := route.GetPathTemplate() + if pathErr == nil && routePathTemplate != pathTemplate { + t.Errorf("(%v) GetPathTemplate not equal: expected %v, got %v", test.title, pathTemplate, routePathTemplate) } + routeHostTemplate, hostErr := route.GetHostTemplate() + if hostErr == nil && routeHostTemplate != hostTemplate { + t.Errorf("(%v) GetHostTemplate not equal: expected %v, got %v", test.title, hostTemplate, routeHostTemplate) + } } type TestA301ResponseWriter struct { @@ -876,6 +1471,24 @@ func Test301Redirect(t *testing.T) { } } +func TestSkipClean(t *testing.T) { + func1 := func(w http.ResponseWriter, r *http.Request) {} + func2 := func(w http.ResponseWriter, r *http.Request) {} + + r := NewRouter() + r.SkipClean(true) + r.HandleFunc("/api/", func2).Name("func2") + r.HandleFunc("/", func1).Name("func1") + + req, _ := http.NewRequest("GET", "http://localhost//api/?abc=def", nil) + res := NewRecorder() + r.ServeHTTP(res, req) + + if len(res.HeaderMap["Location"]) != 0 { + t.Errorf("Shouldn't redirect since skip clean is disabled") + } +} + // https://plus.google.com/101022900381697718949/posts/eWy6DjFJ6uW func TestSubrouterHeader(t *testing.T) { expected := "func1 response" @@ -933,11 
+1546,42 @@ func stringMapEqual(m1, m2 map[string]string) bool { return true } -// newRequest is a helper function to create a new request with a method and url +// newRequest is a helper function to create a new request with a method and url. +// The request returned is a 'server' request as opposed to a 'client' one through +// simulated write onto the wire and read off of the wire. +// The differences between requests are detailed in the net/http package. func newRequest(method, url string) *http.Request { req, err := http.NewRequest(method, url, nil) if err != nil { panic(err) } + // extract the escaped original host+path from url + // http://localhost/path/here?v=1#frag -> //localhost/path/here + opaque := "" + if i := len(req.URL.Scheme); i > 0 { + opaque = url[i+1:] + } + + if i := strings.LastIndex(opaque, "?"); i > -1 { + opaque = opaque[:i] + } + if i := strings.LastIndex(opaque, "#"); i > -1 { + opaque = opaque[:i] + } + + // Escaped host+path workaround as detailed in https://golang.org/pkg/net/url/#URL + // for < 1.5 client side workaround + req.URL.Opaque = opaque + + // Simulate writing to wire + var buff bytes.Buffer + req.Write(&buff) + ioreader := bufio.NewReader(&buff) + + // Parse request off of 'wire' + req, err = http.ReadRequest(ioreader) + if err != nil { + panic(err) + } return req } diff --git a/vendor/github.com/gorilla/mux/old_test.go b/vendor/github.com/gorilla/mux/old_test.go index 1f7c190c0..9bdc5e5d1 100644 --- a/vendor/github.com/gorilla/mux/old_test.go +++ b/vendor/github.com/gorilla/mux/old_test.go @@ -36,10 +36,6 @@ func NewRecorder() *ResponseRecorder { } } -// DefaultRemoteAddr is the default remote address to return in RemoteAddr if -// an explicit DefaultRemoteAddr isn't set on ResponseRecorder. -const DefaultRemoteAddr = "1.2.3.4" - // Header returns the response headers. 
func (rw *ResponseRecorder) Header() http.Header { return rw.HeaderMap @@ -545,7 +541,7 @@ func TestMatchedRouteName(t *testing.T) { router := NewRouter() route := router.NewRoute().Path("/products/").Name(routeName) - url := "http://www.domain.com/products/" + url := "http://www.example.com/products/" request, _ := http.NewRequest("GET", url, nil) var rv RouteMatch ok := router.Match(request, &rv) @@ -563,10 +559,10 @@ func TestMatchedRouteName(t *testing.T) { func TestSubRouting(t *testing.T) { // Example from docs. router := NewRouter() - subrouter := router.NewRoute().Host("www.domain.com").Subrouter() + subrouter := router.NewRoute().Host("www.example.com").Subrouter() route := subrouter.NewRoute().Path("/products/").Name("products") - url := "http://www.domain.com/products/" + url := "http://www.example.com/products/" request, _ := http.NewRequest("GET", url, nil) var rv RouteMatch ok := router.Match(request, &rv) @@ -576,10 +572,10 @@ func TestSubRouting(t *testing.T) { } u, _ := router.Get("products").URL() - builtUrl := u.String() + builtURL := u.String() // Yay, subroute aware of the domain when building! 
- if builtUrl != url { - t.Errorf("Expected %q, got %q.", url, builtUrl) + if builtURL != url { + t.Errorf("Expected %q, got %q.", url, builtURL) } } @@ -691,7 +687,7 @@ func TestNewRegexp(t *testing.T) { } for pattern, paths := range tests { - p, _ = newRouteRegexp(pattern, false, false, false, false) + p, _ = newRouteRegexp(pattern, false, false, false, false, false) for path, result := range paths { matches = p.regexp.FindStringSubmatch(path) if result == nil { diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go index a6305483d..fd8fe3956 100644 --- a/vendor/github.com/gorilla/mux/regexp.go +++ b/vendor/github.com/gorilla/mux/regexp.go @@ -10,6 +10,7 @@ import ( "net/http" "net/url" "regexp" + "strconv" "strings" ) @@ -23,7 +24,7 @@ import ( // Previously we accepted only Python-like identifiers for variable // names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that // name and pattern can't be empty, and names can't contain a colon. -func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash bool) (*routeRegexp, error) { +func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash, useEncodedPath bool) (*routeRegexp, error) { // Check if it is well-formed. idxs, errBraces := braceIndices(tpl) if errBraces != nil { @@ -34,8 +35,7 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash // Now let's parse it. 
defaultPattern := "[^/]+" if matchQuery { - defaultPattern = "[^?&]+" - matchPrefix = true + defaultPattern = "[^?&]*" } else if matchHost { defaultPattern = "[^.]+" matchPrefix = false @@ -53,9 +53,7 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash varsN := make([]string, len(idxs)/2) varsR := make([]*regexp.Regexp, len(idxs)/2) pattern := bytes.NewBufferString("") - if !matchQuery { - pattern.WriteByte('^') - } + pattern.WriteByte('^') reverse := bytes.NewBufferString("") var end int var err error @@ -75,9 +73,11 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash tpl[idxs[i]:end]) } // Build the regexp pattern. - fmt.Fprintf(pattern, "%s(%s)", regexp.QuoteMeta(raw), patt) + fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt) + // Build the reverse template. fmt.Fprintf(reverse, "%s%%s", raw) + // Append variable name and compiled pattern. varsN[i/2] = name varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) @@ -91,6 +91,12 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash if strictSlash { pattern.WriteString("[/]?") } + if matchQuery { + // Add the default pattern if the query value is empty + if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" { + pattern.WriteString(defaultPattern) + } + } if !matchPrefix { pattern.WriteByte('$') } @@ -105,14 +111,15 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash } // Done! 
return &routeRegexp{ - template: template, - matchHost: matchHost, - matchQuery: matchQuery, - strictSlash: strictSlash, - regexp: reg, - reverse: reverse.String(), - varsN: varsN, - varsR: varsR, + template: template, + matchHost: matchHost, + matchQuery: matchQuery, + strictSlash: strictSlash, + useEncodedPath: useEncodedPath, + regexp: reg, + reverse: reverse.String(), + varsN: varsN, + varsR: varsR, }, nil } @@ -127,6 +134,9 @@ type routeRegexp struct { matchQuery bool // The strictSlash value defined on the route, but disabled if PathPrefix was used. strictSlash bool + // Determines whether to use encoded path from getPath function or unencoded + // req.URL.Path for path matching + useEncodedPath bool // Expanded regexp. regexp *regexp.Regexp // Reverse template. @@ -141,20 +151,20 @@ type routeRegexp struct { func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { if !r.matchHost { if r.matchQuery { - return r.regexp.MatchString(req.URL.RawQuery) - } else { - return r.regexp.MatchString(req.URL.Path) + return r.matchQueryString(req) } + path := req.URL.Path + if r.useEncodedPath { + path = getPath(req) + } + return r.regexp.MatchString(path) } + return r.regexp.MatchString(getHost(req)) } // url builds a URL part using the given values. -func (r *routeRegexp) url(pairs ...string) (string, error) { - values, err := mapFromPairs(pairs...) - if err != nil { - return "", err - } +func (r *routeRegexp) url(values map[string]string) (string, error) { urlValues := make([]interface{}, len(r.varsN)) for k, v := range r.varsN { value, ok := values[v] @@ -179,11 +189,31 @@ func (r *routeRegexp) url(pairs ...string) (string, error) { return rv, nil } +// getURLQuery returns a single query parameter from a request URL. +// For a URL with foo=bar&baz=ding, we return only the relevant key +// value pair for the routeRegexp. 
+func (r *routeRegexp) getURLQuery(req *http.Request) string { + if !r.matchQuery { + return "" + } + templateKey := strings.SplitN(r.template, "=", 2)[0] + for key, vals := range req.URL.Query() { + if key == templateKey && len(vals) > 0 { + return key + "=" + vals[0] + } + } + return "" +} + +func (r *routeRegexp) matchQueryString(req *http.Request) bool { + return r.regexp.MatchString(r.getURLQuery(req)) +} + // braceIndices returns the first level curly brace indices from a string. // It returns an error in case of unbalanced braces. func braceIndices(s string) ([]int, error) { var level, idx int - idxs := make([]int, 0) + var idxs []int for i := 0; i < len(s); i++ { switch s[i] { case '{': @@ -204,6 +234,11 @@ func braceIndices(s string) ([]int, error) { return idxs, nil } +// varGroupName builds a capturing group name for the indexed variable. +func varGroupName(idx int) string { + return "v" + strconv.Itoa(idx) +} + // ---------------------------------------------------------------------------- // routeRegexpGroup // ---------------------------------------------------------------------------- @@ -219,23 +254,24 @@ type routeRegexpGroup struct { func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { // Store host variables. if v.host != nil { - hostVars := v.host.regexp.FindStringSubmatch(getHost(req)) - if hostVars != nil { - for k, v := range v.host.varsN { - m.Vars[v] = hostVars[k+1] - } + host := getHost(req) + matches := v.host.regexp.FindStringSubmatchIndex(host) + if len(matches) > 0 { + extractVars(host, matches, v.host.varsN, m.Vars) } } + path := req.URL.Path + if r.useEncodedPath { + path = getPath(req) + } // Store path variables. 
if v.path != nil { - pathVars := v.path.regexp.FindStringSubmatch(req.URL.Path) - if pathVars != nil { - for k, v := range v.path.varsN { - m.Vars[v] = pathVars[k+1] - } + matches := v.path.regexp.FindStringSubmatchIndex(path) + if len(matches) > 0 { + extractVars(path, matches, v.path.varsN, m.Vars) // Check if we should redirect. if v.path.strictSlash { - p1 := strings.HasSuffix(req.URL.Path, "/") + p1 := strings.HasSuffix(path, "/") p2 := strings.HasSuffix(v.path.template, "/") if p1 != p2 { u, _ := url.Parse(req.URL.String()) @@ -250,13 +286,11 @@ func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) } } // Store query string variables. - rawQuery := req.URL.RawQuery for _, q := range v.queries { - queryVars := q.regexp.FindStringSubmatch(rawQuery) - if queryVars != nil { - for k, v := range q.varsN { - m.Vars[v] = queryVars[k+1] - } + queryURL := q.getURLQuery(req) + matches := q.regexp.FindStringSubmatchIndex(queryURL) + if len(matches) > 0 { + extractVars(queryURL, matches, q.varsN, m.Vars) } } } @@ -274,3 +308,9 @@ func getHost(r *http.Request) string { return host } + +func extractVars(input string, matches []int, names []string, output map[string]string) { + for i, name := range names { + output[name] = input[matches[2*i+2]:matches[2*i+3]] + } +} diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go index c310e66bc..293b6d493 100644 --- a/vendor/github.com/gorilla/mux/route.go +++ b/vendor/github.com/gorilla/mux/route.go @@ -9,6 +9,7 @@ import ( "fmt" "net/http" "net/url" + "regexp" "strings" ) @@ -25,12 +26,23 @@ type Route struct { // If true, when the path pattern is "/path/", accessing "/path" will // redirect to the former and vice versa. 
strictSlash bool + // If true, when the path pattern is "/path//to", accessing "/path//to" + // will not redirect + skipClean bool + // If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to" + useEncodedPath bool // If true, this route never matches: it is only used to build URLs. buildOnly bool // The name used to build URLs. name string // Error resulted from building a route. err error + + buildVarsFunc BuildVarsFunc +} + +func (r *Route) SkipClean() bool { + return r.skipClean } // Match matches the route against the request. @@ -148,7 +160,7 @@ func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl } } - rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash) + rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash, r.useEncodedPath) if err != nil { return err } @@ -186,7 +198,7 @@ func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery type headerMatcher map[string]string func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchMap(m, r.Header, true) + return matchMapWithString(m, r.Header, true) } // Headers adds a matcher for request header values. @@ -197,22 +209,46 @@ func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { // "X-Requested-With", "XMLHttpRequest") // // The above route will only match if both request header values match. -// -// It the value is an empty string, it will match any value if the key is set. +// If the value is an empty string, it will match any value if the key is set. func (r *Route) Headers(pairs ...string) *Route { if r.err == nil { var headers map[string]string - headers, r.err = mapFromPairs(pairs...) + headers, r.err = mapFromPairsToString(pairs...) 
return r.addMatcher(headerMatcher(headers)) } return r } +// headerRegexMatcher matches the request against the route given a regex for the header +type headerRegexMatcher map[string]*regexp.Regexp + +func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchMapWithRegex(m, r.Header, true) +} + +// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex +// support. For example: +// +// r := mux.NewRouter() +// r.HeadersRegexp("Content-Type", "application/(text|json)", +// "X-Requested-With", "XMLHttpRequest") +// +// The above route will only match if both the request header matches both regular expressions. +// It the value is an empty string, it will match any value if the key is set. +func (r *Route) HeadersRegexp(pairs ...string) *Route { + if r.err == nil { + var headers map[string]*regexp.Regexp + headers, r.err = mapFromPairsToRegex(pairs...) + return r.addMatcher(headerRegexMatcher(headers)) + } + return r +} + // Host ----------------------------------------------------------------------- // Host adds a matcher for the URL host. // It accepts a template with zero or more URL variables enclosed by {}. -// Variables can define an optional regexp pattern to me matched: +// Variables can define an optional regexp pattern to be matched: // // - {name} matches anything until the next dot. // @@ -221,7 +257,7 @@ func (r *Route) Headers(pairs ...string) *Route { // For example: // // r := mux.NewRouter() -// r.Host("www.domain.com") +// r.Host("www.example.com") // r.Host("{subdomain}.domain.com") // r.Host("{subdomain:[a-z]+}.domain.com") // @@ -237,6 +273,7 @@ func (r *Route) Host(tpl string) *Route { // MatcherFunc is the function signature used by custom matchers. type MatcherFunc func(*http.Request, *RouteMatch) bool +// Match returns the match for a given request. 
func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool { return m(r, match) } @@ -270,7 +307,7 @@ func (r *Route) Methods(methods ...string) *Route { // Path adds a matcher for the URL path. // It accepts a template with zero or more URL variables enclosed by {}. The // template must start with a "/". -// Variables can define an optional regexp pattern to me matched: +// Variables can define an optional regexp pattern to be matched: // // - {name} matches anything until the next slash. // @@ -321,7 +358,7 @@ func (r *Route) PathPrefix(tpl string) *Route { // // It the value is an empty string, it will match any value if the key is set. // -// Variables can define an optional regexp pattern to me matched: +// Variables can define an optional regexp pattern to be matched: // // - {name} matches anything until the next slash. // @@ -334,7 +371,7 @@ func (r *Route) Queries(pairs ...string) *Route { return nil } for i := 0; i < length; i += 2 { - if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, true, true); r.err != nil { + if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, false, true); r.err != nil { return r } } @@ -360,6 +397,19 @@ func (r *Route) Schemes(schemes ...string) *Route { return r.addMatcher(schemeMatcher(schemes)) } +// BuildVarsFunc -------------------------------------------------------------- + +// BuildVarsFunc is the function signature used by custom build variable +// functions (which can modify route variables before a route's URL is built). +type BuildVarsFunc func(map[string]string) map[string]string + +// BuildVarsFunc adds a custom function to be used to modify build variables +// before a route's URL is built. +func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { + r.buildVarsFunc = f + return r +} + // Subrouter ------------------------------------------------------------------ // Subrouter creates a subrouter for the route. 
@@ -367,7 +417,7 @@ func (r *Route) Schemes(schemes ...string) *Route { // It will test the inner routes only if the parent route matched. For example: // // r := mux.NewRouter() -// s := r.Host("www.domain.com").Subrouter() +// s := r.Host("www.example.com").Subrouter() // s.HandleFunc("/products/", ProductsHandler) // s.HandleFunc("/products/{key}", ProductHandler) // s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) @@ -422,17 +472,20 @@ func (r *Route) URL(pairs ...string) (*url.URL, error) { if r.regexp == nil { return nil, errors.New("mux: route doesn't have a host or path") } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } var scheme, host, path string - var err error if r.regexp.host != nil { // Set a default scheme. scheme = "http" - if host, err = r.regexp.host.url(pairs...); err != nil { + if host, err = r.regexp.host.url(values); err != nil { return nil, err } } if r.regexp.path != nil { - if path, err = r.regexp.path.url(pairs...); err != nil { + if path, err = r.regexp.path.url(values); err != nil { return nil, err } } @@ -453,7 +506,11 @@ func (r *Route) URLHost(pairs ...string) (*url.URL, error) { if r.regexp == nil || r.regexp.host == nil { return nil, errors.New("mux: route doesn't have a host") } - host, err := r.regexp.host.url(pairs...) + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + host, err := r.regexp.host.url(values) if err != nil { return nil, err } @@ -473,7 +530,11 @@ func (r *Route) URLPath(pairs ...string) (*url.URL, error) { if r.regexp == nil || r.regexp.path == nil { return nil, errors.New("mux: route doesn't have a path") } - path, err := r.regexp.path.url(pairs...) + values, err := r.prepareVars(pairs...) 
+ if err != nil { + return nil, err + } + path, err := r.regexp.path.url(values) if err != nil { return nil, err } @@ -482,6 +543,56 @@ func (r *Route) URLPath(pairs ...string) (*url.URL, error) { }, nil } +// GetPathTemplate returns the template used to build the +// route match. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define a path. +func (r *Route) GetPathTemplate() (string, error) { + if r.err != nil { + return "", r.err + } + if r.regexp == nil || r.regexp.path == nil { + return "", errors.New("mux: route doesn't have a path") + } + return r.regexp.path.template, nil +} + +// GetHostTemplate returns the template used to build the +// route match. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define a host. +func (r *Route) GetHostTemplate() (string, error) { + if r.err != nil { + return "", r.err + } + if r.regexp == nil || r.regexp.host == nil { + return "", errors.New("mux: route doesn't have a host") + } + return r.regexp.host.template, nil +} + +// prepareVars converts the route variable pairs into a map. If the route has a +// BuildVarsFunc, it is invoked. +func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { + m, err := mapFromPairsToString(pairs...) 
+ if err != nil { + return nil, err + } + return r.buildVars(m), nil +} + +func (r *Route) buildVars(m map[string]string) map[string]string { + if r.parent != nil { + m = r.parent.buildVars(m) + } + if r.buildVarsFunc != nil { + m = r.buildVarsFunc(m) + } + return m +} + // ---------------------------------------------------------------------------- // parentRoute // ---------------------------------------------------------------------------- @@ -490,6 +601,7 @@ func (r *Route) URLPath(pairs ...string) (*url.URL, error) { type parentRoute interface { getNamedRoutes() map[string]*Route getRegexpGroup() *routeRegexpGroup + buildVars(map[string]string) map[string]string } // getNamedRoutes returns the map where named routes are registered. diff --git a/vendor/github.com/opencontainers/go-digest/.mailmap b/vendor/github.com/opencontainers/go-digest/.mailmap new file mode 100644 index 000000000..ba611cb21 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/.mailmap @@ -0,0 +1 @@ +Stephen J Day diff --git a/vendor/github.com/opencontainers/go-digest/.pullapprove.yml b/vendor/github.com/opencontainers/go-digest/.pullapprove.yml new file mode 100644 index 000000000..45fa4b9ec --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/.pullapprove.yml @@ -0,0 +1,12 @@ +approve_by_comment: true +approve_regex: '^(Approved|lgtm|LGTM|:shipit:|:star:|:\+1:|:ship:)' +reject_regex: ^Rejected +reset_on_push: true +author_approval: ignored +signed_off_by: + required: true +reviewers: + teams: + - go-digest-maintainers + name: default + required: 2 diff --git a/vendor/github.com/opencontainers/go-digest/.travis.yml b/vendor/github.com/opencontainers/go-digest/.travis.yml new file mode 100644 index 000000000..7ea4ed1d2 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/.travis.yml @@ -0,0 +1,4 @@ +language: go +go: + - 1.7 + - master diff --git a/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md 
b/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md new file mode 100644 index 000000000..e4d962ac1 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md @@ -0,0 +1,72 @@ +# Contributing to Docker open source projects + +Want to hack on this project? Awesome! Here are instructions to get you started. + +This project is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read Docker's +[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), +[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), +[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and +[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). + +For an in-depth description of our contribution process, visit the +contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/) + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +1 Letterman Drive +Suite D4700 +San Francisco, CA, 94129 + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. 
+ + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE.code b/vendor/github.com/opencontainers/go-digest/LICENSE.code new file mode 100644 index 000000000..0ea3ff81e --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/LICENSE.code @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE.docs b/vendor/github.com/opencontainers/go-digest/LICENSE.docs new file mode 100644 index 000000000..e26cd4fc8 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. 
The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. 
More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. 
Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. 
Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. 
The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. 
Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. 
You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. 
You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. 
However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. 
No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." Except for the limited purpose of indicating +that material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the public +licenses. + +Creative Commons may be contacted at creativecommons.org. 
diff --git a/vendor/github.com/opencontainers/go-digest/MAINTAINERS b/vendor/github.com/opencontainers/go-digest/MAINTAINERS new file mode 100644 index 000000000..42a29795d --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/MAINTAINERS @@ -0,0 +1,9 @@ +Aaron Lehmann (@aaronlehmann) +Brandon Philips (@philips) +Brendan Burns (@brendandburns) +Derek McGowan (@dmcgowan) +Jason Bouzane (@jbouzane) +John Starks (@jstarks) +Jonathan Boulle (@jonboulle) +Stephen Day (@stevvooe) +Vincent Batts (@vbatts) diff --git a/vendor/github.com/opencontainers/go-digest/README.md b/vendor/github.com/opencontainers/go-digest/README.md new file mode 100644 index 000000000..0f5a04092 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/README.md @@ -0,0 +1,104 @@ +# go-digest + +[![GoDoc](https://godoc.org/github.com/opencontainers/go-digest?status.svg)](https://godoc.org/github.com/opencontainers/go-digest) [![Go Report Card](https://goreportcard.com/badge/github.com/opencontainers/go-digest)](https://goreportcard.com/report/github.com/opencontainers/go-digest) [![Build Status](https://travis-ci.org/opencontainers/go-digest.svg?branch=master)](https://travis-ci.org/opencontainers/go-digest) + +Common digest package used across the container ecosystem. + +Please see the [godoc](https://godoc.org/github.com/opencontainers/go-digest) for more information. + +# What is a digest? + +A digest is just a hash. + +The most common use case for a digest is to create a content +identifier for use in [Content Addressable Storage](https://en.wikipedia.org/wiki/Content-addressable_storage) +systems: + +```go +id := digest.FromBytes([]byte("my content")) +``` + +In the example above, the id can be used to uniquely identify +the byte slice "my content". This allows two disparate applications +to agree on a verifiable identifier without having to trust one +another. 
+ +An identifying digest can be verified, as follows: + +```go +if id != digest.FromBytes([]byte("my content")) { + return errors.New("the content has changed!") +} +``` + +A `Verifier` type can be used to handle cases where an `io.Reader` +makes more sense: + +```go +rd := getContent() +verifier := id.Verifier() +io.Copy(verifier, rd) + +if !verifier.Verified() { + return errors.New("the content has changed!") +} +``` + +Using [Merkle DAGs](https://en.wikipedia.org/wiki/Merkle_tree), this +can power a rich, safe, content distribution system. + +# Usage + +While the [godoc](https://godoc.org/github.com/opencontainers/go-digest) is +considered the best resource, a few important items need to be called +out when using this package. + +1. Make sure to import the hash implementations into your application + or the package will panic. You should have something like the + following in the main (or other entrypoint) of your application: + + ```go + import ( + _ "crypto/sha256" + _ "crypto/sha512" + ) + ``` + This may seem inconvenient but it allows you replace the hash + implementations with others, such as https://github.com/stevvooe/resumable. + +2. Even though `digest.Digest` may be assemable as a string, _always_ + verify your input with `digest.Parse` or use `Digest.Validate` + when accepting untrusted input. While there are measures to + avoid common problems, this will ensure you have valid digests + in the rest of your application. + +# Stability + +The Go API, at this stage, is considered stable, unless otherwise noted. + +As always, before using a package export, read the [godoc](https://godoc.org/github.com/opencontainers/go-digest). + +# Contributing + +This package is considered fairly complete. It has been in production +in thousands (millions?) of deployments and is fairly battle-hardened. +New additions will be met with skepticism. 
If you think there is a +missing feature, please file a bug clearly describing the problem and +the alternatives you tried before submitting a PR. + +# Reporting security issues + +Please DO NOT file a public issue, instead send your report privately to +security@opencontainers.org. + +The maintainers take security seriously. If you discover a security issue, +please bring it to their attention right away! + +If you are reporting a security issue, do not create an issue or file a pull +request on GitHub. Instead, disclose the issue responsibly by sending an email +to security@opencontainers.org (which is inhabited only by the maintainers of +the various OCI projects). + +# Copyright and license + +Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the [Apache 2.0 license](LICENSE.code). This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/. diff --git a/vendor/github.com/opencontainers/go-digest/algorithm.go b/vendor/github.com/opencontainers/go-digest/algorithm.go new file mode 100644 index 000000000..8813bd26f --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/algorithm.go @@ -0,0 +1,192 @@ +// Copyright 2017 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package digest + +import ( + "crypto" + "fmt" + "hash" + "io" + "regexp" +) + +// Algorithm identifies and implementation of a digester by an identifier. +// Note the that this defines both the hash algorithm used and the string +// encoding. +type Algorithm string + +// supported digest types +const ( + SHA256 Algorithm = "sha256" // sha256 with hex encoding (lower case only) + SHA384 Algorithm = "sha384" // sha384 with hex encoding (lower case only) + SHA512 Algorithm = "sha512" // sha512 with hex encoding (lower case only) + + // Canonical is the primary digest algorithm used with the distribution + // project. Other digests may be used but this one is the primary storage + // digest. + Canonical = SHA256 +) + +var ( + // TODO(stevvooe): Follow the pattern of the standard crypto package for + // registration of digests. Effectively, we are a registerable set and + // common symbol access. + + // algorithms maps values to hash.Hash implementations. Other algorithms + // may be available but they cannot be calculated by the digest package. + algorithms = map[Algorithm]crypto.Hash{ + SHA256: crypto.SHA256, + SHA384: crypto.SHA384, + SHA512: crypto.SHA512, + } + + // anchoredEncodedRegexps contains anchored regular expressions for hex-encoded digests. + // Note that /A-F/ disallowed. + anchoredEncodedRegexps = map[Algorithm]*regexp.Regexp{ + SHA256: regexp.MustCompile(`^[a-f0-9]{64}$`), + SHA384: regexp.MustCompile(`^[a-f0-9]{96}$`), + SHA512: regexp.MustCompile(`^[a-f0-9]{128}$`), + } +) + +// Available returns true if the digest type is available for use. If this +// returns false, Digester and Hash will return nil. 
+func (a Algorithm) Available() bool { + h, ok := algorithms[a] + if !ok { + return false + } + + // check availability of the hash, as well + return h.Available() +} + +func (a Algorithm) String() string { + return string(a) +} + +// Size returns number of bytes returned by the hash. +func (a Algorithm) Size() int { + h, ok := algorithms[a] + if !ok { + return 0 + } + return h.Size() +} + +// Set implemented to allow use of Algorithm as a command line flag. +func (a *Algorithm) Set(value string) error { + if value == "" { + *a = Canonical + } else { + // just do a type conversion, support is queried with Available. + *a = Algorithm(value) + } + + if !a.Available() { + return ErrDigestUnsupported + } + + return nil +} + +// Digester returns a new digester for the specified algorithm. If the algorithm +// does not have a digester implementation, nil will be returned. This can be +// checked by calling Available before calling Digester. +func (a Algorithm) Digester() Digester { + return &digester{ + alg: a, + hash: a.Hash(), + } +} + +// Hash returns a new hash as used by the algorithm. If not available, the +// method will panic. Check Algorithm.Available() before calling. +func (a Algorithm) Hash() hash.Hash { + if !a.Available() { + // Empty algorithm string is invalid + if a == "" { + panic(fmt.Sprintf("empty digest algorithm, validate before calling Algorithm.Hash()")) + } + + // NOTE(stevvooe): A missing hash is usually a programming error that + // must be resolved at compile time. We don't import in the digest + // package to allow users to choose their hash implementation (such as + // when using stevvooe/resumable or a hardware accelerated package). + // + // Applications that may want to resolve the hash at runtime should + // call Algorithm.Available before call Algorithm.Hash(). 
+ panic(fmt.Sprintf("%v not available (make sure it is imported)", a)) + } + + return algorithms[a].New() +} + +// Encode encodes the raw bytes of a digest, typically from a hash.Hash, into +// the encoded portion of the digest. +func (a Algorithm) Encode(d []byte) string { + // TODO(stevvooe): Currently, all algorithms use a hex encoding. When we + // add support for back registration, we can modify this accordingly. + return fmt.Sprintf("%x", d) +} + +// FromReader returns the digest of the reader using the algorithm. +func (a Algorithm) FromReader(rd io.Reader) (Digest, error) { + digester := a.Digester() + + if _, err := io.Copy(digester.Hash(), rd); err != nil { + return "", err + } + + return digester.Digest(), nil +} + +// FromBytes digests the input and returns a Digest. +func (a Algorithm) FromBytes(p []byte) Digest { + digester := a.Digester() + + if _, err := digester.Hash().Write(p); err != nil { + // Writes to a Hash should never fail. None of the existing + // hash implementations in the stdlib or hashes vendored + // here can return errors from Write. Having a panic in this + // condition instead of having FromBytes return an error value + // avoids unnecessary error handling paths in all callers. + panic("write to hash function returned error: " + err.Error()) + } + + return digester.Digest() +} + +// FromString digests the string input and returns a Digest. 
+func (a Algorithm) FromString(s string) Digest { + return a.FromBytes([]byte(s)) +} + +// Validate validates the encoded portion string +func (a Algorithm) Validate(encoded string) error { + r, ok := anchoredEncodedRegexps[a] + if !ok { + return ErrDigestUnsupported + } + // Digests much always be hex-encoded, ensuring that their hex portion will + // always be size*2 + if a.Size()*2 != len(encoded) { + return ErrDigestInvalidLength + } + if r.MatchString(encoded) { + return nil + } + return ErrDigestInvalidFormat +} diff --git a/vendor/github.com/opencontainers/go-digest/algorithm_test.go b/vendor/github.com/opencontainers/go-digest/algorithm_test.go new file mode 100644 index 000000000..d50e8494f --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/algorithm_test.go @@ -0,0 +1,114 @@ +// Copyright 2017 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package digest + +import ( + "bytes" + "crypto/rand" + _ "crypto/sha256" + _ "crypto/sha512" + "flag" + "fmt" + "strings" + "testing" +) + +func TestFlagInterface(t *testing.T) { + var ( + alg Algorithm + flagSet flag.FlagSet + ) + + flagSet.Var(&alg, "algorithm", "set the digest algorithm") + for _, testcase := range []struct { + Name string + Args []string + Err error + Expected Algorithm + }{ + { + Name: "Invalid", + Args: []string{"-algorithm", "bean"}, + Err: ErrDigestUnsupported, + }, + { + Name: "Default", + Args: []string{"unrelated"}, + Expected: "sha256", + }, + { + Name: "Other", + Args: []string{"-algorithm", "sha512"}, + Expected: "sha512", + }, + } { + t.Run(testcase.Name, func(t *testing.T) { + alg = Canonical + if err := flagSet.Parse(testcase.Args); err != testcase.Err { + if testcase.Err == nil { + t.Fatal("unexpected error", err) + } + + // check that flag package returns correct error + if !strings.Contains(err.Error(), testcase.Err.Error()) { + t.Fatalf("unexpected error: %v != %v", err, testcase.Err) + } + return + } + + if alg != testcase.Expected { + t.Fatalf("unexpected algorithm: %v != %v", alg, testcase.Expected) + } + }) + } +} + +func TestFroms(t *testing.T) { + p := make([]byte, 1<<20) + rand.Read(p) + + for alg := range algorithms { + h := alg.Hash() + h.Write(p) + expected := Digest(fmt.Sprintf("%s:%x", alg, h.Sum(nil))) + readerDgst, err := alg.FromReader(bytes.NewReader(p)) + if err != nil { + t.Fatalf("error calculating hash from reader: %v", err) + } + + dgsts := []Digest{ + alg.FromBytes(p), + alg.FromString(string(p)), + readerDgst, + } + + if alg == Canonical { + readerDgst, err := FromReader(bytes.NewReader(p)) + if err != nil { + t.Fatalf("error calculating hash from reader: %v", err) + } + + dgsts = append(dgsts, + FromBytes(p), + FromString(string(p)), + readerDgst) + } + for _, dgst := range dgsts { + if dgst != expected { + t.Fatalf("unexpected digest %v != %v", dgst, expected) + } + } + } +} diff --git 
a/vendor/github.com/opencontainers/go-digest/digest.go b/vendor/github.com/opencontainers/go-digest/digest.go new file mode 100644 index 000000000..ad398cba2 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/digest.go @@ -0,0 +1,156 @@ +// Copyright 2017 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package digest + +import ( + "fmt" + "hash" + "io" + "regexp" + "strings" +) + +// Digest allows simple protection of hex formatted digest strings, prefixed +// by their algorithm. Strings of type Digest have some guarantee of being in +// the correct format and it provides quick access to the components of a +// digest string. +// +// The following is an example of the contents of Digest types: +// +// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc +// +// This allows to abstract the digest behind this type and work only in those +// terms. +type Digest string + +// NewDigest returns a Digest from alg and a hash.Hash object. +func NewDigest(alg Algorithm, h hash.Hash) Digest { + return NewDigestFromBytes(alg, h.Sum(nil)) +} + +// NewDigestFromBytes returns a new digest from the byte contents of p. +// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...) +// functions. This is also useful for rebuilding digests from binary +// serializations. +func NewDigestFromBytes(alg Algorithm, p []byte) Digest { + return NewDigestFromEncoded(alg, alg.Encode(p)) +} + +// NewDigestFromHex is deprecated. 
Please use NewDigestFromEncoded. +func NewDigestFromHex(alg, hex string) Digest { + return NewDigestFromEncoded(Algorithm(alg), hex) +} + +// NewDigestFromEncoded returns a Digest from alg and the encoded digest. +func NewDigestFromEncoded(alg Algorithm, encoded string) Digest { + return Digest(fmt.Sprintf("%s:%s", alg, encoded)) +} + +// DigestRegexp matches valid digest types. +var DigestRegexp = regexp.MustCompile(`[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+`) + +// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match. +var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`) + +var ( + // ErrDigestInvalidFormat returned when digest format invalid. + ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format") + + // ErrDigestInvalidLength returned when digest has invalid length. + ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length") + + // ErrDigestUnsupported returned when the digest algorithm is unsupported. + ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm") +) + +// Parse parses s and returns the validated digest object. An error will +// be returned if the format is invalid. +func Parse(s string) (Digest, error) { + d := Digest(s) + return d, d.Validate() +} + +// FromReader consumes the content of rd until io.EOF, returning canonical digest. +func FromReader(rd io.Reader) (Digest, error) { + return Canonical.FromReader(rd) +} + +// FromBytes digests the input and returns a Digest. +func FromBytes(p []byte) Digest { + return Canonical.FromBytes(p) +} + +// FromString digests the input and returns a Digest. +func FromString(s string) Digest { + return Canonical.FromString(s) +} + +// Validate checks that the contents of d is a valid digest, returning an +// error if not. 
+func (d Digest) Validate() error { + s := string(d) + i := strings.Index(s, ":") + if i <= 0 || i+1 == len(s) { + return ErrDigestInvalidFormat + } + algorithm, encoded := Algorithm(s[:i]), s[i+1:] + if !algorithm.Available() { + if !DigestRegexpAnchored.MatchString(s) { + return ErrDigestInvalidFormat + } + return ErrDigestUnsupported + } + return algorithm.Validate(encoded) +} + +// Algorithm returns the algorithm portion of the digest. This will panic if +// the underlying digest is not in a valid format. +func (d Digest) Algorithm() Algorithm { + return Algorithm(d[:d.sepIndex()]) +} + +// Verifier returns a writer object that can be used to verify a stream of +// content against the digest. If the digest is invalid, the method will panic. +func (d Digest) Verifier() Verifier { + return hashVerifier{ + hash: d.Algorithm().Hash(), + digest: d, + } +} + +// Encoded returns the encoded portion of the digest. This will panic if the +// underlying digest is not in a valid format. +func (d Digest) Encoded() string { + return string(d[d.sepIndex()+1:]) +} + +// Hex is deprecated. Please use Digest.Encoded. +func (d Digest) Hex() string { + return d.Encoded() +} + +func (d Digest) String() string { + return string(d) +} + +func (d Digest) sepIndex() int { + i := strings.Index(string(d), ":") + + if i < 0 { + panic(fmt.Sprintf("no ':' separator in digest %q", d)) + } + + return i +} diff --git a/vendor/github.com/opencontainers/go-digest/digest_test.go b/vendor/github.com/opencontainers/go-digest/digest_test.go new file mode 100644 index 000000000..cc3b648a8 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/digest_test.go @@ -0,0 +1,134 @@ +// Copyright 2017 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package digest + +import ( + "testing" +) + +func TestParseDigest(t *testing.T) { + for _, testcase := range []struct { + input string + err error + algorithm Algorithm + encoded string + }{ + { + input: "sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", + algorithm: "sha256", + encoded: "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", + }, + { + input: "sha384:d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d", + algorithm: "sha384", + encoded: "d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d", + }, + { + // empty hex + input: "sha256:", + err: ErrDigestInvalidFormat, + }, + { + // empty hex + input: ":", + err: ErrDigestInvalidFormat, + }, + { + // just hex + input: "d41d8cd98f00b204e9800998ecf8427e", + err: ErrDigestInvalidFormat, + }, + { + // not hex + input: "sha256:d41d8cd98f00b204e9800m98ecf8427e", + err: ErrDigestInvalidLength, + }, + { + // too short + input: "sha256:abcdef0123456789", + err: ErrDigestInvalidLength, + }, + { + // too short (from different algorithm) + input: "sha512:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", + err: ErrDigestInvalidLength, + }, + { + input: "foo:d41d8cd98f00b204e9800998ecf8427e", + err: ErrDigestUnsupported, + }, + { + // repeated separators + input: "sha384__foo+bar:d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d", + err: ErrDigestInvalidFormat, + }, + { + // ensure that we parse, but we don't 
have support for the algorithm + input: "sha384.foo+bar:d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d", + algorithm: "sha384.foo+bar", + encoded: "d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d", + err: ErrDigestUnsupported, + }, + { + input: "sha384_foo+bar:d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d", + algorithm: "sha384_foo+bar", + encoded: "d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d", + err: ErrDigestUnsupported, + }, + { + input: "sha256+b64:LCa0a2j_xo_5m0U8HTBBNBNCLXBkg7-g-YpeiGJm564", + algorithm: "sha256+b64", + encoded: "LCa0a2j_xo_5m0U8HTBBNBNCLXBkg7-g-YpeiGJm564", + err: ErrDigestUnsupported, + }, + { + input: "sha256:E58FCF7418D4390DEC8E8FB69D88C06EC07039D651FEDD3AA72AF9972E7D046B", + err: ErrDigestInvalidFormat, + }, + } { + digest, err := Parse(testcase.input) + if err != testcase.err { + t.Fatalf("error differed from expected while parsing %q: %v != %v", testcase.input, err, testcase.err) + } + + if testcase.err != nil { + continue + } + + if digest.Algorithm() != testcase.algorithm { + t.Fatalf("incorrect algorithm for parsed digest: %q != %q", digest.Algorithm(), testcase.algorithm) + } + + if digest.Encoded() != testcase.encoded { + t.Fatalf("incorrect hex for parsed digest: %q != %q", digest.Encoded(), testcase.encoded) + } + + // Parse string return value and check equality + newParsed, err := Parse(digest.String()) + + if err != nil { + t.Fatalf("unexpected error parsing input %q: %v", testcase.input, err) + } + + if newParsed != digest { + t.Fatalf("expected equal: %q != %q", newParsed, digest) + } + + newFromHex := NewDigestFromEncoded(newParsed.Algorithm(), newParsed.Encoded()) + if newFromHex != digest { + t.Fatalf("%v != %v", newFromHex, digest) + } + } +} diff --git a/vendor/github.com/opencontainers/go-digest/digester.go 
b/vendor/github.com/opencontainers/go-digest/digester.go new file mode 100644 index 000000000..36fa2728e --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/digester.go @@ -0,0 +1,39 @@ +// Copyright 2017 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package digest + +import "hash" + +// Digester calculates the digest of written data. Writes should go directly +// to the return value of Hash, while calling Digest will return the current +// value of the digest. +type Digester interface { + Hash() hash.Hash // provides direct access to underlying hash instance. + Digest() Digest +} + +// digester provides a simple digester definition that embeds a hasher. +type digester struct { + alg Algorithm + hash hash.Hash +} + +func (d *digester) Hash() hash.Hash { + return d.hash +} + +func (d *digester) Digest() Digest { + return NewDigest(d.alg, d.hash) +} diff --git a/vendor/github.com/opencontainers/go-digest/doc.go b/vendor/github.com/opencontainers/go-digest/doc.go new file mode 100644 index 000000000..491ea1ef1 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/doc.go @@ -0,0 +1,56 @@ +// Copyright 2017 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package digest provides a generalized type to opaquely represent message +// digests and their operations within the registry. The Digest type is +// designed to serve as a flexible identifier in a content-addressable system. +// More importantly, it provides tools and wrappers to work with +// hash.Hash-based digests with little effort. +// +// Basics +// +// The format of a digest is simply a string with two parts, dubbed the +// "algorithm" and the "digest", separated by a colon: +// +// : +// +// An example of a sha256 digest representation follows: +// +// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc +// +// In this case, the string "sha256" is the algorithm and the hex bytes are +// the "digest". +// +// Because the Digest type is simply a string, once a valid Digest is +// obtained, comparisons are cheap, quick and simple to express with the +// standard equality operator. +// +// Verification +// +// The main benefit of using the Digest type is simple verification against a +// given digest. The Verifier interface, modeled after the stdlib hash.Hash +// interface, provides a common write sink for digest verification. After +// writing is complete, calling the Verifier.Verified method will indicate +// whether or not the stream of bytes matches the target digest. +// +// Missing Features +// +// In addition to the above, we intend to add the following features to this +// package: +// +// 1. A Digester type that supports write sink digest calculation. +// +// 2. 
// Verifier is a write sink for checking a byte stream against a known
// message digest. Obtain one from a Digest, stream the candidate bytes into
// it via io.Writer, then ask Verified whether the content matched.
type Verifier interface {
	io.Writer

	// Verified reports whether the bytes written so far match the
	// expected digest.
	Verified() bool
}
+ Verified() bool +} + +type hashVerifier struct { + digest Digest + hash hash.Hash +} + +func (hv hashVerifier) Write(p []byte) (n int, err error) { + return hv.hash.Write(p) +} + +func (hv hashVerifier) Verified() bool { + return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash) +} diff --git a/vendor/github.com/opencontainers/go-digest/verifiers_test.go b/vendor/github.com/opencontainers/go-digest/verifiers_test.go new file mode 100644 index 000000000..d67bb1bc6 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/verifiers_test.go @@ -0,0 +1,80 @@ +// Copyright 2017 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package digest + +import ( + "bytes" + "crypto/rand" + "io" + "reflect" + "testing" +) + +func TestDigestVerifier(t *testing.T) { + p := make([]byte, 1<<20) + rand.Read(p) + digest := FromBytes(p) + + verifier := digest.Verifier() + + io.Copy(verifier, bytes.NewReader(p)) + + if !verifier.Verified() { + t.Fatalf("bytes not verified") + } +} + +// TestVerifierUnsupportedDigest ensures that unsupported digest validation is +// flowing through verifier creation. 
+func TestVerifierUnsupportedDigest(t *testing.T) { + for _, testcase := range []struct { + Name string + Digest Digest + Expected interface{} // expected panic target + }{ + { + Name: "Empty", + Digest: "", + Expected: "no ':' separator in digest \"\"", + }, + { + Name: "EmptyAlg", + Digest: ":", + Expected: "empty digest algorithm, validate before calling Algorithm.Hash()", + }, + { + Name: "Unsupported", + Digest: Digest("bean:0123456789abcdef"), + Expected: "bean not available (make sure it is imported)", + }, + { + Name: "Garbage", + Digest: Digest("sha256-garbage:pure"), + Expected: "sha256-garbage not available (make sure it is imported)", + }, + } { + t.Run(testcase.Name, func(t *testing.T) { + expected := testcase.Expected + defer func() { + recovered := recover() + if !reflect.DeepEqual(recovered, expected) { + t.Fatalf("unexpected recover: %v != %v", recovered, expected) + } + }() + + _ = testcase.Digest.Verifier() + }) + } +}