mirror of
https://github.com/fnproject/fn.git
synced 2022-10-28 21:29:17 +03:00
add funcy/functions_go & update docker/distribution
This commit is contained in:
committed by
Travis Reeder
parent
452dc1ee86
commit
c9c7950d0f
12
glide.lock
generated
12
glide.lock
generated
@@ -1,5 +1,5 @@
|
|||||||
hash: a8dbae01be6173ddeceed9debaf995f8007df052cabb90838a1652e0da0baa70
|
hash: c3a20e76f70802a5cb8ead0c4ac29c6bca272a3d738d9b95538d9fc238ad450c
|
||||||
updated: 2017-06-11T01:58:27.005608286-07:00
|
updated: 2017-06-30T11:01:18.063490786-07:00
|
||||||
imports:
|
imports:
|
||||||
- name: code.cloudfoundry.org/bytefmt
|
- name: code.cloudfoundry.org/bytefmt
|
||||||
version: f4415fafc5619dd75599a54a7c91fb3948ad58bd
|
version: f4415fafc5619dd75599a54a7c91fb3948ad58bd
|
||||||
@@ -40,7 +40,7 @@ imports:
|
|||||||
subpackages:
|
subpackages:
|
||||||
- cli/config/configfile
|
- cli/config/configfile
|
||||||
- name: github.com/docker/distribution
|
- name: github.com/docker/distribution
|
||||||
version: 99cb7c0946d2f5a38015443e515dc916295064d7
|
version: a25b9ef0c9fe242ac04bb20d3a028442b7d266b6
|
||||||
subpackages:
|
subpackages:
|
||||||
- context
|
- context
|
||||||
- digest
|
- digest
|
||||||
@@ -89,6 +89,10 @@ imports:
|
|||||||
version: 4da3e2cfbabc9f751898f250b49f2439785783a1
|
version: 4da3e2cfbabc9f751898f250b49f2439785783a1
|
||||||
- name: github.com/fsouza/go-dockerclient
|
- name: github.com/fsouza/go-dockerclient
|
||||||
version: c933ed18bef34ec2955de03de8ef9a3bb996e3df
|
version: c933ed18bef34ec2955de03de8ef9a3bb996e3df
|
||||||
|
- name: github.com/funcy/functions_go
|
||||||
|
version: 5d9948e8b1292c5421b5dd98bb6a9b5535d5e1ba
|
||||||
|
subpackages:
|
||||||
|
- models
|
||||||
- name: github.com/garyburd/redigo
|
- name: github.com/garyburd/redigo
|
||||||
version: 95d11dba2d44531bdb8022752b98912baafae03a
|
version: 95d11dba2d44531bdb8022752b98912baafae03a
|
||||||
subpackages:
|
subpackages:
|
||||||
@@ -201,8 +205,6 @@ imports:
|
|||||||
version: b938d81255b5473c57635324295cb0fe398c7a58
|
version: b938d81255b5473c57635324295cb0fe398c7a58
|
||||||
- name: github.com/PuerkitoBio/urlesc
|
- name: github.com/PuerkitoBio/urlesc
|
||||||
version: bbf7a2afc14f93e1e0a5c06df524fbd75e5031e5
|
version: bbf7a2afc14f93e1e0a5c06df524fbd75e5031e5
|
||||||
- name: github.com/satori/go.uuid
|
|
||||||
version: 5bf94b69c6b68ee1b541973bb8e1144db23a194b
|
|
||||||
- name: github.com/Sirupsen/logrus
|
- name: github.com/Sirupsen/logrus
|
||||||
version: ba1b36c82c5e05c4f912a88eab0dcd91a171688f
|
version: ba1b36c82c5e05c4f912a88eab0dcd91a171688f
|
||||||
repo: https://github.com/sirupsen/logrus
|
repo: https://github.com/sirupsen/logrus
|
||||||
|
|||||||
@@ -3,10 +3,13 @@ excludeDirs:
|
|||||||
- fn
|
- fn
|
||||||
import:
|
import:
|
||||||
- package: code.cloudfoundry.org/bytefmt
|
- package: code.cloudfoundry.org/bytefmt
|
||||||
|
- package: github.com/funcy/functions_go
|
||||||
|
subpackages:
|
||||||
|
- models
|
||||||
- package: github.com/Sirupsen/logrus
|
- package: github.com/Sirupsen/logrus
|
||||||
repo: https://github.com/sirupsen/logrus
|
repo: https://github.com/sirupsen/logrus
|
||||||
vcs: git
|
vcs: git
|
||||||
version: v0.11.5
|
version: ^v0.11.5
|
||||||
subpackages:
|
subpackages:
|
||||||
- hooks/syslog
|
- hooks/syslog
|
||||||
- package: github.com/amir/raidman
|
- package: github.com/amir/raidman
|
||||||
@@ -25,6 +28,7 @@ import:
|
|||||||
subpackages:
|
subpackages:
|
||||||
- cli/config/configfile
|
- cli/config/configfile
|
||||||
- package: github.com/docker/distribution
|
- package: github.com/docker/distribution
|
||||||
|
version: ^2.6.1
|
||||||
subpackages:
|
subpackages:
|
||||||
- manifest/schema1
|
- manifest/schema1
|
||||||
- package: github.com/fsouza/go-dockerclient
|
- package: github.com/fsouza/go-dockerclient
|
||||||
|
|||||||
35
vendor/github.com/docker/distribution/AUTHORS
generated
vendored
35
vendor/github.com/docker/distribution/AUTHORS
generated
vendored
@@ -1,6 +1,8 @@
|
|||||||
|
a-palchikov <deemok@gmail.com>
|
||||||
Aaron Lehmann <aaron.lehmann@docker.com>
|
Aaron Lehmann <aaron.lehmann@docker.com>
|
||||||
Aaron Schlesinger <aschlesinger@deis.com>
|
Aaron Schlesinger <aschlesinger@deis.com>
|
||||||
Aaron Vinson <avinson.public@gmail.com>
|
Aaron Vinson <avinson.public@gmail.com>
|
||||||
|
Adam Duke <adam.v.duke@gmail.com>
|
||||||
Adam Enger <adamenger@gmail.com>
|
Adam Enger <adamenger@gmail.com>
|
||||||
Adrian Mouat <adrian.mouat@gmail.com>
|
Adrian Mouat <adrian.mouat@gmail.com>
|
||||||
Ahmet Alp Balkan <ahmetalpbalkan@gmail.com>
|
Ahmet Alp Balkan <ahmetalpbalkan@gmail.com>
|
||||||
@@ -19,6 +21,7 @@ Anis Elleuch <vadmeste@gmail.com>
|
|||||||
Anton Tiurin <noxiouz@yandex.ru>
|
Anton Tiurin <noxiouz@yandex.ru>
|
||||||
Antonio Mercado <amercado@thinknode.com>
|
Antonio Mercado <amercado@thinknode.com>
|
||||||
Antonio Murdaca <runcom@redhat.com>
|
Antonio Murdaca <runcom@redhat.com>
|
||||||
|
Anusha Ragunathan <anusha@docker.com>
|
||||||
Arien Holthuizen <aholthuizen@schubergphilis.com>
|
Arien Holthuizen <aholthuizen@schubergphilis.com>
|
||||||
Arnaud Porterie <arnaud.porterie@docker.com>
|
Arnaud Porterie <arnaud.porterie@docker.com>
|
||||||
Arthur Baars <arthur@semmle.com>
|
Arthur Baars <arthur@semmle.com>
|
||||||
@@ -26,12 +29,16 @@ Asuka Suzuki <hello@tanksuzuki.com>
|
|||||||
Avi Miller <avi.miller@oracle.com>
|
Avi Miller <avi.miller@oracle.com>
|
||||||
Ayose Cazorla <ayosec@gmail.com>
|
Ayose Cazorla <ayosec@gmail.com>
|
||||||
BadZen <dave.trombley@gmail.com>
|
BadZen <dave.trombley@gmail.com>
|
||||||
|
Ben Bodenmiller <bbodenmiller@hotmail.com>
|
||||||
Ben Firshman <ben@firshman.co.uk>
|
Ben Firshman <ben@firshman.co.uk>
|
||||||
bin liu <liubin0329@gmail.com>
|
bin liu <liubin0329@gmail.com>
|
||||||
Brian Bland <brian.bland@docker.com>
|
Brian Bland <brian.bland@docker.com>
|
||||||
burnettk <burnettk@gmail.com>
|
burnettk <burnettk@gmail.com>
|
||||||
Carson A <ca@carsonoid.net>
|
Carson A <ca@carsonoid.net>
|
||||||
|
Cezar Sa Espinola <cezarsa@gmail.com>
|
||||||
|
Charles Smith <charles.smith@docker.com>
|
||||||
Chris Dillon <squarism@gmail.com>
|
Chris Dillon <squarism@gmail.com>
|
||||||
|
cuiwei13 <cuiwei13@pku.edu.cn>
|
||||||
cyli <cyli@twistedmatrix.com>
|
cyli <cyli@twistedmatrix.com>
|
||||||
Daisuke Fujita <dtanshi45@gmail.com>
|
Daisuke Fujita <dtanshi45@gmail.com>
|
||||||
Daniel Huhn <daniel@danielhuhn.de>
|
Daniel Huhn <daniel@danielhuhn.de>
|
||||||
@@ -48,11 +55,14 @@ Diogo Mónica <diogo.monica@gmail.com>
|
|||||||
DJ Enriquez <dj.enriquez@infospace.com>
|
DJ Enriquez <dj.enriquez@infospace.com>
|
||||||
Donald Huang <don.hcd@gmail.com>
|
Donald Huang <don.hcd@gmail.com>
|
||||||
Doug Davis <dug@us.ibm.com>
|
Doug Davis <dug@us.ibm.com>
|
||||||
|
Edgar Lee <edgar.lee@docker.com>
|
||||||
Eric Yang <windfarer@gmail.com>
|
Eric Yang <windfarer@gmail.com>
|
||||||
|
Fabio Berchtold <jamesclonk@jamesclonk.ch>
|
||||||
Fabio Huser <fabio@fh1.ch>
|
Fabio Huser <fabio@fh1.ch>
|
||||||
farmerworking <farmerworking@gmail.com>
|
farmerworking <farmerworking@gmail.com>
|
||||||
Felix Yan <felixonmars@archlinux.org>
|
Felix Yan <felixonmars@archlinux.org>
|
||||||
Florentin Raud <florentin.raud@gmail.com>
|
Florentin Raud <florentin.raud@gmail.com>
|
||||||
|
Frank Chen <frankchn@gmail.com>
|
||||||
Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
|
Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
|
||||||
gabriell nascimento <gabriell@bluesoft.com.br>
|
gabriell nascimento <gabriell@bluesoft.com.br>
|
||||||
Gleb Schukin <gschukin@ptsecurity.com>
|
Gleb Schukin <gschukin@ptsecurity.com>
|
||||||
@@ -64,16 +74,23 @@ HuKeping <hukeping@huawei.com>
|
|||||||
Ian Babrou <ibobrik@gmail.com>
|
Ian Babrou <ibobrik@gmail.com>
|
||||||
igayoso <igayoso@gmail.com>
|
igayoso <igayoso@gmail.com>
|
||||||
Jack Griffin <jackpg14@gmail.com>
|
Jack Griffin <jackpg14@gmail.com>
|
||||||
|
James Findley <jfindley@fastmail.com>
|
||||||
Jason Freidman <jason.freidman@gmail.com>
|
Jason Freidman <jason.freidman@gmail.com>
|
||||||
|
Jason Heiss <jheiss@aput.net>
|
||||||
Jeff Nickoloff <jeff@allingeek.com>
|
Jeff Nickoloff <jeff@allingeek.com>
|
||||||
|
Jess Frazelle <acidburn@google.com>
|
||||||
Jessie Frazelle <jessie@docker.com>
|
Jessie Frazelle <jessie@docker.com>
|
||||||
jhaohai <jhaohai@foxmail.com>
|
jhaohai <jhaohai@foxmail.com>
|
||||||
Jianqing Wang <tsing@jianqing.org>
|
Jianqing Wang <tsing@jianqing.org>
|
||||||
|
Jihoon Chung <jihoon@gmail.com>
|
||||||
|
Joao Fernandes <joao.fernandes@docker.com>
|
||||||
|
John Mulhausen <john@docker.com>
|
||||||
John Starks <jostarks@microsoft.com>
|
John Starks <jostarks@microsoft.com>
|
||||||
Jon Johnson <jonjohnson@google.com>
|
Jon Johnson <jonjohnson@google.com>
|
||||||
Jon Poler <jonathan.poler@apcera.com>
|
Jon Poler <jonathan.poler@apcera.com>
|
||||||
Jonathan Boulle <jonathanboulle@gmail.com>
|
Jonathan Boulle <jonathanboulle@gmail.com>
|
||||||
Jordan Liggitt <jliggitt@redhat.com>
|
Jordan Liggitt <jliggitt@redhat.com>
|
||||||
|
Josh Chorlton <josh.chorlton@docker.com>
|
||||||
Josh Hawn <josh.hawn@docker.com>
|
Josh Hawn <josh.hawn@docker.com>
|
||||||
Julien Fernandez <julien.fernandez@gmail.com>
|
Julien Fernandez <julien.fernandez@gmail.com>
|
||||||
Ke Xu <leonhartx.k@gmail.com>
|
Ke Xu <leonhartx.k@gmail.com>
|
||||||
@@ -84,22 +101,30 @@ Kenny Leung <kleung@google.com>
|
|||||||
Li Yi <denverdino@gmail.com>
|
Li Yi <denverdino@gmail.com>
|
||||||
Liu Hua <sdu.liu@huawei.com>
|
Liu Hua <sdu.liu@huawei.com>
|
||||||
liuchang0812 <liuchang0812@gmail.com>
|
liuchang0812 <liuchang0812@gmail.com>
|
||||||
|
Lloyd Ramey <lnr0626@gmail.com>
|
||||||
Louis Kottmann <louis.kottmann@gmail.com>
|
Louis Kottmann <louis.kottmann@gmail.com>
|
||||||
Luke Carpenter <x@rubynerd.net>
|
Luke Carpenter <x@rubynerd.net>
|
||||||
|
Marcus Martins <marcus@docker.com>
|
||||||
Mary Anthony <mary@docker.com>
|
Mary Anthony <mary@docker.com>
|
||||||
Matt Bentley <mbentley@mbentley.net>
|
Matt Bentley <mbentley@mbentley.net>
|
||||||
Matt Duch <matt@learnmetrics.com>
|
Matt Duch <matt@learnmetrics.com>
|
||||||
Matt Moore <mattmoor@google.com>
|
Matt Moore <mattmoor@google.com>
|
||||||
Matt Robenolt <matt@ydekproductions.com>
|
Matt Robenolt <matt@ydekproductions.com>
|
||||||
|
Matthew Green <greenmr@live.co.uk>
|
||||||
Michael Prokop <mika@grml.org>
|
Michael Prokop <mika@grml.org>
|
||||||
Michal Minar <miminar@redhat.com>
|
Michal Minar <miminar@redhat.com>
|
||||||
|
Michal Minář <miminar@redhat.com>
|
||||||
|
Mike Brown <brownwm@us.ibm.com>
|
||||||
Miquel Sabaté <msabate@suse.com>
|
Miquel Sabaté <msabate@suse.com>
|
||||||
|
Misty Stanley-Jones <misty@apache.org>
|
||||||
|
Misty Stanley-Jones <misty@docker.com>
|
||||||
Morgan Bauer <mbauer@us.ibm.com>
|
Morgan Bauer <mbauer@us.ibm.com>
|
||||||
moxiegirl <mary@docker.com>
|
moxiegirl <mary@docker.com>
|
||||||
Nathan Sullivan <nathan@nightsys.net>
|
Nathan Sullivan <nathan@nightsys.net>
|
||||||
nevermosby <robolwq@qq.com>
|
nevermosby <robolwq@qq.com>
|
||||||
Nghia Tran <tcnghia@gmail.com>
|
Nghia Tran <tcnghia@gmail.com>
|
||||||
Nikita Tarasov <nikita@mygento.ru>
|
Nikita Tarasov <nikita@mygento.ru>
|
||||||
|
Noah Treuhaft <noah.treuhaft@docker.com>
|
||||||
Nuutti Kotivuori <nuutti.kotivuori@poplatek.fi>
|
Nuutti Kotivuori <nuutti.kotivuori@poplatek.fi>
|
||||||
Oilbeater <liumengxinfly@gmail.com>
|
Oilbeater <liumengxinfly@gmail.com>
|
||||||
Olivier Gambier <olivier@docker.com>
|
Olivier Gambier <olivier@docker.com>
|
||||||
@@ -108,17 +133,23 @@ Omer Cohen <git@omer.io>
|
|||||||
Patrick Devine <patrick.devine@docker.com>
|
Patrick Devine <patrick.devine@docker.com>
|
||||||
Phil Estes <estesp@linux.vnet.ibm.com>
|
Phil Estes <estesp@linux.vnet.ibm.com>
|
||||||
Philip Misiowiec <philip@atlashealth.com>
|
Philip Misiowiec <philip@atlashealth.com>
|
||||||
|
Pierre-Yves Ritschard <pyr@spootnik.org>
|
||||||
|
Qiao Anran <qiaoanran@gmail.com>
|
||||||
|
Randy Barlow <randy@electronsweatshop.com>
|
||||||
Richard Scothern <richard.scothern@docker.com>
|
Richard Scothern <richard.scothern@docker.com>
|
||||||
Rodolfo Carvalho <rhcarvalho@gmail.com>
|
Rodolfo Carvalho <rhcarvalho@gmail.com>
|
||||||
Rusty Conover <rusty@luckydinosaur.com>
|
Rusty Conover <rusty@luckydinosaur.com>
|
||||||
Sean Boran <Boran@users.noreply.github.com>
|
Sean Boran <Boran@users.noreply.github.com>
|
||||||
Sebastiaan van Stijn <github@gone.nl>
|
Sebastiaan van Stijn <github@gone.nl>
|
||||||
|
Sebastien Coavoux <s.coavoux@free.fr>
|
||||||
Serge Dubrouski <sergeyfd@gmail.com>
|
Serge Dubrouski <sergeyfd@gmail.com>
|
||||||
Sharif Nassar <sharif@mrwacky.com>
|
Sharif Nassar <sharif@mrwacky.com>
|
||||||
Shawn Falkner-Horine <dreadpirateshawn@gmail.com>
|
Shawn Falkner-Horine <dreadpirateshawn@gmail.com>
|
||||||
Shreyas Karnik <karnik.shreyas@gmail.com>
|
Shreyas Karnik <karnik.shreyas@gmail.com>
|
||||||
Simon Thulbourn <simon+github@thulbourn.com>
|
Simon Thulbourn <simon+github@thulbourn.com>
|
||||||
|
spacexnice <yaoyao.xyy@alibaba-inc.com>
|
||||||
Spencer Rinehart <anubis@overthemonkey.com>
|
Spencer Rinehart <anubis@overthemonkey.com>
|
||||||
|
Stan Hu <stanhu@gmail.com>
|
||||||
Stefan Majewsky <stefan.majewsky@sap.com>
|
Stefan Majewsky <stefan.majewsky@sap.com>
|
||||||
Stefan Weil <sw@weilnetz.de>
|
Stefan Weil <sw@weilnetz.de>
|
||||||
Stephen J Day <stephen.day@docker.com>
|
Stephen J Day <stephen.day@docker.com>
|
||||||
@@ -134,6 +165,8 @@ Tonis Tiigi <tonistiigi@gmail.com>
|
|||||||
Tony Holdstock-Brown <tony@docker.com>
|
Tony Holdstock-Brown <tony@docker.com>
|
||||||
Trevor Pounds <trevor.pounds@gmail.com>
|
Trevor Pounds <trevor.pounds@gmail.com>
|
||||||
Troels Thomsen <troels@thomsen.io>
|
Troels Thomsen <troels@thomsen.io>
|
||||||
|
Victor Vieux <vieux@docker.com>
|
||||||
|
Victoria Bialas <victoria.bialas@docker.com>
|
||||||
Vincent Batts <vbatts@redhat.com>
|
Vincent Batts <vbatts@redhat.com>
|
||||||
Vincent Demeester <vincent@sbr.pm>
|
Vincent Demeester <vincent@sbr.pm>
|
||||||
Vincent Giersch <vincent.giersch@ovh.net>
|
Vincent Giersch <vincent.giersch@ovh.net>
|
||||||
@@ -142,6 +175,8 @@ weiyuan.yl <weiyuan.yl@alibaba-inc.com>
|
|||||||
xg.song <xg.song@venusource.com>
|
xg.song <xg.song@venusource.com>
|
||||||
xiekeyang <xiekeyang@huawei.com>
|
xiekeyang <xiekeyang@huawei.com>
|
||||||
Yann ROBERT <yann.robert@anantaplex.fr>
|
Yann ROBERT <yann.robert@anantaplex.fr>
|
||||||
|
yaoyao.xyy <yaoyao.xyy@alibaba-inc.com>
|
||||||
|
yuexiao-wang <wang.yuexiao@zte.com.cn>
|
||||||
yuzou <zouyu7@huawei.com>
|
yuzou <zouyu7@huawei.com>
|
||||||
zhouhaibing089 <zhouhaibing089@gmail.com>
|
zhouhaibing089 <zhouhaibing089@gmail.com>
|
||||||
姜继忠 <jizhong.jiangjz@alibaba-inc.com>
|
姜继忠 <jizhong.jiangjz@alibaba-inc.com>
|
||||||
|
|||||||
2
vendor/github.com/docker/distribution/BUILDING.md
generated
vendored
2
vendor/github.com/docker/distribution/BUILDING.md
generated
vendored
@@ -11,7 +11,7 @@ Most people should use the [official Registry docker image](https://hub.docker.c
|
|||||||
|
|
||||||
People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`.
|
People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`.
|
||||||
|
|
||||||
OS X users who want to run natively can do so following [the instructions here](osx-setup-guide.md).
|
OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md).
|
||||||
|
|
||||||
### Gotchas
|
### Gotchas
|
||||||
|
|
||||||
|
|||||||
89
vendor/github.com/docker/distribution/CHANGELOG.md
generated
vendored
89
vendor/github.com/docker/distribution/CHANGELOG.md
generated
vendored
@@ -1,9 +1,88 @@
|
|||||||
# Changelog
|
# Changelog
|
||||||
|
|
||||||
|
## 2.6.1 (2017-04-05)
|
||||||
|
|
||||||
|
#### Registry
|
||||||
|
- Fix `Forwarded` header handling, revert use of `X-Forwarded-Port`
|
||||||
|
- Use driver `Stat` for registry health check
|
||||||
|
|
||||||
|
## 2.6.0 (2017-01-18)
|
||||||
|
|
||||||
|
#### Storage
|
||||||
|
- S3: fixed bug in delete due to read-after-write inconsistency
|
||||||
|
- S3: allow EC2 IAM roles to be used when authorizing region endpoints
|
||||||
|
- S3: add Object ACL Support
|
||||||
|
- S3: fix delete method's notion of subpaths
|
||||||
|
- S3: use multipart upload API in `Move` method for performance
|
||||||
|
- S3: add v2 signature signing for legacy S3 clones
|
||||||
|
- Swift: add simple heuristic to detect incomplete DLOs during read ops
|
||||||
|
- Swift: support different user and tenant domains
|
||||||
|
- Swift: bulk deletes in chunks
|
||||||
|
- Aliyun OSS: fix delete method's notion of subpaths
|
||||||
|
- Aliyun OSS: optimize data copy after upload finishes
|
||||||
|
- Azure: close leaking response body
|
||||||
|
- Fix storage drivers dropping non-EOF errors when listing repositories
|
||||||
|
- Compare path properly when listing repositories in catalog
|
||||||
|
- Add a foreign layer URL host whitelist
|
||||||
|
- Improve catalog enumerate runtime
|
||||||
|
|
||||||
|
#### Registry
|
||||||
|
- Export `storage.CreateOptions` in top-level package
|
||||||
|
- Enable notifications to endpoints that use self-signed certificates
|
||||||
|
- Properly validate multi-URL foreign layers
|
||||||
|
- Add control over validation of URLs in pushed manifests
|
||||||
|
- Proxy mode: fix socket leak when pull is cancelled
|
||||||
|
- Tag service: properly handle error responses on HEAD request
|
||||||
|
- Support for custom authentication URL in proxying registry
|
||||||
|
- Add configuration option to disable access logging
|
||||||
|
- Add notification filtering by target media type
|
||||||
|
- Manifest: `References()` returns all children
|
||||||
|
- Honor `X-Forwarded-Port` and Forwarded headers
|
||||||
|
- Reference: Preserve tag and digest in With* functions
|
||||||
|
- Add policy configuration for enforcing repository classes
|
||||||
|
|
||||||
|
#### Client
|
||||||
|
- Changes the client Tags `All()` method to follow links
|
||||||
|
- Allow registry clients to connect via HTTP2
|
||||||
|
- Better handling of OAuth errors in client
|
||||||
|
|
||||||
|
#### Spec
|
||||||
|
- Manifest: clarify relationship between urls and foreign layers
|
||||||
|
- Authorization: add support for repository classes
|
||||||
|
|
||||||
|
#### Manifest
|
||||||
|
- Override media type returned from `Stat()` for existing manifests
|
||||||
|
- Add plugin mediatype to distribution manifest
|
||||||
|
|
||||||
|
#### Docs
|
||||||
|
- Document `TOOMANYREQUESTS` error code
|
||||||
|
- Document required Let's Encrypt port
|
||||||
|
- Improve documentation around implementation of OAuth2
|
||||||
|
- Improve documentation for configuration
|
||||||
|
|
||||||
|
#### Auth
|
||||||
|
- Add support for registry type in scope
|
||||||
|
- Add support for using v2 ping challenges for v1
|
||||||
|
- Add leeway to JWT `nbf` and `exp` checking
|
||||||
|
- htpasswd: dynamically parse htpasswd file
|
||||||
|
- Fix missing auth headers with PATCH HTTP request when pushing to default port
|
||||||
|
|
||||||
|
#### Dockerfile
|
||||||
|
- Update to go1.7
|
||||||
|
- Reorder Dockerfile steps for better layer caching
|
||||||
|
|
||||||
|
#### Notes
|
||||||
|
|
||||||
|
Documentation has moved to the documentation repository at
|
||||||
|
`github.com/docker/docker.github.io/tree/master/registry`
|
||||||
|
|
||||||
|
The registry is go 1.7 compliant, and passes newer, more restrictive `lint` and `vet` ing.
|
||||||
|
|
||||||
|
|
||||||
## 2.5.0 (2016-06-14)
|
## 2.5.0 (2016-06-14)
|
||||||
|
|
||||||
### Storage
|
#### Storage
|
||||||
- Ensure uploads directory is cleaned after upload is commited
|
- Ensure uploads directory is cleaned after upload is committed
|
||||||
- Add ability to cap concurrent operations in filesystem driver
|
- Add ability to cap concurrent operations in filesystem driver
|
||||||
- S3: Add 'us-gov-west-1' to the valid region list
|
- S3: Add 'us-gov-west-1' to the valid region list
|
||||||
- Swift: Handle ceph not returning Last-Modified header for HEAD requests
|
- Swift: Handle ceph not returning Last-Modified header for HEAD requests
|
||||||
@@ -23,13 +102,13 @@
|
|||||||
- Update the auth spec scope grammar to reflect the fact that hostnames are optionally supported
|
- Update the auth spec scope grammar to reflect the fact that hostnames are optionally supported
|
||||||
- Clarify API documentation around catalog fetch behavior
|
- Clarify API documentation around catalog fetch behavior
|
||||||
|
|
||||||
### API
|
#### API
|
||||||
- Support returning HTTP 429 (Too Many Requests)
|
- Support returning HTTP 429 (Too Many Requests)
|
||||||
|
|
||||||
### Documentation
|
#### Documentation
|
||||||
- Update auth documentation examples to show "expires in" as int
|
- Update auth documentation examples to show "expires in" as int
|
||||||
|
|
||||||
### Docker Image
|
#### Docker Image
|
||||||
- Use Alpine Linux as base image
|
- Use Alpine Linux as base image
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
2
vendor/github.com/docker/distribution/Dockerfile
generated
vendored
2
vendor/github.com/docker/distribution/Dockerfile
generated
vendored
@@ -1,4 +1,4 @@
|
|||||||
FROM golang:1.6-alpine
|
FROM golang:1.7-alpine
|
||||||
|
|
||||||
ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
|
ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
|
||||||
ENV DOCKER_BUILDTAGS include_oss include_gcs
|
ENV DOCKER_BUILDTAGS include_oss include_gcs
|
||||||
|
|||||||
10
vendor/github.com/docker/distribution/Godeps/Godeps.json
generated
vendored
10
vendor/github.com/docker/distribution/Godeps/Godeps.json
generated
vendored
@@ -8,8 +8,8 @@
|
|||||||
"Deps": [
|
"Deps": [
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/Azure/azure-sdk-for-go/storage",
|
"ImportPath": "github.com/Azure/azure-sdk-for-go/storage",
|
||||||
"Comment": "v1.2-334-g95361a2",
|
"Comment": "v5.0.0-beta-6-g0b5fe2a",
|
||||||
"Rev": "95361a2573b1fa92a00c5fc2707a80308483c6f9"
|
"Rev": "0b5fe2abe0271ba07049eacaa65922d67c319543"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/Sirupsen/logrus",
|
"ImportPath": "github.com/Sirupsen/logrus",
|
||||||
@@ -167,15 +167,15 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/denverdino/aliyungo/common",
|
"ImportPath": "github.com/denverdino/aliyungo/common",
|
||||||
"Rev": "6ffb587da9da6d029d0ce517b85fecc82172d502"
|
"Rev": "afedced274aa9a7fcdd47ac97018f0f8db4e5de2"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/denverdino/aliyungo/oss",
|
"ImportPath": "github.com/denverdino/aliyungo/oss",
|
||||||
"Rev": "6ffb587da9da6d029d0ce517b85fecc82172d502"
|
"Rev": "afedced274aa9a7fcdd47ac97018f0f8db4e5de2"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/denverdino/aliyungo/util",
|
"ImportPath": "github.com/denverdino/aliyungo/util",
|
||||||
"Rev": "6ffb587da9da6d029d0ce517b85fecc82172d502"
|
"Rev": "afedced274aa9a7fcdd47ac97018f0f8db4e5de2"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/goamz/aws",
|
"ImportPath": "github.com/docker/goamz/aws",
|
||||||
|
|||||||
8
vendor/github.com/docker/distribution/Jenkinsfile
generated
vendored
8
vendor/github.com/docker/distribution/Jenkinsfile
generated
vendored
@@ -1,8 +0,0 @@
|
|||||||
// Only run on Linux atm
|
|
||||||
wrappedNode(label: 'docker') {
|
|
||||||
deleteDir()
|
|
||||||
stage "checkout"
|
|
||||||
checkout scm
|
|
||||||
|
|
||||||
documentationChecker("docs")
|
|
||||||
}
|
|
||||||
17
vendor/github.com/docker/distribution/Makefile
generated
vendored
17
vendor/github.com/docker/distribution/Makefile
generated
vendored
@@ -13,7 +13,7 @@ endif
|
|||||||
|
|
||||||
GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)"
|
GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)"
|
||||||
|
|
||||||
.PHONY: clean all fmt vet lint build test binaries
|
.PHONY: all build binaries clean dep-restore dep-save dep-validate fmt lint test test-full vet
|
||||||
.DEFAULT: all
|
.DEFAULT: all
|
||||||
all: fmt vet lint build test binaries
|
all: fmt vet lint build test binaries
|
||||||
|
|
||||||
@@ -27,22 +27,25 @@ version/version.go:
|
|||||||
# Required for go 1.5 to build
|
# Required for go 1.5 to build
|
||||||
GO15VENDOREXPERIMENT := 1
|
GO15VENDOREXPERIMENT := 1
|
||||||
|
|
||||||
|
# Go files
|
||||||
|
GOFILES=$(shell find . -type f -name '*.go')
|
||||||
|
|
||||||
# Package list
|
# Package list
|
||||||
PKGS := $(shell go list -tags "${DOCKER_BUILDTAGS}" ./... | grep -v ^github.com/docker/distribution/vendor/)
|
PKGS=$(shell go list -tags "${DOCKER_BUILDTAGS}" ./... | grep -v ^github.com/docker/distribution/vendor/)
|
||||||
|
|
||||||
# Resolving binary dependencies for specific targets
|
# Resolving binary dependencies for specific targets
|
||||||
GOLINT := $(shell which golint || echo '')
|
GOLINT=$(shell which golint || echo '')
|
||||||
GODEP := $(shell which godep || echo '')
|
GODEP=$(shell which godep || echo '')
|
||||||
|
|
||||||
${PREFIX}/bin/registry: $(wildcard **/*.go)
|
${PREFIX}/bin/registry: $(GOFILES)
|
||||||
@echo "+ $@"
|
@echo "+ $@"
|
||||||
@go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry
|
@go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry
|
||||||
|
|
||||||
${PREFIX}/bin/digest: $(wildcard **/*.go)
|
${PREFIX}/bin/digest: $(GOFILES)
|
||||||
@echo "+ $@"
|
@echo "+ $@"
|
||||||
@go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest
|
@go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest
|
||||||
|
|
||||||
${PREFIX}/bin/registry-api-descriptor-template: $(wildcard **/*.go)
|
${PREFIX}/bin/registry-api-descriptor-template: $(GOFILES)
|
||||||
@echo "+ $@"
|
@echo "+ $@"
|
||||||
@go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template
|
@go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template
|
||||||
|
|
||||||
|
|||||||
4
vendor/github.com/docker/distribution/README.md
generated
vendored
4
vendor/github.com/docker/distribution/README.md
generated
vendored
@@ -19,7 +19,7 @@ This repository contains the following components:
|
|||||||
| **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. |
|
| **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. |
|
||||||
| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
|
| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
|
||||||
| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) |
|
| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) |
|
||||||
| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/index.md) related just to the registry. |
|
| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry. |
|
||||||
|
|
||||||
### How does this integrate with Docker engine?
|
### How does this integrate with Docker engine?
|
||||||
|
|
||||||
@@ -68,7 +68,7 @@ others, it is not.
|
|||||||
For example, users with their own software products may want to maintain a
|
For example, users with their own software products may want to maintain a
|
||||||
registry for private, company images. Also, you may wish to deploy your own
|
registry for private, company images. Also, you may wish to deploy your own
|
||||||
image repository for images used to test or in continuous integration. For these
|
image repository for images used to test or in continuous integration. For these
|
||||||
use cases and others, [deploying your own registry instance](docs/deploying.md)
|
use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md)
|
||||||
may be the better choice.
|
may be the better choice.
|
||||||
|
|
||||||
### Migration to Registry 2.0
|
### Migration to Registry 2.0
|
||||||
|
|||||||
36
vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md
generated
vendored
Normal file
36
vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
## Registry Release Checklist
|
||||||
|
|
||||||
|
10. Compile release notes detailing features and since the last release. Update the `CHANGELOG.md` file.
|
||||||
|
|
||||||
|
20. Update the version file: `https://github.com/docker/distribution/blob/master/version/version.go`
|
||||||
|
|
||||||
|
30. Update the `MAINTAINERS` (if necessary), `AUTHORS` and `.mailmap` files.
|
||||||
|
|
||||||
|
```
|
||||||
|
make AUTHORS
|
||||||
|
```
|
||||||
|
|
||||||
|
40. Create a signed tag.
|
||||||
|
|
||||||
|
Distribution uses semantic versioning. Tags are of the format `vx.y.z[-rcn]`
|
||||||
|
You will need PGP installed and a PGP key which has been added to your Github account. The comment for the tag should include the release notes.
|
||||||
|
|
||||||
|
50. Push the signed tag
|
||||||
|
|
||||||
|
60. Create a new [release](https://github.com/docker/distribution/releases). In the case of a release candidate, tick the `pre-release` checkbox.
|
||||||
|
|
||||||
|
70. Update the registry binary in [distribution library image repo](https://github.com/docker/distribution-library-image) by running the update script and opening a pull request.
|
||||||
|
|
||||||
|
80. Update the official image. Add the new version in the [official images repo](https://github.com/docker-library/official-images) by appending a new version to the `registry/registry` file with the git hash pointed to by the signed tag. Update the major version to point to the latest version and the minor version to point to new patch release if necessary.
|
||||||
|
e.g. to release `2.3.1`
|
||||||
|
|
||||||
|
`2.3.1 (new)`
|
||||||
|
|
||||||
|
`2.3.0 -> 2.3.0` can be removed
|
||||||
|
|
||||||
|
`2 -> 2.3.1`
|
||||||
|
|
||||||
|
`2.3 -> 2.3.1`
|
||||||
|
|
||||||
|
90. Build a new distribution/registry image on [Docker hub](https://hub.docker.com/u/distribution/dashboard) by adding a new automated build with the new tag and re-building the images.
|
||||||
|
|
||||||
2
vendor/github.com/docker/distribution/circle.yml
generated
vendored
2
vendor/github.com/docker/distribution/circle.yml
generated
vendored
@@ -8,7 +8,7 @@ machine:
|
|||||||
|
|
||||||
post:
|
post:
|
||||||
# go
|
# go
|
||||||
- gvm install go1.6 --prefer-binary --name=stable
|
- gvm install go1.7 --prefer-binary --name=stable
|
||||||
|
|
||||||
environment:
|
environment:
|
||||||
# Convenient shortcuts to "common" locations
|
# Convenient shortcuts to "common" locations
|
||||||
|
|||||||
13
vendor/github.com/docker/distribution/configuration/configuration.go
generated
vendored
13
vendor/github.com/docker/distribution/configuration/configuration.go
generated
vendored
@@ -203,6 +203,19 @@ type Configuration struct {
|
|||||||
} `yaml:"urls,omitempty"`
|
} `yaml:"urls,omitempty"`
|
||||||
} `yaml:"manifests,omitempty"`
|
} `yaml:"manifests,omitempty"`
|
||||||
} `yaml:"validation,omitempty"`
|
} `yaml:"validation,omitempty"`
|
||||||
|
|
||||||
|
// Policy configures registry policy options.
|
||||||
|
Policy struct {
|
||||||
|
// Repository configures policies for repositories
|
||||||
|
Repository struct {
|
||||||
|
// Classes is a list of repository classes which the
|
||||||
|
// registry allows content for. This class is matched
|
||||||
|
// against the configuration media type inside uploaded
|
||||||
|
// manifests. When non-empty, the registry will enforce
|
||||||
|
// the class in authorized resources.
|
||||||
|
Classes []string `yaml:"classes"`
|
||||||
|
} `yaml:"repository,omitempty"`
|
||||||
|
} `yaml:"policy,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// LogHook is composed of hook Level and Type.
|
// LogHook is composed of hook Level and Type.
|
||||||
|
|||||||
54
vendor/github.com/docker/distribution/contrib/token-server/main.go
generated
vendored
54
vendor/github.com/docker/distribution/contrib/token-server/main.go
generated
vendored
@@ -18,6 +18,10 @@ import (
|
|||||||
"github.com/gorilla/mux"
|
"github.com/gorilla/mux"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
enforceRepoClass bool
|
||||||
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
var (
|
var (
|
||||||
issuer = &TokenIssuer{}
|
issuer = &TokenIssuer{}
|
||||||
@@ -44,6 +48,8 @@ func main() {
|
|||||||
flag.StringVar(&cert, "tlscert", "", "Certificate file for TLS")
|
flag.StringVar(&cert, "tlscert", "", "Certificate file for TLS")
|
||||||
flag.StringVar(&certKey, "tlskey", "", "Certificate key for TLS")
|
flag.StringVar(&certKey, "tlskey", "", "Certificate key for TLS")
|
||||||
|
|
||||||
|
flag.BoolVar(&enforceRepoClass, "enforce-class", false, "Enforce policy for single repository class")
|
||||||
|
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
|
|
||||||
if debug {
|
if debug {
|
||||||
@@ -157,6 +163,8 @@ type tokenResponse struct {
|
|||||||
ExpiresIn int `json:"expires_in,omitempty"`
|
ExpiresIn int `json:"expires_in,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var repositoryClassCache = map[string]string{}
|
||||||
|
|
||||||
func filterAccessList(ctx context.Context, scope string, requestedAccessList []auth.Access) []auth.Access {
|
func filterAccessList(ctx context.Context, scope string, requestedAccessList []auth.Access) []auth.Access {
|
||||||
if !strings.HasSuffix(scope, "/") {
|
if !strings.HasSuffix(scope, "/") {
|
||||||
scope = scope + "/"
|
scope = scope + "/"
|
||||||
@@ -168,6 +176,16 @@ func filterAccessList(ctx context.Context, scope string, requestedAccessList []a
|
|||||||
context.GetLogger(ctx).Debugf("Resource scope not allowed: %s", access.Name)
|
context.GetLogger(ctx).Debugf("Resource scope not allowed: %s", access.Name)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
if enforceRepoClass {
|
||||||
|
if class, ok := repositoryClassCache[access.Name]; ok {
|
||||||
|
if class != access.Class {
|
||||||
|
context.GetLogger(ctx).Debugf("Different repository class: %q, previously %q", access.Class, class)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
} else if strings.EqualFold(access.Action, "push") {
|
||||||
|
repositoryClassCache[access.Name] = access.Class
|
||||||
|
}
|
||||||
|
}
|
||||||
} else if access.Type == "registry" {
|
} else if access.Type == "registry" {
|
||||||
if access.Name != "catalog" {
|
if access.Name != "catalog" {
|
||||||
context.GetLogger(ctx).Debugf("Unknown registry resource: %s", access.Name)
|
context.GetLogger(ctx).Debugf("Unknown registry resource: %s", access.Name)
|
||||||
@@ -183,6 +201,18 @@ func filterAccessList(ctx context.Context, scope string, requestedAccessList []a
|
|||||||
return grantedAccessList
|
return grantedAccessList
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type acctSubject struct{}
|
||||||
|
|
||||||
|
func (acctSubject) String() string { return "acctSubject" }
|
||||||
|
|
||||||
|
type requestedAccess struct{}
|
||||||
|
|
||||||
|
func (requestedAccess) String() string { return "requestedAccess" }
|
||||||
|
|
||||||
|
type grantedAccess struct{}
|
||||||
|
|
||||||
|
func (grantedAccess) String() string { return "grantedAccess" }
|
||||||
|
|
||||||
// getToken handles authenticating the request and authorizing access to the
|
// getToken handles authenticating the request and authorizing access to the
|
||||||
// requested scopes.
|
// requested scopes.
|
||||||
func (ts *tokenServer) getToken(ctx context.Context, w http.ResponseWriter, r *http.Request) {
|
func (ts *tokenServer) getToken(ctx context.Context, w http.ResponseWriter, r *http.Request) {
|
||||||
@@ -225,17 +255,17 @@ func (ts *tokenServer) getToken(ctx context.Context, w http.ResponseWriter, r *h
|
|||||||
|
|
||||||
username := context.GetStringValue(ctx, "auth.user.name")
|
username := context.GetStringValue(ctx, "auth.user.name")
|
||||||
|
|
||||||
ctx = context.WithValue(ctx, "acctSubject", username)
|
ctx = context.WithValue(ctx, acctSubject{}, username)
|
||||||
ctx = context.WithLogger(ctx, context.GetLogger(ctx, "acctSubject"))
|
ctx = context.WithLogger(ctx, context.GetLogger(ctx, acctSubject{}))
|
||||||
|
|
||||||
context.GetLogger(ctx).Info("authenticated client")
|
context.GetLogger(ctx).Info("authenticated client")
|
||||||
|
|
||||||
ctx = context.WithValue(ctx, "requestedAccess", requestedAccessList)
|
ctx = context.WithValue(ctx, requestedAccess{}, requestedAccessList)
|
||||||
ctx = context.WithLogger(ctx, context.GetLogger(ctx, "requestedAccess"))
|
ctx = context.WithLogger(ctx, context.GetLogger(ctx, requestedAccess{}))
|
||||||
|
|
||||||
grantedAccessList := filterAccessList(ctx, username, requestedAccessList)
|
grantedAccessList := filterAccessList(ctx, username, requestedAccessList)
|
||||||
ctx = context.WithValue(ctx, "grantedAccess", grantedAccessList)
|
ctx = context.WithValue(ctx, grantedAccess{}, grantedAccessList)
|
||||||
ctx = context.WithLogger(ctx, context.GetLogger(ctx, "grantedAccess"))
|
ctx = context.WithLogger(ctx, context.GetLogger(ctx, grantedAccess{}))
|
||||||
|
|
||||||
token, err := ts.issuer.CreateJWT(username, service, grantedAccessList)
|
token, err := ts.issuer.CreateJWT(username, service, grantedAccessList)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -347,17 +377,17 @@ func (ts *tokenServer) postToken(ctx context.Context, w http.ResponseWriter, r *
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx = context.WithValue(ctx, "acctSubject", subject)
|
ctx = context.WithValue(ctx, acctSubject{}, subject)
|
||||||
ctx = context.WithLogger(ctx, context.GetLogger(ctx, "acctSubject"))
|
ctx = context.WithLogger(ctx, context.GetLogger(ctx, acctSubject{}))
|
||||||
|
|
||||||
context.GetLogger(ctx).Info("authenticated client")
|
context.GetLogger(ctx).Info("authenticated client")
|
||||||
|
|
||||||
ctx = context.WithValue(ctx, "requestedAccess", requestedAccessList)
|
ctx = context.WithValue(ctx, requestedAccess{}, requestedAccessList)
|
||||||
ctx = context.WithLogger(ctx, context.GetLogger(ctx, "requestedAccess"))
|
ctx = context.WithLogger(ctx, context.GetLogger(ctx, requestedAccess{}))
|
||||||
|
|
||||||
grantedAccessList := filterAccessList(ctx, subject, requestedAccessList)
|
grantedAccessList := filterAccessList(ctx, subject, requestedAccessList)
|
||||||
ctx = context.WithValue(ctx, "grantedAccess", grantedAccessList)
|
ctx = context.WithValue(ctx, grantedAccess{}, grantedAccessList)
|
||||||
ctx = context.WithLogger(ctx, context.GetLogger(ctx, "grantedAccess"))
|
ctx = context.WithLogger(ctx, context.GetLogger(ctx, grantedAccess{}))
|
||||||
|
|
||||||
token, err := ts.issuer.CreateJWT(subject, service, grantedAccessList)
|
token, err := ts.issuer.CreateJWT(subject, service, grantedAccessList)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
34
vendor/github.com/docker/distribution/contrib/token-server/token.go
generated
vendored
34
vendor/github.com/docker/distribution/contrib/token-server/token.go
generated
vendored
@@ -7,6 +7,7 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -32,12 +33,18 @@ func ResolveScopeSpecifiers(ctx context.Context, scopeSpecs []string) []auth.Acc
|
|||||||
|
|
||||||
resourceType, resourceName, actions := parts[0], parts[1], parts[2]
|
resourceType, resourceName, actions := parts[0], parts[1], parts[2]
|
||||||
|
|
||||||
|
resourceType, resourceClass := splitResourceClass(resourceType)
|
||||||
|
if resourceType == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
// Actions should be a comma-separated list of actions.
|
// Actions should be a comma-separated list of actions.
|
||||||
for _, action := range strings.Split(actions, ",") {
|
for _, action := range strings.Split(actions, ",") {
|
||||||
requestedAccess := auth.Access{
|
requestedAccess := auth.Access{
|
||||||
Resource: auth.Resource{
|
Resource: auth.Resource{
|
||||||
Type: resourceType,
|
Type: resourceType,
|
||||||
Name: resourceName,
|
Class: resourceClass,
|
||||||
|
Name: resourceName,
|
||||||
},
|
},
|
||||||
Action: action,
|
Action: action,
|
||||||
}
|
}
|
||||||
@@ -55,6 +62,19 @@ func ResolveScopeSpecifiers(ctx context.Context, scopeSpecs []string) []auth.Acc
|
|||||||
return requestedAccessList
|
return requestedAccessList
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var typeRegexp = regexp.MustCompile(`^([a-z0-9]+)(\([a-z0-9]+\))?$`)
|
||||||
|
|
||||||
|
func splitResourceClass(t string) (string, string) {
|
||||||
|
matches := typeRegexp.FindStringSubmatch(t)
|
||||||
|
if len(matches) < 2 {
|
||||||
|
return "", ""
|
||||||
|
}
|
||||||
|
if len(matches) == 2 || len(matches[2]) < 2 {
|
||||||
|
return matches[1], ""
|
||||||
|
}
|
||||||
|
return matches[1], matches[2][1 : len(matches[2])-1]
|
||||||
|
}
|
||||||
|
|
||||||
// ResolveScopeList converts a scope list from a token request's
|
// ResolveScopeList converts a scope list from a token request's
|
||||||
// `scope` parameter into a list of standard access objects.
|
// `scope` parameter into a list of standard access objects.
|
||||||
func ResolveScopeList(ctx context.Context, scopeList string) []auth.Access {
|
func ResolveScopeList(ctx context.Context, scopeList string) []auth.Access {
|
||||||
@@ -62,12 +82,19 @@ func ResolveScopeList(ctx context.Context, scopeList string) []auth.Access {
|
|||||||
return ResolveScopeSpecifiers(ctx, scopes)
|
return ResolveScopeSpecifiers(ctx, scopes)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func scopeString(a auth.Access) string {
|
||||||
|
if a.Class != "" {
|
||||||
|
return fmt.Sprintf("%s(%s):%s:%s", a.Type, a.Class, a.Name, a.Action)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%s:%s:%s", a.Type, a.Name, a.Action)
|
||||||
|
}
|
||||||
|
|
||||||
// ToScopeList converts a list of access to a
|
// ToScopeList converts a list of access to a
|
||||||
// scope list string
|
// scope list string
|
||||||
func ToScopeList(access []auth.Access) string {
|
func ToScopeList(access []auth.Access) string {
|
||||||
var s []string
|
var s []string
|
||||||
for _, a := range access {
|
for _, a := range access {
|
||||||
s = append(s, fmt.Sprintf("%s:%s:%s", a.Type, a.Name, a.Action))
|
s = append(s, scopeString(a))
|
||||||
}
|
}
|
||||||
return strings.Join(s, ",")
|
return strings.Join(s, ",")
|
||||||
}
|
}
|
||||||
@@ -102,6 +129,7 @@ func (issuer *TokenIssuer) CreateJWT(subject string, audience string, grantedAcc
|
|||||||
|
|
||||||
accessEntries = append(accessEntries, &token.ResourceActions{
|
accessEntries = append(accessEntries, &token.ResourceActions{
|
||||||
Type: resource.Type,
|
Type: resource.Type,
|
||||||
|
Class: resource.Class,
|
||||||
Name: resource.Name,
|
Name: resource.Name,
|
||||||
Actions: actions,
|
Actions: actions,
|
||||||
})
|
})
|
||||||
|
|||||||
9
vendor/github.com/docker/distribution/docs/Dockerfile
generated
vendored
9
vendor/github.com/docker/distribution/docs/Dockerfile
generated
vendored
@@ -1,9 +0,0 @@
|
|||||||
FROM docs/base:oss
|
|
||||||
MAINTAINER Docker Docs <docs@docker.com>
|
|
||||||
|
|
||||||
ENV PROJECT=registry
|
|
||||||
|
|
||||||
# To get the git info for this repo
|
|
||||||
COPY . /src
|
|
||||||
RUN rm -rf /docs/content/$PROJECT/
|
|
||||||
COPY . /docs/content/$PROJECT/
|
|
||||||
38
vendor/github.com/docker/distribution/docs/Makefile
generated
vendored
38
vendor/github.com/docker/distribution/docs/Makefile
generated
vendored
@@ -1,38 +0,0 @@
|
|||||||
.PHONY: all default docs docs-build docs-shell shell test
|
|
||||||
|
|
||||||
# to allow `make DOCSDIR=docs docs-shell` (to create a bind mount in docs)
|
|
||||||
DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR))
|
|
||||||
|
|
||||||
# to allow `make DOCSPORT=9000 docs`
|
|
||||||
DOCSPORT := 8000
|
|
||||||
|
|
||||||
# Get the IP ADDRESS
|
|
||||||
DOCKER_IP=$(shell python -c "import urlparse ; print urlparse.urlparse('$(DOCKER_HOST)').hostname or ''")
|
|
||||||
HUGO_BASE_URL=$(shell test -z "$(DOCKER_IP)" && echo localhost || echo "$(DOCKER_IP)")
|
|
||||||
HUGO_BIND_IP=0.0.0.0
|
|
||||||
|
|
||||||
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
|
|
||||||
GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
|
|
||||||
DOCKER_DOCS_IMAGE := registry-docs$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN))
|
|
||||||
|
|
||||||
DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE
|
|
||||||
|
|
||||||
# for some docs workarounds (see below in "docs-build" target)
|
|
||||||
GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null)
|
|
||||||
|
|
||||||
default: docs
|
|
||||||
|
|
||||||
docs: docs-build
|
|
||||||
$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP)
|
|
||||||
|
|
||||||
docs-draft: docs-build
|
|
||||||
$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --buildDrafts="true" --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP)
|
|
||||||
|
|
||||||
docs-shell: docs-build
|
|
||||||
$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash
|
|
||||||
|
|
||||||
docs-build:
|
|
||||||
docker build -t "$(DOCKER_DOCS_IMAGE)" .
|
|
||||||
|
|
||||||
test: docs-build
|
|
||||||
$(DOCKER_RUN_DOCS) "$(DOCKER_DOCS_IMAGE)"
|
|
||||||
16
vendor/github.com/docker/distribution/docs/README.md
generated
vendored
Normal file
16
vendor/github.com/docker/distribution/docs/README.md
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
# The docs have been moved!
|
||||||
|
|
||||||
|
The documentation for Registry has been merged into
|
||||||
|
[the general documentation repo](https://github.com/docker/docker.github.io).
|
||||||
|
Commit history has been preserved.
|
||||||
|
|
||||||
|
The docs for Registry are now here:
|
||||||
|
https://github.com/docker/docker.github.io/tree/master/registry
|
||||||
|
|
||||||
|
> Note: The definitive [./spec directory](spec/) directory and
|
||||||
|
[configuration.md](configuration.md) file will be maintained in this repository
|
||||||
|
and be refreshed periodically in
|
||||||
|
[the general documentation repo](https://github.com/docker/docker.github.io).
|
||||||
|
|
||||||
|
As always, the docs in the general repo remain open-source and we appreciate
|
||||||
|
your feedback and pull requests!
|
||||||
54
vendor/github.com/docker/distribution/docs/architecture.md
generated
vendored
54
vendor/github.com/docker/distribution/docs/architecture.md
generated
vendored
@@ -1,54 +0,0 @@
|
|||||||
<!--[metadata]>
|
|
||||||
+++
|
|
||||||
draft = true
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Architecture
|
|
||||||
|
|
||||||
## Design
|
|
||||||
**TODO(stevvooe):** Discuss the architecture of the registry, internally and externally, in a few different deployment scenarios.
|
|
||||||
|
|
||||||
### Eventual Consistency
|
|
||||||
|
|
||||||
> **NOTE:** This section belongs somewhere, perhaps in a design document. We
|
|
||||||
> are leaving this here so the information is not lost.
|
|
||||||
|
|
||||||
Running the registry on eventually consistent backends has been part of the
|
|
||||||
design from the beginning. This section covers some of the approaches to
|
|
||||||
dealing with this reality.
|
|
||||||
|
|
||||||
There are a few classes of issues that we need to worry about when
|
|
||||||
implementing something on top of the storage drivers:
|
|
||||||
|
|
||||||
1. Read-After-Write consistency (see this [article on
|
|
||||||
s3](http://shlomoswidler.com/2009/12/read-after-write-consistency-in-amazon.html)).
|
|
||||||
2. [Write-Write Conflicts](http://en.wikipedia.org/wiki/Write%E2%80%93write_conflict).
|
|
||||||
|
|
||||||
In reality, the registry must worry about these kinds of errors when doing the
|
|
||||||
following:
|
|
||||||
|
|
||||||
1. Accepting data into a temporary upload file may not have latest data block
|
|
||||||
yet (read-after-write).
|
|
||||||
2. Moving uploaded data into its blob location (write-write race).
|
|
||||||
3. Modifying the "current" manifest for given tag (write-write race).
|
|
||||||
4. A whole slew of operations around deletes (read-after-write, delete-write
|
|
||||||
races, garbage collection, etc.).
|
|
||||||
|
|
||||||
The backend path layout employs a few techniques to avoid these problems:
|
|
||||||
|
|
||||||
1. Large writes are done to private upload directories. This alleviates most
|
|
||||||
of the corruption potential under multiple writers by avoiding multiple
|
|
||||||
writers.
|
|
||||||
2. Constraints in storage driver implementations, such as support for writing
|
|
||||||
after the end of a file to extend it.
|
|
||||||
3. Digest verification to avoid data corruption.
|
|
||||||
4. Manifest files are stored by digest and cannot change.
|
|
||||||
5. All other non-content files (links, hashes, etc.) are written as an atomic
|
|
||||||
unit. Anything that requires additions and deletions is broken out into
|
|
||||||
separate "files". Last writer still wins.
|
|
||||||
|
|
||||||
Unfortunately, one must play this game when trying to build something like
|
|
||||||
this on top of eventually consistent storage systems. If we run into serious
|
|
||||||
problems, we can wrap the storagedrivers in a shared consistency layer but
|
|
||||||
that would increase complexity and hinder registry cluster performance.
|
|
||||||
84
vendor/github.com/docker/distribution/docs/compatibility.md
generated
vendored
84
vendor/github.com/docker/distribution/docs/compatibility.md
generated
vendored
@@ -1,84 +0,0 @@
|
|||||||
<!--[metadata]>
|
|
||||||
+++
|
|
||||||
title = "Compatibility"
|
|
||||||
description = "describes get by digest pitfall"
|
|
||||||
keywords = ["registry, manifest, images, tags, repository, distribution, digest"]
|
|
||||||
[menu.main]
|
|
||||||
parent="smn_registry_ref"
|
|
||||||
weight=9
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Registry Compatibility
|
|
||||||
|
|
||||||
## Synopsis
|
|
||||||
*If a manifest is pulled by _digest_ from a registry 2.3 with Docker Engine 1.9
|
|
||||||
and older, and the manifest was pushed with Docker Engine 1.10, a security check
|
|
||||||
will cause the Engine to receive a manifest it cannot use and the pull will fail.*
|
|
||||||
|
|
||||||
## Registry Manifest Support
|
|
||||||
|
|
||||||
Historically, the registry has supported a [single manifest type](./spec/manifest-v2-1.md)
|
|
||||||
known as _Schema 1_.
|
|
||||||
|
|
||||||
With the move toward multiple architecture images the distribution project
|
|
||||||
introduced two new manifest types: Schema 2 manifests and manifest lists. The
|
|
||||||
registry 2.3 supports all three manifest types and in order to be compatible
|
|
||||||
with older Docker engines will, in certain cases, do an on-the-fly
|
|
||||||
transformation of a manifest before serving the JSON in the response.
|
|
||||||
|
|
||||||
This conversion has some implications for pulling manifests by digest and this
|
|
||||||
document enumerate these implications.
|
|
||||||
|
|
||||||
|
|
||||||
## Content Addressable Storage (CAS)
|
|
||||||
|
|
||||||
Manifests are stored and retrieved in the registry by keying off a digest
|
|
||||||
representing a hash of the contents. One of the advantages provided by CAS is
|
|
||||||
security: if the contents are changed, then the digest will no longer match.
|
|
||||||
This prevents any modification of the manifest by a MITM attack or an untrusted
|
|
||||||
third party.
|
|
||||||
|
|
||||||
When a manifest is stored by the registry, this digest is returned in the HTTP
|
|
||||||
response headers and, if events are configured, delivered within the event. The
|
|
||||||
manifest can either be retrieved by the tag, or this digest.
|
|
||||||
|
|
||||||
For registry versions 2.2.1 and below, the registry will always store and
|
|
||||||
serve _Schema 1_ manifests. The Docker Engine 1.10 will first
|
|
||||||
attempt to send a _Schema 2_ manifest, falling back to sending a
|
|
||||||
Schema 1 type manifest when it detects that the registry does not
|
|
||||||
support the new version.
|
|
||||||
|
|
||||||
|
|
||||||
## Registry v2.3
|
|
||||||
|
|
||||||
### Manifest Push with Docker 1.9 and Older
|
|
||||||
|
|
||||||
The Docker Engine will construct a _Schema 1_ manifest which the
|
|
||||||
registry will persist to disk.
|
|
||||||
|
|
||||||
When the manifest is pulled by digest or tag with any docker version, a
|
|
||||||
_Schema 1_ manifest will be returned.
|
|
||||||
|
|
||||||
### Manifest Push with Docker 1.10
|
|
||||||
|
|
||||||
The docker engine will construct a _Schema 2_ manifest which the
|
|
||||||
registry will persist to disk.
|
|
||||||
|
|
||||||
When the manifest is pulled by digest or tag with Docker Engine 1.10, a
|
|
||||||
_Schema 2_ manifest will be returned. The Docker Engine 1.10
|
|
||||||
understands the new manifest format.
|
|
||||||
|
|
||||||
When the manifest is pulled by *tag* with Docker Engine 1.9 and older, the
|
|
||||||
manifest is converted on-the-fly to _Schema 1_ and sent in the
|
|
||||||
response. The Docker Engine 1.9 is compatible with this older format.
|
|
||||||
|
|
||||||
*When the manifest is pulled by _digest_ with Docker Engine 1.9 and older, the
|
|
||||||
same rewriting process will not happen in the registry. If this were to happen
|
|
||||||
the digest would no longer match the hash of the manifest and would violate the
|
|
||||||
constraints of CAS.*
|
|
||||||
|
|
||||||
For this reason if a manifest is pulled by _digest_ from a registry 2.3 with Docker
|
|
||||||
Engine 1.9 and older, and the manifest was pushed with Docker Engine 1.10, a
|
|
||||||
security check will cause the Engine to receive a manifest it cannot use and the
|
|
||||||
pull will fail.
|
|
||||||
2650
vendor/github.com/docker/distribution/docs/configuration.md
generated
vendored
2650
vendor/github.com/docker/distribution/docs/configuration.md
generated
vendored
File diff suppressed because it is too large
Load Diff
237
vendor/github.com/docker/distribution/docs/deploying.md
generated
vendored
237
vendor/github.com/docker/distribution/docs/deploying.md
generated
vendored
@@ -1,237 +0,0 @@
|
|||||||
<!--[metadata]>
|
|
||||||
+++
|
|
||||||
title = "Deploying a registry server"
|
|
||||||
description = "Explains how to deploy a registry"
|
|
||||||
keywords = ["registry, on-prem, images, tags, repository, distribution, deployment"]
|
|
||||||
[menu.main]
|
|
||||||
parent="smn_registry"
|
|
||||||
weight=3
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Deploying a registry server
|
|
||||||
|
|
||||||
You need to [install Docker version 1.6.0 or newer](/engine/installation/index.md).
|
|
||||||
|
|
||||||
## Running on localhost
|
|
||||||
|
|
||||||
Start your registry:
|
|
||||||
|
|
||||||
docker run -d -p 5000:5000 --restart=always --name registry registry:2
|
|
||||||
|
|
||||||
You can now use it with docker.
|
|
||||||
|
|
||||||
Get any image from the hub and tag it to point to your registry:
|
|
||||||
|
|
||||||
docker pull ubuntu && docker tag ubuntu localhost:5000/ubuntu
|
|
||||||
|
|
||||||
... then push it to your registry:
|
|
||||||
|
|
||||||
docker push localhost:5000/ubuntu
|
|
||||||
|
|
||||||
... then pull it back from your registry:
|
|
||||||
|
|
||||||
docker pull localhost:5000/ubuntu
|
|
||||||
|
|
||||||
To stop your registry, you would:
|
|
||||||
|
|
||||||
docker stop registry && docker rm -v registry
|
|
||||||
|
|
||||||
## Storage
|
|
||||||
|
|
||||||
By default, your registry data is persisted as a [docker volume](/engine/tutorials/dockervolumes.md) on the host filesystem. Properly understanding volumes is essential if you want to stick with a local filesystem storage.
|
|
||||||
|
|
||||||
Specifically, you might want to point your volume location to a specific place in order to more easily access your registry data. To do so you can:
|
|
||||||
|
|
||||||
docker run -d -p 5000:5000 --restart=always --name registry \
|
|
||||||
-v `pwd`/data:/var/lib/registry \
|
|
||||||
registry:2
|
|
||||||
|
|
||||||
### Alternatives
|
|
||||||
|
|
||||||
You should usually consider using [another storage backend](./storage-drivers/index.md) instead of the local filesystem. Use the [storage configuration options](./configuration.md#storage) to configure an alternate storage backend.
|
|
||||||
|
|
||||||
Using one of these will allow you to more easily scale your registry, and leverage your storage redundancy and availability features.
|
|
||||||
|
|
||||||
## Running a domain registry
|
|
||||||
|
|
||||||
While running on `localhost` has its uses, most people want their registry to be more widely available. To do so, the Docker engine requires you to secure it using TLS, which is conceptually very similar to configuring your web server with SSL.
|
|
||||||
|
|
||||||
### Get a certificate
|
|
||||||
|
|
||||||
Assuming that you own the domain `myregistrydomain.com`, and that its DNS record points to the host where you are running your registry, you first need to get a certificate from a CA.
|
|
||||||
|
|
||||||
Create a `certs` directory:
|
|
||||||
|
|
||||||
mkdir -p certs
|
|
||||||
|
|
||||||
Then move and/or rename your crt file to: `certs/domain.crt`, and your key file to: `certs/domain.key`.
|
|
||||||
|
|
||||||
Make sure you stopped your registry from the previous steps, then start your registry again with TLS enabled:
|
|
||||||
|
|
||||||
docker run -d -p 5000:5000 --restart=always --name registry \
|
|
||||||
-v `pwd`/certs:/certs \
|
|
||||||
-e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \
|
|
||||||
-e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \
|
|
||||||
registry:2
|
|
||||||
|
|
||||||
You should now be able to access your registry from another docker host:
|
|
||||||
|
|
||||||
docker pull ubuntu
|
|
||||||
docker tag ubuntu myregistrydomain.com:5000/ubuntu
|
|
||||||
docker push myregistrydomain.com:5000/ubuntu
|
|
||||||
docker pull myregistrydomain.com:5000/ubuntu
|
|
||||||
|
|
||||||
#### Gotcha
|
|
||||||
|
|
||||||
A certificate issuer may supply you with an *intermediate* certificate. In this case, you must combine your certificate with the intermediate's to form a *certificate bundle*. You can do this using the `cat` command:
|
|
||||||
|
|
||||||
cat domain.crt intermediate-certificates.pem > certs/domain.crt
|
|
||||||
|
|
||||||
### Let's Encrypt
|
|
||||||
|
|
||||||
The registry supports using Let's Encrypt to automatically obtain a browser-trusted certificate. For more
|
|
||||||
information on Let's Encrypt, see [https://letsencrypt.org/how-it-works/](https://letsencrypt.org/how-it-works/) and the relevant section of the [registry configuration](configuration.md#letsencrypt).
|
|
||||||
|
|
||||||
### Alternatives
|
|
||||||
|
|
||||||
While rarely advisable, you may want to use self-signed certificates instead, or use your registry in an insecure fashion. You will find instructions [here](insecure.md).
|
|
||||||
|
|
||||||
## Load Balancing Considerations
|
|
||||||
|
|
||||||
One may want to use a load balancer to distribute load, terminate TLS or
|
|
||||||
provide high availability. While a full load balancing setup is outside the
|
|
||||||
scope of this document, there are a few considerations that can make the process
|
|
||||||
smoother.
|
|
||||||
|
|
||||||
The most important aspect is that a load balanced cluster of registries must
|
|
||||||
share the same resources. For the current version of the registry, this means
|
|
||||||
the following must be the same:
|
|
||||||
|
|
||||||
- Storage Driver
|
|
||||||
- HTTP Secret
|
|
||||||
- Redis Cache (if configured)
|
|
||||||
|
|
||||||
If any of these are different, the registry will have trouble serving requests.
|
|
||||||
As an example, if you're using the filesystem driver, all registry instances
|
|
||||||
must have access to the same filesystem root, which means they should be in
|
|
||||||
the same machine. For other drivers, such as s3 or azure, they should be
|
|
||||||
accessing the same resource, and will likely share an identical configuration.
|
|
||||||
The _HTTP Secret_ coordinates uploads, so also must be the same across
|
|
||||||
instances. Configuring different redis instances will work (at the time
|
|
||||||
of writing), but will not be optimal if the instances are not shared, causing
|
|
||||||
more requests to be directed to the backend.
|
|
||||||
|
|
||||||
#### Important/Required HTTP-Headers
|
|
||||||
Getting the headers correct is very important. For all responses to any
|
|
||||||
request under the "/v2/" url space, the `Docker-Distribution-API-Version`
|
|
||||||
header should be set to the value "registry/2.0", even for a 4xx response.
|
|
||||||
This header allows the docker engine to quickly resolve authentication realms
|
|
||||||
and fallback to version 1 registries, if necessary. Confirming this is setup
|
|
||||||
correctly can help avoid problems with fallback.
|
|
||||||
|
|
||||||
In the same train of thought, you must make sure you are properly sending the
|
|
||||||
`X-Forwarded-Proto`, `X-Forwarded-For` and `Host` headers to their "client-side"
|
|
||||||
values. Failure to do so usually makes the registry issue redirects to internal
|
|
||||||
hostnames or downgrading from https to http.
|
|
||||||
|
|
||||||
A properly secured registry should return 401 when the "/v2/" endpoint is hit
|
|
||||||
without credentials. The response should include a `WWW-Authenticate`
|
|
||||||
challenge, providing guidance on how to authenticate, such as with basic auth
|
|
||||||
or a token service. If the load balancer has health checks, it is recommended
|
|
||||||
to configure it to consider a 401 response as healthy and any other as down.
|
|
||||||
This will secure your registry by ensuring that configuration problems with
|
|
||||||
authentication don't accidentally expose an unprotected registry. If you're
|
|
||||||
using a less sophisticated load balancer, such as Amazon's Elastic Load
|
|
||||||
Balancer, that doesn't allow one to change the healthy response code, health
|
|
||||||
checks can be directed at "/", which will always return a `200 OK` response.
|
|
||||||
|
|
||||||
## Restricting access
|
|
||||||
|
|
||||||
Except for registries running on secure local networks, registries should always implement access restrictions.
|
|
||||||
|
|
||||||
### Native basic auth
|
|
||||||
|
|
||||||
The simplest way to achieve access restriction is through basic authentication (this is very similar to other web servers' basic authentication mechanism).
|
|
||||||
|
|
||||||
> **Warning**: You **cannot** use authentication with an insecure registry. You have to [configure TLS first](#running-a-domain-registry) for this to work.
|
|
||||||
|
|
||||||
First create a password file with one entry for the user "testuser", with password "testpassword":
|
|
||||||
|
|
||||||
mkdir auth
|
|
||||||
docker run --entrypoint htpasswd registry:2 -Bbn testuser testpassword > auth/htpasswd
|
|
||||||
|
|
||||||
Make sure you stopped your registry from the previous step, then start it again:
|
|
||||||
|
|
||||||
docker run -d -p 5000:5000 --restart=always --name registry \
|
|
||||||
-v `pwd`/auth:/auth \
|
|
||||||
-e "REGISTRY_AUTH=htpasswd" \
|
|
||||||
-e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" \
|
|
||||||
-e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd \
|
|
||||||
-v `pwd`/certs:/certs \
|
|
||||||
-e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \
|
|
||||||
-e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \
|
|
||||||
registry:2
|
|
||||||
|
|
||||||
You should now be able to:
|
|
||||||
|
|
||||||
docker login myregistrydomain.com:5000
|
|
||||||
|
|
||||||
And then push and pull images as an authenticated user.
|
|
||||||
|
|
||||||
#### Gotcha
|
|
||||||
|
|
||||||
Seeing X509 errors is usually a sign you are trying to use self-signed certificates, and failed to [configure your docker daemon properly](insecure.md).
|
|
||||||
|
|
||||||
### Alternatives
|
|
||||||
|
|
||||||
1. You may want to leverage more advanced basic auth implementations through a proxy design, in front of the registry. You will find examples of such patterns in the [recipes list](recipes/index.md).
|
|
||||||
|
|
||||||
2. Alternatively, the Registry also supports delegated authentication, redirecting users to a specific, trusted token server. That approach requires significantly more investment, and only makes sense if you want to fully configure ACLs and more control over the Registry integration into your global authorization and authentication systems.
|
|
||||||
|
|
||||||
You will find [background information here](spec/auth/token.md), and [configuration information here](configuration.md#auth).
|
|
||||||
|
|
||||||
Beware that you will have to implement your own authentication service for this to work, or leverage a third-party implementation.
|
|
||||||
|
|
||||||
## Managing with Compose
|
|
||||||
|
|
||||||
As your registry configuration grows more complex, dealing with it can quickly become tedious.
|
|
||||||
|
|
||||||
It's highly recommended to use [Docker Compose](/compose/index.md) to facilitate operating your registry.
|
|
||||||
|
|
||||||
Here is a simple `docker-compose.yml` example that condenses everything explained so far:
|
|
||||||
|
|
||||||
```
|
|
||||||
registry:
|
|
||||||
restart: always
|
|
||||||
image: registry:2
|
|
||||||
ports:
|
|
||||||
- 5000:5000
|
|
||||||
environment:
|
|
||||||
REGISTRY_HTTP_TLS_CERTIFICATE: /certs/domain.crt
|
|
||||||
REGISTRY_HTTP_TLS_KEY: /certs/domain.key
|
|
||||||
REGISTRY_AUTH: htpasswd
|
|
||||||
REGISTRY_AUTH_HTPASSWD_PATH: /auth/htpasswd
|
|
||||||
REGISTRY_AUTH_HTPASSWD_REALM: Registry Realm
|
|
||||||
volumes:
|
|
||||||
- /path/data:/var/lib/registry
|
|
||||||
- /path/certs:/certs
|
|
||||||
- /path/auth:/auth
|
|
||||||
```
|
|
||||||
|
|
||||||
> **Warning**: replace `/path` by whatever directory that holds your `certs` and `auth` folder from above.
|
|
||||||
|
|
||||||
You can then start your registry with a simple
|
|
||||||
|
|
||||||
docker-compose up -d
|
|
||||||
|
|
||||||
## Next
|
|
||||||
|
|
||||||
You will find more specific and advanced information in the following sections:
|
|
||||||
|
|
||||||
- [Configuration reference](configuration.md)
|
|
||||||
- [Working with notifications](notifications.md)
|
|
||||||
- [Advanced "recipes"](recipes/index.md)
|
|
||||||
- [Registry API](spec/api.md)
|
|
||||||
- [Storage driver model](storage-drivers/index.md)
|
|
||||||
- [Token authentication](spec/auth/token.md)
|
|
||||||
27
vendor/github.com/docker/distribution/docs/deprecated.md
generated
vendored
27
vendor/github.com/docker/distribution/docs/deprecated.md
generated
vendored
@@ -1,27 +0,0 @@
|
|||||||
<!--[metadata]>
|
|
||||||
+++
|
|
||||||
title = "Deprecated Features"
|
|
||||||
description = "describes deprecated functionality"
|
|
||||||
keywords = ["registry, manifest, images, signatures, repository, distribution, digest"]
|
|
||||||
[menu.main]
|
|
||||||
parent="smn_registry_ref"
|
|
||||||
weight=8
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Docker Registry Deprecation
|
|
||||||
|
|
||||||
This document details functionality or components which are deprecated within
|
|
||||||
the registry.
|
|
||||||
|
|
||||||
### v2.5.0
|
|
||||||
|
|
||||||
The signature store has been removed from the registry. Since `v2.4.0` it has
|
|
||||||
been possible to configure the registry to generate manifest signatures rather
|
|
||||||
than load them from storage. In this version of the registry this becomes
|
|
||||||
the default behavior. Signatures which are attached to manifests on put are
|
|
||||||
not stored in the registry. This does not alter the functional behavior of
|
|
||||||
the registry.
|
|
||||||
|
|
||||||
Old signatures blobs can be removed from the registry storage by running the
|
|
||||||
garbage-collect subcommand.
|
|
||||||
137
vendor/github.com/docker/distribution/docs/garbage-collection.md
generated
vendored
137
vendor/github.com/docker/distribution/docs/garbage-collection.md
generated
vendored
@@ -1,137 +0,0 @@
|
|||||||
<!--[metadata]>
|
|
||||||
+++
|
|
||||||
title = "Garbage Collection"
|
|
||||||
description = "High level discussion of garbage collection"
|
|
||||||
keywords = ["registry, garbage, images, tags, repository, distribution"]
|
|
||||||
[menu.main]
|
|
||||||
parent="smn_registry_ref"
|
|
||||||
weight=4
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Garbage Collection
|
|
||||||
|
|
||||||
As of v2.4.0 a garbage collector command is included within the registry binary.
|
|
||||||
This document describes what this command does and how and why it should be used.
|
|
||||||
|
|
||||||
## What is Garbage Collection?
|
|
||||||
|
|
||||||
From [wikipedia](https://en.wikipedia.org/wiki/Garbage_collection_(computer_science)):
|
|
||||||
|
|
||||||
"In computer science, garbage collection (GC) is a form of automatic memory management. The
|
|
||||||
garbage collector, or just collector, attempts to reclaim garbage, or memory occupied by
|
|
||||||
objects that are no longer in use by the program."
|
|
||||||
|
|
||||||
In the context of the Docker registry, garbage collection is the process of
|
|
||||||
removing blobs from the filesystem which are no longer referenced by a
|
|
||||||
manifest. Blobs can include both layers and manifests.
|
|
||||||
|
|
||||||
|
|
||||||
## Why Garbage Collection?
|
|
||||||
|
|
||||||
Registry data can occupy considerable amounts of disk space and freeing up
|
|
||||||
this disk space is an oft-requested feature. Additionally for reasons of security it
|
|
||||||
can be desirable to ensure that certain layers no longer exist on the filesystem.
|
|
||||||
|
|
||||||
|
|
||||||
## Garbage Collection in the Registry
|
|
||||||
|
|
||||||
Filesystem layers are stored by their content address in the Registry. This
|
|
||||||
has many advantages, one of which is that data is stored once and referred to by manifests.
|
|
||||||
See [here](compatibility.md#content-addressable-storage-cas) for more details.
|
|
||||||
|
|
||||||
Layers are therefore shared amongst manifests; each manifest maintains a reference
|
|
||||||
to the layer. As long as a layer is referenced by one manifest, it cannot be garbage
|
|
||||||
collected.
|
|
||||||
|
|
||||||
Manifests and layers can be `deleted` with the registry API (refer to the API
|
|
||||||
documentation [here](spec/api.md#deleting-a-layer) and
|
|
||||||
[here](spec/api.md#deleting-an-image) for details). This API removes references
|
|
||||||
to the target and makes them eligible for garbage collection. It also makes them
|
|
||||||
unable to be read via the API.
|
|
||||||
|
|
||||||
If a layer is deleted it will be removed from the filesystem when garbage collection
|
|
||||||
is run. If a manifest is deleted the layers to which it refers will be removed from
|
|
||||||
the filesystem if no other manifests refer to them.
|
|
||||||
|
|
||||||
|
|
||||||
### Example
|
|
||||||
|
|
||||||
In this example manifest A references two layers: `a` and `b`. Manifest `B` references
|
|
||||||
layers `a` and `c`. In this state, nothing is eligible for garbage collection:
|
|
||||||
|
|
||||||
```
|
|
||||||
A -----> a <----- B
|
|
||||||
\--> b |
|
|
||||||
c <--/
|
|
||||||
```
|
|
||||||
|
|
||||||
Manifest B is deleted via the API:
|
|
||||||
|
|
||||||
```
|
|
||||||
A -----> a B
|
|
||||||
\--> b
|
|
||||||
c
|
|
||||||
```
|
|
||||||
|
|
||||||
In this state layer `c` no longer has a reference and is eligible for garbage
|
|
||||||
collection. Layer `a` had one reference removed but will not be garbage
|
|
||||||
collected as it is still referenced by manifest `A`. The blob representing
|
|
||||||
manifest `B` will also be eligible for garbage collection.
|
|
||||||
|
|
||||||
After garbage collection has been run manifest `A` and its blobs remain.
|
|
||||||
|
|
||||||
```
|
|
||||||
A -----> a
|
|
||||||
\--> b
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
## How Garbage Collection works
|
|
||||||
|
|
||||||
Garbage collection runs in two phases. First, in the 'mark' phase, the process
|
|
||||||
scans all the manifests in the registry. From these manifests, it constructs a
|
|
||||||
set of content address digests. This set is the 'mark set' and denotes the set
|
|
||||||
of blobs to *not* delete. Secondly, in the 'sweep' phase, the process scans all
|
|
||||||
the blobs and if a blob's content address digest is not in the mark set, the
|
|
||||||
process will delete it.
|
|
||||||
|
|
||||||
|
|
||||||
> **NOTE** You should ensure that the registry is in read-only mode or not running at
|
|
||||||
> all. If you were to upload an image while garbage collection is running, there is the
|
|
||||||
> risk that the image's layers will be mistakenly deleted, leading to a corrupted image.
|
|
||||||
|
|
||||||
This type of garbage collection is known as stop-the-world garbage collection. In future
|
|
||||||
registry versions the intention is that garbage collection will be an automated background
|
|
||||||
action and this manual process will no longer apply.
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# Running garbage collection
|
|
||||||
|
|
||||||
Garbage collection can be run as follows
|
|
||||||
|
|
||||||
`bin/registry garbage-collect [--dry-run] /path/to/config.yml`
|
|
||||||
|
|
||||||
The garbage-collect command accepts a `--dry-run` parameter, which will print the progress
|
|
||||||
of the mark and sweep phases without removing any data. Running with a log level of `info`
|
|
||||||
will give a clear indication of what will and will not be deleted.
|
|
||||||
|
|
||||||
_Sample output from a dry run garbage collection with registry log level set to `info`_
|
|
||||||
|
|
||||||
```
|
|
||||||
hello-world
|
|
||||||
hello-world: marking manifest sha256:fea8895f450959fa676bcc1df0611ea93823a735a01205fd8622846041d0c7cf
|
|
||||||
hello-world: marking blob sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb
|
|
||||||
hello-world: marking blob sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4
|
|
||||||
hello-world: marking configuration sha256:690ed74de00f99a7d00a98a5ad855ac4febd66412be132438f9b8dbd300a937d
|
|
||||||
ubuntu
|
|
||||||
|
|
||||||
4 blobs marked, 5 blobs eligible for deletion
|
|
||||||
blob eligible for deletion: sha256:28e09fddaacbfc8a13f82871d9d66141a6ed9ca526cb9ed295ef545ab4559b81
|
|
||||||
blob eligible for deletion: sha256:7e15ce58ccb2181a8fced7709e9893206f0937cc9543bc0c8178ea1cf4d7e7b5
|
|
||||||
blob eligible for deletion: sha256:87192bdbe00f8f2a62527f36bb4c7c7f4eaf9307e4b87e8334fb6abec1765bcb
|
|
||||||
blob eligible for deletion: sha256:b549a9959a664038fc35c155a95742cf12297672ca0ae35735ec027d55bf4e97
|
|
||||||
blob eligible for deletion: sha256:f251d679a7c61455f06d793e43c06786d7766c88b8c24edf242b2c08e3c3f599
|
|
||||||
```
|
|
||||||
|
|
||||||
70
vendor/github.com/docker/distribution/docs/glossary.md
generated
vendored
70
vendor/github.com/docker/distribution/docs/glossary.md
generated
vendored
@@ -1,70 +0,0 @@
|
|||||||
<!--[metadata]>
|
|
||||||
+++
|
|
||||||
draft = true
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Glossary
|
|
||||||
|
|
||||||
This page contains definitions for distribution related terms.
|
|
||||||
|
|
||||||
<dl>
|
|
||||||
<dt id="blob"><h4>Blob</h4></dt>
|
|
||||||
<dd>
|
|
||||||
<blockquote>A blob is any kind of content that is stored by a Registry under a content-addressable identifier (a "digest").</blockquote>
|
|
||||||
<p>
|
|
||||||
<a href="#layer">Layers</a> are a good example of "blobs".
|
|
||||||
</p>
|
|
||||||
</dd>
|
|
||||||
|
|
||||||
<dt id="image"><h4>Image</h4></dt>
|
|
||||||
<dd>
|
|
||||||
<blockquote>An image is a named set of immutable data from which a Docker container can be created.</blockquote>
|
|
||||||
<p>
|
|
||||||
An image is represented by a json file called a <a href="#manifest">manifest</a>, and is conceptually a set of <a href="#layer">layers</a>.
|
|
||||||
|
|
||||||
Image names indicate the location where they can be pulled from and pushed to, as they usually start with a <a href="#registry">registry</a> domain name and port.
|
|
||||||
|
|
||||||
</p>
|
|
||||||
</dd>
|
|
||||||
|
|
||||||
<dt id="layer"><h4>Layer</h4></dt>
|
|
||||||
<dd>
|
|
||||||
<blockquote>A layer is a tar archive bundling partial content from a filesystem.</blockquote>
|
|
||||||
<p>
|
|
||||||
Layers from an <a href="#image">image</a> are usually extracted in order on top of each other to make up a root filesystem from which containers run out.
|
|
||||||
</p>
|
|
||||||
</dd>
|
|
||||||
|
|
||||||
<dt id="manifest"><h4>Manifest</h4></dt>
|
|
||||||
<dd><blockquote>A manifest is the JSON representation of an image.</blockquote></dd>
|
|
||||||
|
|
||||||
<dt id="namespace"><h4>Namespace</h4></dt>
|
|
||||||
<dd><blockquote>A namespace is a collection of repositories with a common name prefix.</blockquote>
|
|
||||||
<p>
|
|
||||||
The namespace with an empty prefix is considered the Global Namespace.
|
|
||||||
</p>
|
|
||||||
</dd>
|
|
||||||
|
|
||||||
<dt id="registry"><h4>Registry</h4></dt>
|
|
||||||
<dd><blockquote>A registry is a service that lets you store and deliver <a href="#image">images</a>.</blockquote>
|
|
||||||
</dd>
|
|
||||||
|
|
||||||
<dt id="repository"><h4>Repository</h4></dt>
|
|
||||||
<dd>
|
|
||||||
<blockquote>A repository is a set of data containing all versions of a given image.</blockquote>
|
|
||||||
</dd>
|
|
||||||
|
|
||||||
<dt id="scope"><h4>Scope</h4></dt>
|
|
||||||
<dd><blockquote>A scope is the portion of a namespace onto which a given authorization token is granted.</blockquote></dd>
|
|
||||||
|
|
||||||
<dt id="tag"><h4>Tag</h4></dt>
|
|
||||||
<dd><blockquote>A tag is conceptually a "version" of a <a href="#image">named image</a>.</blockquote>
|
|
||||||
<p>
|
|
||||||
Example: `docker pull myimage:latest` instructs docker to pull the image "myimage" in version "latest".
|
|
||||||
</p>
|
|
||||||
|
|
||||||
</dd>
|
|
||||||
|
|
||||||
|
|
||||||
</dl>
|
|
||||||
24
vendor/github.com/docker/distribution/docs/help.md
generated
vendored
24
vendor/github.com/docker/distribution/docs/help.md
generated
vendored
@@ -1,24 +0,0 @@
|
|||||||
<!--[metadata]>
|
|
||||||
+++
|
|
||||||
title = "Getting help"
|
|
||||||
description = "Getting help with the Registry"
|
|
||||||
keywords = ["registry, on-prem, images, tags, repository, distribution, help, 101, TL;DR"]
|
|
||||||
[menu.main]
|
|
||||||
parent="smn_registry"
|
|
||||||
weight=9
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Getting help
|
|
||||||
|
|
||||||
If you need help, or just want to chat, you can reach us:
|
|
||||||
|
|
||||||
- on irc: `#docker-distribution` on freenode
|
|
||||||
- on the [mailing list](https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution) (mail at <distribution@dockerproject.org>)
|
|
||||||
|
|
||||||
If you want to report a bug:
|
|
||||||
|
|
||||||
- be sure to first read about [how to contribute](https://github.com/docker/distribution/blob/master/CONTRIBUTING.md)
|
|
||||||
- you can then do so on the [GitHub project bugtracker](https://github.com/docker/distribution/issues)
|
|
||||||
|
|
||||||
You can also find out more about the Docker's project [Getting Help resources](/opensource/get-help.md).
|
|
||||||
1
vendor/github.com/docker/distribution/docs/images/notifications.gliffy
generated
vendored
1
vendor/github.com/docker/distribution/docs/images/notifications.gliffy
generated
vendored
File diff suppressed because one or more lines are too long
BIN
vendor/github.com/docker/distribution/docs/images/notifications.png
generated
vendored
BIN
vendor/github.com/docker/distribution/docs/images/notifications.png
generated
vendored
Binary file not shown.
|
Before Width: | Height: | Size: 37 KiB |
1
vendor/github.com/docker/distribution/docs/images/notifications.svg
generated
vendored
1
vendor/github.com/docker/distribution/docs/images/notifications.svg
generated
vendored
File diff suppressed because one or more lines are too long
|
Before Width: | Height: | Size: 31 KiB |
BIN
vendor/github.com/docker/distribution/docs/images/v2-registry-auth.png
generated
vendored
BIN
vendor/github.com/docker/distribution/docs/images/v2-registry-auth.png
generated
vendored
Binary file not shown.
|
Before Width: | Height: | Size: 12 KiB |
67
vendor/github.com/docker/distribution/docs/index.md
generated
vendored
67
vendor/github.com/docker/distribution/docs/index.md
generated
vendored
@@ -1,67 +0,0 @@
|
|||||||
<!--[metadata]>
|
|
||||||
+++
|
|
||||||
title = "Registry Overview"
|
|
||||||
description = "High-level overview of the Registry"
|
|
||||||
keywords = ["registry, on-prem, images, tags, repository, distribution"]
|
|
||||||
aliases = ["/registry/overview/"]
|
|
||||||
[menu.main]
|
|
||||||
parent="smn_registry"
|
|
||||||
weight=1
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Docker Registry
|
|
||||||
|
|
||||||
## What it is
|
|
||||||
|
|
||||||
The Registry is a stateless, highly scalable server side application that stores and lets you distribute Docker images.
|
|
||||||
The Registry is open-source, under the permissive [Apache license](http://en.wikipedia.org/wiki/Apache_License).
|
|
||||||
|
|
||||||
## Why use it
|
|
||||||
|
|
||||||
You should use the Registry if you want to:
|
|
||||||
|
|
||||||
* tightly control where your images are being stored
|
|
||||||
* fully own your images distribution pipeline
|
|
||||||
* integrate image storage and distribution tightly into your in-house development workflow
|
|
||||||
|
|
||||||
## Alternatives
|
|
||||||
|
|
||||||
Users looking for a zero maintenance, ready-to-go solution are encouraged to head-over to the [Docker Hub](https://hub.docker.com), which provides a free-to-use, hosted Registry, plus additional features (organization accounts, automated builds, and more).
|
|
||||||
|
|
||||||
Users looking for a commercially supported version of the Registry should look into [Docker Trusted Registry](https://docs.docker.com/docker-trusted-registry/overview/).
|
|
||||||
|
|
||||||
## Requirements
|
|
||||||
|
|
||||||
The Registry is compatible with Docker engine **version 1.6.0 or higher**.
|
|
||||||
If you really need to work with older Docker versions, you should look into the [old python registry](https://github.com/docker/docker-registry).
|
|
||||||
|
|
||||||
## TL;DR
|
|
||||||
|
|
||||||
Start your registry
|
|
||||||
|
|
||||||
docker run -d -p 5000:5000 --name registry registry:2
|
|
||||||
|
|
||||||
Pull (or build) some image from the hub
|
|
||||||
|
|
||||||
docker pull ubuntu
|
|
||||||
|
|
||||||
Tag the image so that it points to your registry
|
|
||||||
|
|
||||||
docker tag ubuntu localhost:5000/myfirstimage
|
|
||||||
|
|
||||||
Push it
|
|
||||||
|
|
||||||
docker push localhost:5000/myfirstimage
|
|
||||||
|
|
||||||
Pull it back
|
|
||||||
|
|
||||||
docker pull localhost:5000/myfirstimage
|
|
||||||
|
|
||||||
Now stop your registry and remove all data
|
|
||||||
|
|
||||||
docker stop registry && docker rm -v registry
|
|
||||||
|
|
||||||
## Next
|
|
||||||
|
|
||||||
You should now read the [detailed introduction about the registry](introduction.md), or jump directly to [deployment instructions](deploying.md).
|
|
||||||
116
vendor/github.com/docker/distribution/docs/insecure.md
generated
vendored
116
vendor/github.com/docker/distribution/docs/insecure.md
generated
vendored
@@ -1,116 +0,0 @@
|
|||||||
<!--[metadata]>
|
|
||||||
+++
|
|
||||||
title = "Testing an insecure registry"
|
|
||||||
description = "Deploying a Registry in an insecure fashion"
|
|
||||||
keywords = ["registry, on-prem, images, tags, repository, distribution, insecure"]
|
|
||||||
[menu.main]
|
|
||||||
parent="smn_registry_ref"
|
|
||||||
weight=5
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Insecure Registry
|
|
||||||
|
|
||||||
While it's highly recommended to secure your registry using a TLS certificate
|
|
||||||
issued by a known CA, you may alternatively decide to use self-signed
|
|
||||||
certificates, or even use your registry over plain http.
|
|
||||||
|
|
||||||
You have to understand the downsides in doing so, and the extra burden in
|
|
||||||
configuration.
|
|
||||||
|
|
||||||
## Deploying a plain HTTP registry
|
|
||||||
|
|
||||||
> **Warning**: it's not possible to use an insecure registry with basic authentication.
|
|
||||||
|
|
||||||
This basically tells Docker to entirely disregard security for your registry.
|
|
||||||
While this is relatively easy to configure the daemon in this way, it is
|
|
||||||
**very** insecure. It does expose your registry to trivial MITM. Only use this
|
|
||||||
solution for isolated testing or in a tightly controlled, air-gapped
|
|
||||||
environment.
|
|
||||||
|
|
||||||
1. Open the `/etc/default/docker` file or `/etc/sysconfig/docker` for editing.
|
|
||||||
|
|
||||||
Depending on your operating system, your Engine daemon start options.
|
|
||||||
|
|
||||||
2. Edit (or add) the `DOCKER_OPTS` line and add the `--insecure-registry` flag.
|
|
||||||
|
|
||||||
This flag takes the URL of your registry, for example.
|
|
||||||
|
|
||||||
`DOCKER_OPTS="--insecure-registry myregistrydomain.com:5000"`
|
|
||||||
|
|
||||||
3. Close and save the configuration file.
|
|
||||||
|
|
||||||
4. Restart your Docker daemon
|
|
||||||
|
|
||||||
The command you use to restart the daemon depends on your operating system.
|
|
||||||
For example, on Ubuntu, this is usually the `service docker stop` and `service
|
|
||||||
docker start` command.
|
|
||||||
|
|
||||||
5. Repeat this configuration on every Engine host that wants to access your registry.
|
|
||||||
|
|
||||||
|
|
||||||
## Using self-signed certificates
|
|
||||||
|
|
||||||
> **Warning**: using this along with basic authentication requires to **also** trust the certificate into the OS cert store for some versions of docker (see below)
|
|
||||||
|
|
||||||
This is more secure than the insecure registry solution. You must configure every docker daemon that wants to access your registry
|
|
||||||
|
|
||||||
1. Generate your own certificate:
|
|
||||||
|
|
||||||
```
|
|
||||||
mkdir -p certs && openssl req \
|
|
||||||
-newkey rsa:4096 -nodes -sha256 -keyout certs/domain.key \
|
|
||||||
-x509 -days 365 -out certs/domain.crt
|
|
||||||
```
|
|
||||||
|
|
||||||
2. Be sure to use the name `myregistrydomain.com` as a CN.
|
|
||||||
|
|
||||||
3. Use the result to [start your registry with TLS enabled](./deploying.md#get-a-certificate)
|
|
||||||
|
|
||||||
4. Instruct every docker daemon to trust that certificate.
|
|
||||||
|
|
||||||
This is done by copying the `domain.crt` file to `/etc/docker/certs.d/myregistrydomain.com:5000/ca.crt`.
|
|
||||||
|
|
||||||
5. Don't forget to restart the Engine daemon.
|
|
||||||
|
|
||||||
## Troubleshooting insecure registry
|
|
||||||
|
|
||||||
This section lists some common failures and how to recover from them.
|
|
||||||
|
|
||||||
### Failing...
|
|
||||||
|
|
||||||
Failing to configure the Engine daemon and trying to pull from a registry that is not using
|
|
||||||
TLS will result in the following message:
|
|
||||||
|
|
||||||
```
|
|
||||||
FATA[0000] Error response from daemon: v1 ping attempt failed with error:
|
|
||||||
Get https://myregistrydomain.com:5000/v1/_ping: tls: oversized record received with length 20527.
|
|
||||||
If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add
|
|
||||||
`--insecure-registry myregistrydomain.com:5000` to the daemon's arguments.
|
|
||||||
In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag;
|
|
||||||
simply place the CA certificate at /etc/docker/certs.d/myregistrydomain.com:5000/ca.crt
|
|
||||||
```
|
|
||||||
|
|
||||||
### Docker still complains about the certificate when using authentication?
|
|
||||||
|
|
||||||
When using authentication, some versions of docker also require you to trust the certificate at the OS level. Usually, on Ubuntu this is done with:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
$ cp certs/domain.crt /usr/local/share/ca-certificates/myregistrydomain.com.crt
|
|
||||||
update-ca-certificates
|
|
||||||
```
|
|
||||||
|
|
||||||
... and on Red Hat (and its derivatives) with:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
cp certs/domain.crt /etc/pki/ca-trust/source/anchors/myregistrydomain.com.crt
|
|
||||||
update-ca-trust
|
|
||||||
```
|
|
||||||
|
|
||||||
... On some distributions, e.g. Oracle Linux 6, the Shared System Certificates feature needs to be manually enabled:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
$ update-ca-trust enable
|
|
||||||
```
|
|
||||||
|
|
||||||
Now restart docker (`service docker stop && service docker start`, or any other way you use to restart docker).
|
|
||||||
55
vendor/github.com/docker/distribution/docs/introduction.md
generated
vendored
55
vendor/github.com/docker/distribution/docs/introduction.md
generated
vendored
@@ -1,55 +0,0 @@
|
|||||||
<!--[metadata]>
|
|
||||||
+++
|
|
||||||
title = "Understanding the Registry"
|
|
||||||
description = "Explains what the Registry is, basic use cases and requirements"
|
|
||||||
keywords = ["registry, on-prem, images, tags, repository, distribution, use cases, requirements"]
|
|
||||||
[menu.main]
|
|
||||||
parent="smn_registry"
|
|
||||||
weight=2
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Understanding the Registry
|
|
||||||
|
|
||||||
A registry is a storage and content delivery system, holding named Docker images, available in different tagged versions.
|
|
||||||
|
|
||||||
> Example: the image `distribution/registry`, with tags `2.0` and `2.1`.
|
|
||||||
|
|
||||||
Users interact with a registry by using docker push and pull commands.
|
|
||||||
|
|
||||||
> Example: `docker pull registry-1.docker.io/distribution/registry:2.1`.
|
|
||||||
|
|
||||||
Storage itself is delegated to drivers. The default storage driver is the local posix filesystem, which is suitable for development or small deployments. Additional cloud-based storage drivers like S3, Microsoft Azure, OpenStack Swift and Aliyun OSS are also supported. People looking into using other storage backends may do so by writing their own driver implementing the [Storage API](storage-drivers/index.md).
|
|
||||||
|
|
||||||
Since securing access to your hosted images is paramount, the Registry natively supports TLS and basic authentication.
|
|
||||||
|
|
||||||
The Registry GitHub repository includes additional information about advanced authentication and authorization methods. Only very large or public deployments are expected to extend the Registry in this way.
|
|
||||||
|
|
||||||
Finally, the Registry ships with a robust [notification system](notifications.md), calling webhooks in response to activity, and both extensive logging and reporting, mostly useful for large installations that want to collect metrics.
|
|
||||||
|
|
||||||
## Understanding image naming
|
|
||||||
|
|
||||||
Image names as used in typical docker commands reflect their origin:
|
|
||||||
|
|
||||||
* `docker pull ubuntu` instructs docker to pull an image named `ubuntu` from the official Docker Hub. This is simply a shortcut for the longer `docker pull docker.io/library/ubuntu` command
|
|
||||||
* `docker pull myregistrydomain:port/foo/bar` instructs docker to contact the registry located at `myregistrydomain:port` to find the image `foo/bar`
|
|
||||||
|
|
||||||
You can find out more about the various Docker commands dealing with images in the [official Docker engine documentation](/engine/reference/commandline/cli.md).
|
|
||||||
|
|
||||||
## Use cases
|
|
||||||
|
|
||||||
Running your own Registry is a great solution to integrate with and complement your CI/CD system. In a typical workflow, a commit to your source revision control system would trigger a build on your CI system, which would then push a new image to your Registry if the build is successful. A notification from the Registry would then trigger a deployment on a staging environment, or notify other systems that a new image is available.
|
|
||||||
|
|
||||||
It's also an essential component if you want to quickly deploy a new image over a large cluster of machines.
|
|
||||||
|
|
||||||
Finally, it's the best way to distribute images inside an isolated network.
|
|
||||||
|
|
||||||
## Requirements
|
|
||||||
|
|
||||||
You absolutely need to be familiar with Docker, specifically with regard to pushing and pulling images. You must understand the difference between the daemon and the cli, and at least grasp basic concepts about networking.
|
|
||||||
|
|
||||||
Also, while just starting a registry is fairly easy, operating it in a production environment requires operational skills, just like any other service. You are expected to be familiar with systems availability and scalability, logging and log processing, systems monitoring, and security 101. Strong understanding of http and overall network communications, plus familiarity with golang are certainly useful as well for advanced operations or hacking.
|
|
||||||
|
|
||||||
## Next
|
|
||||||
|
|
||||||
Dive into [deploying your registry](deploying.md)
|
|
||||||
23
vendor/github.com/docker/distribution/docs/menu.md
generated
vendored
23
vendor/github.com/docker/distribution/docs/menu.md
generated
vendored
@@ -1,23 +0,0 @@
|
|||||||
<!--[metadata]>
|
|
||||||
+++
|
|
||||||
title = "Docker Registry"
|
|
||||||
description = "High-level overview of the Registry"
|
|
||||||
keywords = ["registry, on-prem, images, tags, repository, distribution"]
|
|
||||||
type = "menu"
|
|
||||||
[menu.main]
|
|
||||||
identifier="smn_registry"
|
|
||||||
parent="mn_components"
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Overview of Docker Registry Documentation
|
|
||||||
|
|
||||||
The Docker Registry documentation includes the following topics:
|
|
||||||
|
|
||||||
* [Docker Registry Introduction](index.md)
|
|
||||||
* [Understanding the Registry](introduction.md)
|
|
||||||
* [Deploying a registry server](deploying.md)
|
|
||||||
* [Registry Configuration Reference](configuration.md)
|
|
||||||
* [Notifications](notifications.md)
|
|
||||||
* [Recipes](recipes/index.md)
|
|
||||||
* [Getting help](help.md)
|
|
||||||
30
vendor/github.com/docker/distribution/docs/migration.md
generated
vendored
30
vendor/github.com/docker/distribution/docs/migration.md
generated
vendored
@@ -1,30 +0,0 @@
|
|||||||
<!--[metadata]>
|
|
||||||
+++
|
|
||||||
draft = true
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Migrating a 1.0 registry to 2.0
|
|
||||||
|
|
||||||
TODO: This needs to be revised in light of Olivier's work
|
|
||||||
|
|
||||||
A few thoughts here:
|
|
||||||
|
|
||||||
There was no "1.0". There was an implementation of the Registry API V1 but only a version 0.9 of the service was released.
|
|
||||||
The image formats are not compatible in any way. One must convert v1 images to v2 images using a docker client or other tool.
|
|
||||||
One can migrate images from one version to the other by pulling images from the old registry and pushing them to the v2 registry.
|
|
||||||
|
|
||||||
-----
|
|
||||||
|
|
||||||
The Docker Registry 2.0 is backward compatible with images created by the earlier specification. If you are migrating a private registry to version 2.0, you should use the following process:
|
|
||||||
|
|
||||||
1. Configure and test a 2.0 registry image in a sandbox environment.
|
|
||||||
|
|
||||||
2. Back up your production image storage.
|
|
||||||
|
|
||||||
Your production image storage should reside on a volume or storage backend.
|
|
||||||
Make sure you have a backup of its contents.
|
|
||||||
|
|
||||||
3. Stop your existing registry service.
|
|
||||||
|
|
||||||
4. Restart your registry with your tested 2.0 image.
|
|
||||||
350
vendor/github.com/docker/distribution/docs/notifications.md
generated
vendored
350
vendor/github.com/docker/distribution/docs/notifications.md
generated
vendored
@@ -1,350 +0,0 @@
|
|||||||
<!--[metadata]>
|
|
||||||
+++
|
|
||||||
title = "Working with notifications"
|
|
||||||
description = "Explains how to work with registry notifications"
|
|
||||||
keywords = ["registry, on-prem, images, tags, repository, distribution, notifications, advanced"]
|
|
||||||
[menu.main]
|
|
||||||
parent="smn_registry"
|
|
||||||
weight=5
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Notifications
|
|
||||||
|
|
||||||
The Registry supports sending webhook notifications in response to events
|
|
||||||
happening within the registry. Notifications are sent in response to manifest
|
|
||||||
pushes and pulls and layer pushes and pulls. These actions are serialized into
|
|
||||||
events. The events are queued into a registry-internal broadcast system which
|
|
||||||
queues and dispatches events to [_Endpoints_](#endpoints).
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
## Endpoints
|
|
||||||
|
|
||||||
Notifications are sent to _endpoints_ via HTTP requests. Each configured
|
|
||||||
endpoint has isolated queues, retry configuration and http targets within each
|
|
||||||
instance of a registry. When an action happens within the registry, it is
|
|
||||||
converted into an event which is dropped into an inmemory queue. When the
|
|
||||||
event reaches the end of the queue, an http request is made to the endpoint
|
|
||||||
until the request succeeds. The events are sent serially to each endpoint but
|
|
||||||
order is not guaranteed.
|
|
||||||
|
|
||||||
## Configuration
|
|
||||||
|
|
||||||
To setup a registry instance to send notifications to endpoints, one must add
|
|
||||||
them to the configuration. A simple example follows:
|
|
||||||
|
|
||||||
notifications:
|
|
||||||
endpoints:
|
|
||||||
- name: alistener
|
|
||||||
url: https://mylistener.example.com/event
|
|
||||||
headers:
|
|
||||||
Authorization: [Bearer <your token, if needed>]
|
|
||||||
timeout: 500ms
|
|
||||||
threshold: 5
|
|
||||||
backoff: 1s
|
|
||||||
|
|
||||||
The above would configure the registry with an endpoint to send events to
|
|
||||||
`https://mylistener.example.com/event`, with the header "Authorization: Bearer
|
|
||||||
<your token, if needed>". The request would timeout after 500 milliseconds. If
|
|
||||||
5 failures happen consecutively, the registry will backoff for 1 second before
|
|
||||||
trying again.
|
|
||||||
|
|
||||||
For details on the fields, please see the [configuration documentation](configuration.md#notifications).
|
|
||||||
|
|
||||||
A properly configured endpoint should lead to a log message from the registry
|
|
||||||
upon startup:
|
|
||||||
|
|
||||||
```
|
|
||||||
INFO[0000] configuring endpoint alistener (https://mylistener.example.com/event), timeout=500ms, headers=map[Authorization:[Bearer <your token if needed>]] app.id=812bfeb2-62d6-43cf-b0c6-152f541618a3 environment=development service=registry
|
|
||||||
```
|
|
||||||
|
|
||||||
## Events
|
|
||||||
|
|
||||||
Events have a well-defined JSON structure and are sent as the body of
|
|
||||||
notification requests. One or more events are sent in a structure called an
|
|
||||||
envelope. Each event has a unique id that can be used to uniquely identify incoming
|
|
||||||
requests, if required. Along with that, an _action_ is provided with a
|
|
||||||
_target_, identifying the object mutated during the event.
|
|
||||||
|
|
||||||
The fields available in an `event` are described below.
|
|
||||||
|
|
||||||
Field | Type | Description
|
|
||||||
----- | ----- | -------------
|
|
||||||
id | string |ID provides a unique identifier for the event.
|
|
||||||
timestamp | Time | Timestamp is the time at which the event occurred.
|
|
||||||
action | string | Action indicates what action encompasses the provided event.
|
|
||||||
target | distribution.Descriptor | Target uniquely describes the target of the event.
|
|
||||||
length | int | Length in bytes of content. Same as Size field in Descriptor.
|
|
||||||
repository | string | Repository identifies the named repository.
|
|
||||||
fromRepository | string | FromRepository identifies the named repository which a blob was mounted from if appropriate.
|
|
||||||
url | string | URL provides a direct link to the content.
|
|
||||||
tag | string | Tag identifies a tag name in tag events
|
|
||||||
request | [RequestRecord](https://godoc.org/github.com/docker/distribution/notifications#RequestRecord) | Request covers the request that generated the event.
|
|
||||||
actor | [ActorRecord](https://godoc.org/github.com/docker/distribution/notifications#ActorRecord). | Actor specifies the agent that initiated the event. For most situations, this could be from the authorization context of the request.
|
|
||||||
source | [SourceRecord](https://godoc.org/github.com/docker/distribution/notifications#SourceRecord) | Source identifies the registry node that generated the event. Put differently, while the actor "initiates" the event, the source "generates" it.
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
The following is an example of a JSON event, sent in response to the push of a
|
|
||||||
manifest:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"events": [
|
|
||||||
{
|
|
||||||
"id": "320678d8-ca14-430f-8bb6-4ca139cd83f7",
|
|
||||||
"timestamp": "2016-03-09T14:44:26.402973972-08:00",
|
|
||||||
"action": "pull",
|
|
||||||
"target": {
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"size": 708,
|
|
||||||
"digest": "sha256:fea8895f450959fa676bcc1df0611ea93823a735a01205fd8622846041d0c7cf",
|
|
||||||
"length": 708,
|
|
||||||
"repository": "hello-world",
|
|
||||||
"url": "http://192.168.100.227:5000/v2/hello-world/manifests/sha256:fea8895f450959fa676bcc1df0611ea93823a735a01205fd8622846041d0c7cf",
|
|
||||||
"tag": "latest"
|
|
||||||
},
|
|
||||||
"request": {
|
|
||||||
"id": "6df24a34-0959-4923-81ca-14f09767db19",
|
|
||||||
"addr": "192.168.64.11:42961",
|
|
||||||
"host": "192.168.100.227:5000",
|
|
||||||
"method": "GET",
|
|
||||||
"useragent": "curl/7.38.0"
|
|
||||||
},
|
|
||||||
"actor": {},
|
|
||||||
"source": {
|
|
||||||
"addr": "xtal.local:5000",
|
|
||||||
"instanceID": "a53db899-3b4b-4a62-a067-8dd013beaca4"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
The target struct of events which are sent when manifests and blobs are deleted
|
|
||||||
will contain a subset of the data contained in Get and Put events. Specifically,
|
|
||||||
only the digest and repository will be sent.
|
|
||||||
|
|
||||||
```json
|
|
||||||
"target": {
|
|
||||||
"digest": "sha256:d89e1bee20d9cb344674e213b581f14fbd8e70274ecf9d10c514bab78a307845",
|
|
||||||
"repository": "library/test"
|
|
||||||
},
|
|
||||||
```
|
|
||||||
|
|
||||||
> __NOTE:__ As of version 2.1, the `length` field for event targets
|
|
||||||
> is being deprecated for the `size` field, bringing the target in line with
|
|
||||||
> common nomenclature. Both will continue to be set for the foreseeable
|
|
||||||
> future. Newer code should favor `size` but accept either.
|
|
||||||
|
|
||||||
## Envelope
|
|
||||||
|
|
||||||
The envelope contains one or more events, with the following json structure:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"events": [ ... ],
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
While events may be sent in the same envelope, the set of events within that
|
|
||||||
envelope have no implied relationship. For example, the registry may choose to
|
|
||||||
group unrelated events and send them in the same envelope to reduce the total
|
|
||||||
number of requests.
|
|
||||||
|
|
||||||
The full package has the mediatype
|
|
||||||
"application/vnd.docker.distribution.events.v1+json", which will be set on the
|
|
||||||
request coming to an endpoint.
|
|
||||||
|
|
||||||
An example of a full event may look as follows:
|
|
||||||
|
|
||||||
```json
|
|
||||||
GET /callback
|
|
||||||
Host: application/vnd.docker.distribution.events.v1+json
|
|
||||||
Authorization: Bearer <your token, if needed>
|
|
||||||
Content-Type: application/vnd.docker.distribution.events.v1+json
|
|
||||||
|
|
||||||
{
|
|
||||||
"events": [
|
|
||||||
{
|
|
||||||
"id": "asdf-asdf-asdf-asdf-0",
|
|
||||||
"timestamp": "2006-01-02T15:04:05Z",
|
|
||||||
"action": "push",
|
|
||||||
"target": {
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v1+json",
|
|
||||||
"length": 1,
|
|
||||||
"digest": "sha256:fea8895f450959fa676bcc1df0611ea93823a735a01205fd8622846041d0c7cf",
|
|
||||||
"repository": "library/test",
|
|
||||||
"url": "http://example.com/v2/library/test/manifests/sha256:c3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"
|
|
||||||
},
|
|
||||||
"request": {
|
|
||||||
"id": "asdfasdf",
|
|
||||||
"addr": "client.local",
|
|
||||||
"host": "registrycluster.local",
|
|
||||||
"method": "PUT",
|
|
||||||
"useragent": "test/0.1"
|
|
||||||
},
|
|
||||||
"actor": {
|
|
||||||
"name": "test-actor"
|
|
||||||
},
|
|
||||||
"source": {
|
|
||||||
"addr": "hostname.local:port"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"id": "asdf-asdf-asdf-asdf-1",
|
|
||||||
"timestamp": "2006-01-02T15:04:05Z",
|
|
||||||
"action": "push",
|
|
||||||
"target": {
|
|
||||||
"mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar",
|
|
||||||
"length": 2,
|
|
||||||
"digest": "sha256:c3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5",
|
|
||||||
"repository": "library/test",
|
|
||||||
"url": "http://example.com/v2/library/test/blobs/sha256:c3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"
|
|
||||||
},
|
|
||||||
"request": {
|
|
||||||
"id": "asdfasdf",
|
|
||||||
"addr": "client.local",
|
|
||||||
"host": "registrycluster.local",
|
|
||||||
"method": "PUT",
|
|
||||||
"useragent": "test/0.1"
|
|
||||||
},
|
|
||||||
"actor": {
|
|
||||||
"name": "test-actor"
|
|
||||||
},
|
|
||||||
"source": {
|
|
||||||
"addr": "hostname.local:port"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"id": "asdf-asdf-asdf-asdf-2",
|
|
||||||
"timestamp": "2006-01-02T15:04:05Z",
|
|
||||||
"action": "push",
|
|
||||||
"target": {
|
|
||||||
"mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar",
|
|
||||||
"length": 3,
|
|
||||||
"digest": "sha256:c3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5",
|
|
||||||
"repository": "library/test",
|
|
||||||
"url": "http://example.com/v2/library/test/blobs/sha256:c3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"
|
|
||||||
},
|
|
||||||
"request": {
|
|
||||||
"id": "asdfasdf",
|
|
||||||
"addr": "client.local",
|
|
||||||
"host": "registrycluster.local",
|
|
||||||
"method": "PUT",
|
|
||||||
"useragent": "test/0.1"
|
|
||||||
},
|
|
||||||
"actor": {
|
|
||||||
"name": "test-actor"
|
|
||||||
},
|
|
||||||
"source": {
|
|
||||||
"addr": "hostname.local:port"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Responses
|
|
||||||
|
|
||||||
The registry is fairly accepting of the response codes from endpoints. If an
|
|
||||||
endpoint responds with any 2xx or 3xx response code (after following
|
|
||||||
redirects), the message will be considered delivered and discarded.
|
|
||||||
|
|
||||||
In turn, it is recommended that endpoints are accepting of incoming responses,
|
|
||||||
as well. While the format of event envelopes are standardized by media type,
|
|
||||||
any "pickiness" about validation may cause the queue to backup on the
|
|
||||||
registry.
|
|
||||||
|
|
||||||
## Monitoring
|
|
||||||
|
|
||||||
The state of the endpoints are reported via the debug/vars http interface,
|
|
||||||
usually configured to `http://localhost:5001/debug/vars`. Information such as
|
|
||||||
configuration and metrics are available by endpoint.
|
|
||||||
|
|
||||||
The following provides an example of a few endpoints that have experienced
|
|
||||||
several failures and have since recovered:
|
|
||||||
|
|
||||||
```json
|
|
||||||
"notifications":{
|
|
||||||
"endpoints":[
|
|
||||||
{
|
|
||||||
"name":"local-5003",
|
|
||||||
"url":"http://localhost:5003/callback",
|
|
||||||
"Headers":{
|
|
||||||
"Authorization":[
|
|
||||||
"Bearer \u003can example token\u003e"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"Timeout":1000000000,
|
|
||||||
"Threshold":10,
|
|
||||||
"Backoff":1000000000,
|
|
||||||
"Metrics":{
|
|
||||||
"Pending":76,
|
|
||||||
"Events":76,
|
|
||||||
"Successes":0,
|
|
||||||
"Failures":0,
|
|
||||||
"Errors":46,
|
|
||||||
"Statuses":{
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name":"local-8083",
|
|
||||||
"url":"http://localhost:8083/callback",
|
|
||||||
"Headers":null,
|
|
||||||
"Timeout":1000000000,
|
|
||||||
"Threshold":10,
|
|
||||||
"Backoff":1000000000,
|
|
||||||
"Metrics":{
|
|
||||||
"Pending":0,
|
|
||||||
"Events":76,
|
|
||||||
"Successes":76,
|
|
||||||
"Failures":0,
|
|
||||||
"Errors":28,
|
|
||||||
"Statuses":{
|
|
||||||
"202 Accepted":76
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
If using notification as part of a larger application, it is _critical_ to
|
|
||||||
monitor the size ("Pending" above) of the endpoint queues. If failures or
|
|
||||||
queue sizes are increasing, it can indicate a larger problem.
|
|
||||||
|
|
||||||
The logs are also a valuable resource for monitoring problems. A failing
|
|
||||||
endpoint will lead to messages similar to the following:
|
|
||||||
|
|
||||||
```
|
|
||||||
ERRO[0340] retryingsink: error writing events: httpSink{http://localhost:5003/callback}: error posting: Post http://localhost:5003/callback: dial tcp 127.0.0.1:5003: connection refused, retrying
|
|
||||||
WARN[0340] httpSink{http://localhost:5003/callback} encountered too many errors, backing off
|
|
||||||
```
|
|
||||||
|
|
||||||
The above indicates that several errors have led to a backoff and the registry
|
|
||||||
will wait before retrying.
|
|
||||||
|
|
||||||
## Considerations
|
|
||||||
|
|
||||||
Currently, the queues are inmemory, so endpoints should be _reasonably
|
|
||||||
reliable_. They are designed to make a best-effort to send the messages but if
|
|
||||||
an instance is lost, messages may be dropped. If an endpoint goes down, care
|
|
||||||
should be taken to ensure that the registry instance is not terminated before
|
|
||||||
the endpoint comes back up or messages will be lost.
|
|
||||||
|
|
||||||
This can be mitigated by running endpoints in close proximity to the registry
|
|
||||||
instances. One could run an endpoint that pages to disk and then forwards a
|
|
||||||
request to provide better durability.
|
|
||||||
|
|
||||||
The notification system is designed around a series of interchangeable _sinks_
|
|
||||||
which can be wired up to achieve interesting behavior. If this system doesn't
|
|
||||||
provide acceptable guarantees, adding a transactional `Sink` to the registry
|
|
||||||
is a possibility, although it may have an effect on request service time.
|
|
||||||
Please see the
|
|
||||||
[godoc](http://godoc.org/github.com/docker/distribution/notifications#Sink)
|
|
||||||
for more information.
|
|
||||||
215
vendor/github.com/docker/distribution/docs/recipes/apache.md
generated
vendored
215
vendor/github.com/docker/distribution/docs/recipes/apache.md
generated
vendored
@@ -1,215 +0,0 @@
|
|||||||
<!--[metadata]>
|
|
||||||
+++
|
|
||||||
title = "Authenticating proxy with apache"
|
|
||||||
description = "Restricting access to your registry using an apache proxy"
|
|
||||||
keywords = ["registry, on-prem, images, tags, repository, distribution, authentication, proxy, apache, httpd, TLS, recipe, advanced"]
|
|
||||||
[menu.main]
|
|
||||||
parent="smn_recipes"
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Authenticating proxy with apache
|
|
||||||
|
|
||||||
## Use-case
|
|
||||||
|
|
||||||
People already relying on an apache proxy to authenticate their users to other services might want to leverage it and have Registry communications tunneled through the same pipeline.
|
|
||||||
|
|
||||||
Usually, that includes enterprise setups using LDAP/AD on the backend and a SSO mechanism fronting their internal http portal.
|
|
||||||
|
|
||||||
### Alternatives
|
|
||||||
|
|
||||||
If you just want authentication for your registry, and are happy maintaining users access separately, you should really consider sticking with the native [basic auth registry feature](../deploying.md#native-basic-auth).
|
|
||||||
|
|
||||||
### Solution
|
|
||||||
|
|
||||||
With the method presented here, you implement basic authentication for docker engines in a reverse proxy that sits in front of your registry.
|
|
||||||
|
|
||||||
While we use a simple htpasswd file as an example, any other apache authentication backend should be fairly easy to implement once you are done with the example.
|
|
||||||
|
|
||||||
We also implement push restriction (to a limited user group) for the sake of the example. Again, you should modify this to fit your mileage.
|
|
||||||
|
|
||||||
### Gotchas
|
|
||||||
|
|
||||||
While this model gives you the ability to use whatever authentication backend you want through the secondary authentication mechanism implemented inside your proxy, it also requires that you move TLS termination from the Registry to the proxy itself.
|
|
||||||
|
|
||||||
Furthermore, introducing an extra http layer in your communication pipeline will make it more complex to deploy, maintain, and debug, and will possibly create issues.
|
|
||||||
|
|
||||||
## Setting things up
|
|
||||||
|
|
||||||
Read again [the requirements](index.md#requirements).
|
|
||||||
|
|
||||||
Ready?
|
|
||||||
|
|
||||||
Run the following script:
|
|
||||||
|
|
||||||
```
|
|
||||||
mkdir -p auth
|
|
||||||
mkdir -p data
|
|
||||||
|
|
||||||
# This is the main apache configuration you will use
|
|
||||||
cat <<EOF > auth/httpd.conf
|
|
||||||
LoadModule headers_module modules/mod_headers.so
|
|
||||||
|
|
||||||
LoadModule authn_file_module modules/mod_authn_file.so
|
|
||||||
LoadModule authn_core_module modules/mod_authn_core.so
|
|
||||||
LoadModule authz_groupfile_module modules/mod_authz_groupfile.so
|
|
||||||
LoadModule authz_user_module modules/mod_authz_user.so
|
|
||||||
LoadModule authz_core_module modules/mod_authz_core.so
|
|
||||||
LoadModule auth_basic_module modules/mod_auth_basic.so
|
|
||||||
LoadModule access_compat_module modules/mod_access_compat.so
|
|
||||||
|
|
||||||
LoadModule log_config_module modules/mod_log_config.so
|
|
||||||
|
|
||||||
LoadModule ssl_module modules/mod_ssl.so
|
|
||||||
|
|
||||||
LoadModule proxy_module modules/mod_proxy.so
|
|
||||||
LoadModule proxy_http_module modules/mod_proxy_http.so
|
|
||||||
|
|
||||||
LoadModule unixd_module modules/mod_unixd.so
|
|
||||||
|
|
||||||
<IfModule ssl_module>
|
|
||||||
SSLRandomSeed startup builtin
|
|
||||||
SSLRandomSeed connect builtin
|
|
||||||
</IfModule>
|
|
||||||
|
|
||||||
<IfModule unixd_module>
|
|
||||||
User daemon
|
|
||||||
Group daemon
|
|
||||||
</IfModule>
|
|
||||||
|
|
||||||
ServerAdmin you@example.com
|
|
||||||
|
|
||||||
ErrorLog /proc/self/fd/2
|
|
||||||
|
|
||||||
LogLevel warn
|
|
||||||
|
|
||||||
<IfModule log_config_module>
|
|
||||||
LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined
|
|
||||||
LogFormat "%h %l %u %t \"%r\" %>s %b" common
|
|
||||||
|
|
||||||
<IfModule logio_module>
|
|
||||||
LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" combinedio
|
|
||||||
</IfModule>
|
|
||||||
|
|
||||||
CustomLog /proc/self/fd/1 common
|
|
||||||
</IfModule>
|
|
||||||
|
|
||||||
ServerRoot "/usr/local/apache2"
|
|
||||||
|
|
||||||
Listen 5043
|
|
||||||
|
|
||||||
<Directory />
|
|
||||||
AllowOverride none
|
|
||||||
Require all denied
|
|
||||||
</Directory>
|
|
||||||
|
|
||||||
<VirtualHost *:5043>
|
|
||||||
|
|
||||||
ServerName myregistrydomain.com
|
|
||||||
|
|
||||||
SSLEngine on
|
|
||||||
SSLCertificateFile /usr/local/apache2/conf/domain.crt
|
|
||||||
SSLCertificateKeyFile /usr/local/apache2/conf/domain.key
|
|
||||||
|
|
||||||
  ## SSL settings recommendation from: https://raymii.org/s/tutorials/Strong_SSL_Security_On_Apache2.html
|
|
||||||
# Anti CRIME
|
|
||||||
SSLCompression off
|
|
||||||
|
|
||||||
# POODLE and other stuff
|
|
||||||
SSLProtocol all -SSLv2 -SSLv3 -TLSv1
|
|
||||||
|
|
||||||
  # Secure cipher suites
|
|
||||||
SSLCipherSuite EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH
|
|
||||||
SSLHonorCipherOrder on
|
|
||||||
|
|
||||||
Header always set "Docker-Distribution-Api-Version" "registry/2.0"
|
|
||||||
Header onsuccess set "Docker-Distribution-Api-Version" "registry/2.0"
|
|
||||||
RequestHeader set X-Forwarded-Proto "https"
|
|
||||||
|
|
||||||
ProxyRequests off
|
|
||||||
ProxyPreserveHost on
|
|
||||||
|
|
||||||
# no proxy for /error/ (Apache HTTPd errors messages)
|
|
||||||
ProxyPass /error/ !
|
|
||||||
|
|
||||||
ProxyPass /v2 http://registry:5000/v2
|
|
||||||
ProxyPassReverse /v2 http://registry:5000/v2
|
|
||||||
|
|
||||||
<Location /v2>
|
|
||||||
Order deny,allow
|
|
||||||
Allow from all
|
|
||||||
AuthName "Registry Authentication"
|
|
||||||
AuthType basic
|
|
||||||
AuthUserFile "/usr/local/apache2/conf/httpd.htpasswd"
|
|
||||||
AuthGroupFile "/usr/local/apache2/conf/httpd.groups"
|
|
||||||
|
|
||||||
    # Read access to authenticated users
|
|
||||||
<Limit GET HEAD>
|
|
||||||
Require valid-user
|
|
||||||
</Limit>
|
|
||||||
|
|
||||||
# Write access to docker-deployer only
|
|
||||||
<Limit POST PUT DELETE PATCH>
|
|
||||||
Require group pusher
|
|
||||||
</Limit>
|
|
||||||
|
|
||||||
</Location>
|
|
||||||
|
|
||||||
</VirtualHost>
|
|
||||||
EOF
|
|
||||||
|
|
||||||
# Now, create a password file for "testuser" and "testpassword"
|
|
||||||
docker run --entrypoint htpasswd httpd:2.4 -Bbn testuser testpassword > auth/httpd.htpasswd
|
|
||||||
# Create another one for "testuserpush" and "testpasswordpush"
|
|
||||||
docker run --entrypoint htpasswd httpd:2.4 -Bbn testuserpush testpasswordpush >> auth/httpd.htpasswd
|
|
||||||
|
|
||||||
# Create your group file
|
|
||||||
echo "pusher: testuserpush" > auth/httpd.groups
|
|
||||||
|
|
||||||
# Copy over your certificate files
|
|
||||||
cp domain.crt auth
|
|
||||||
cp domain.key auth
|
|
||||||
|
|
||||||
# Now create your compose file
|
|
||||||
|
|
||||||
cat <<EOF > docker-compose.yml
|
|
||||||
apache:
|
|
||||||
image: "httpd:2.4"
|
|
||||||
hostname: myregistrydomain.com
|
|
||||||
ports:
|
|
||||||
- 5043:5043
|
|
||||||
links:
|
|
||||||
- registry:registry
|
|
||||||
volumes:
|
|
||||||
- `pwd`/auth:/usr/local/apache2/conf
|
|
||||||
|
|
||||||
registry:
|
|
||||||
image: registry:2
|
|
||||||
ports:
|
|
||||||
- 127.0.0.1:5000:5000
|
|
||||||
volumes:
|
|
||||||
- `pwd`/data:/var/lib/registry
|
|
||||||
|
|
||||||
EOF
|
|
||||||
```
|
|
||||||
|
|
||||||
## Starting and stopping
|
|
||||||
|
|
||||||
Now, start your stack:
|
|
||||||
|
|
||||||
docker-compose up -d
|
|
||||||
|
|
||||||
Login with a "push" authorized user (using `testuserpush` and `testpasswordpush`), then tag and push your first image:
|
|
||||||
|
|
||||||
docker login myregistrydomain.com:5043
|
|
||||||
docker tag ubuntu myregistrydomain.com:5043/test
|
|
||||||
docker push myregistrydomain.com:5043/test
|
|
||||||
|
|
||||||
Now, login with a "pull-only" user (using `testuser` and `testpassword`), then pull back the image:
|
|
||||||
|
|
||||||
docker login myregistrydomain.com:5043
|
|
||||||
docker pull myregistrydomain.com:5043/test
|
|
||||||
|
|
||||||
Verify that the "pull-only" can NOT push:
|
|
||||||
|
|
||||||
docker push myregistrydomain.com:5043/test
|
|
||||||
37
vendor/github.com/docker/distribution/docs/recipes/index.md
generated
vendored
37
vendor/github.com/docker/distribution/docs/recipes/index.md
generated
vendored
@@ -1,37 +0,0 @@
|
|||||||
<!--[metadata]>
|
|
||||||
+++
|
|
||||||
title = "Recipes Overview"
|
|
||||||
description = "Fun stuff to do with your registry"
|
|
||||||
keywords = ["registry, on-prem, images, tags, repository, distribution, recipes, advanced"]
|
|
||||||
[menu.main]
|
|
||||||
parent="smn_recipes"
|
|
||||||
weight=-10
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Recipes
|
|
||||||
|
|
||||||
You will find here a list of "recipes", end-to-end scenarios for exotic or otherwise advanced use-cases.
|
|
||||||
|
|
||||||
Most users are not expected to have a use for these.
|
|
||||||
|
|
||||||
## Requirements
|
|
||||||
|
|
||||||
You should have followed entirely the basic [deployment guide](../deploying.md).
|
|
||||||
|
|
||||||
If you have not, please take the time to do so.
|
|
||||||
|
|
||||||
At this point, it's assumed that:
|
|
||||||
|
|
||||||
* you understand Docker security requirements, and how to configure your docker engines properly
|
|
||||||
* you have installed Docker Compose
|
|
||||||
* it's HIGHLY recommended that you get a certificate from a known CA instead of self-signed certificates
|
|
||||||
* inside the current directory, you have a X509 `domain.crt` and `domain.key`, for the CN `myregistrydomain.com`
|
|
||||||
* be sure you have stopped and removed any previously running registry (typically `docker stop registry && docker rm -v registry`)
|
|
||||||
|
|
||||||
## The List
|
|
||||||
|
|
||||||
* [using Apache as an authenticating proxy](apache.md)
|
|
||||||
* [using Nginx as an authenticating proxy](nginx.md)
|
|
||||||
* [running a Registry on OS X](osx-setup-guide.md)
|
|
||||||
* [mirror the Docker Hub](mirror.md)
|
|
||||||
21
vendor/github.com/docker/distribution/docs/recipes/menu.md
generated
vendored
21
vendor/github.com/docker/distribution/docs/recipes/menu.md
generated
vendored
@@ -1,21 +0,0 @@
|
|||||||
<!--[metadata]>
|
|
||||||
+++
|
|
||||||
title = "Recipes"
|
|
||||||
description = "Registry Recipes"
|
|
||||||
keywords = ["registry, on-prem, images, tags, repository, distribution"]
|
|
||||||
type = "menu"
|
|
||||||
[menu.main]
|
|
||||||
identifier="smn_recipes"
|
|
||||||
parent="smn_registry"
|
|
||||||
weight=6
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Recipes
|
|
||||||
|
|
||||||
## The List
|
|
||||||
|
|
||||||
* [using Apache as an authenticating proxy](apache.md)
|
|
||||||
* [using Nginx as an authenticating proxy](nginx.md)
|
|
||||||
* [running a Registry on OS X](osx-setup-guide.md)
|
|
||||||
* [mirror the Docker Hub](mirror.md)
|
|
||||||
74
vendor/github.com/docker/distribution/docs/recipes/mirror.md
generated
vendored
74
vendor/github.com/docker/distribution/docs/recipes/mirror.md
generated
vendored
@@ -1,74 +0,0 @@
|
|||||||
<!--[metadata]>
|
|
||||||
+++
|
|
||||||
title = "Mirroring Docker Hub"
|
|
||||||
description = "Setting-up a local mirror for Docker Hub images"
|
|
||||||
keywords = ["registry, on-prem, images, tags, repository, distribution, mirror, Hub, recipe, advanced"]
|
|
||||||
[menu.main]
|
|
||||||
parent="smn_recipes"
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Registry as a pull through cache
|
|
||||||
|
|
||||||
## Use-case
|
|
||||||
|
|
||||||
If you have multiple instances of Docker running in your environment (e.g., multiple physical or virtual machines, all running the Docker daemon), each time one of them requires an image that it doesn’t have it will go out to the internet and fetch it from the public Docker registry. By running a local registry mirror, you can keep most of the redundant image fetch traffic on your local network.
|
|
||||||
|
|
||||||
### Alternatives
|
|
||||||
|
|
||||||
Alternatively, if the set of images you are using is well delimited, you can simply pull them manually and push them to a simple, local, private registry.
|
|
||||||
|
|
||||||
Furthermore, if your images are all built in-house, not using the Hub at all and relying entirely on your local registry is the simplest scenario.
|
|
||||||
|
|
||||||
### Gotcha
|
|
||||||
|
|
||||||
It's currently not possible to mirror another private registry. Only the central Hub can be mirrored.
|
|
||||||
|
|
||||||
### Solution
|
|
||||||
|
|
||||||
The Registry can be configured as a pull through cache. In this mode a Registry responds to all normal docker pull requests but stores all content locally.
|
|
||||||
|
|
||||||
## How does it work?
|
|
||||||
|
|
||||||
The first time you request an image from your local registry mirror, it pulls the image from the public Docker registry and stores it locally before handing it back to you. On subsequent requests, the local registry mirror is able to serve the image from its own storage.
|
|
||||||
|
|
||||||
### What if the content changes on the Hub?
|
|
||||||
|
|
||||||
When a pull is attempted with a tag, the Registry will check the remote to ensure if it has the latest version of the requested content. If it doesn't it will fetch the latest content and cache it.
|
|
||||||
|
|
||||||
### What about my disk?
|
|
||||||
|
|
||||||
In environments with high churn rates, stale data can build up in the cache. When running as a pull through cache the Registry will periodically remove old content to save disk space. Subsequent requests for removed content will cause a remote fetch and local re-caching.
|
|
||||||
|
|
||||||
To ensure best performance and guarantee correctness the Registry cache should be configured to use the `filesystem` driver for storage.
|
|
||||||
|
|
||||||
## Running a Registry as a pull through cache
|
|
||||||
|
|
||||||
The easiest way to run a registry as a pull through cache is to run the official Registry image.
|
|
||||||
|
|
||||||
Multiple registry caches can be deployed over the same back-end. A single registry cache will ensure that concurrent requests do not pull duplicate data, but this property will not hold true for a registry cache cluster.
|
|
||||||
|
|
||||||
### Configuring the cache
|
|
||||||
|
|
||||||
To configure a Registry to run as a pull through cache, the addition of a `proxy` section is required to the config file.
|
|
||||||
|
|
||||||
In order to access private images on the Docker Hub, a username and password can be supplied.
|
|
||||||
|
|
||||||
proxy:
|
|
||||||
remoteurl: https://registry-1.docker.io
|
|
||||||
username: [username]
|
|
||||||
password: [password]
|
|
||||||
|
|
||||||
> :warn: if you specify a username and password, it's very important to understand that private resources that this user has access to on the Hub will be made available on your mirror. It's thus paramount that you secure your mirror by implementing authentication if you expect these resources to stay private!
|
|
||||||
|
|
||||||
### Configuring the Docker daemon
|
|
||||||
|
|
||||||
You will need to pass the `--registry-mirror` option to your Docker daemon on startup:
|
|
||||||
|
|
||||||
docker --registry-mirror=https://<my-docker-mirror-host> daemon
|
|
||||||
|
|
||||||
For example, if your mirror is serving on `http://10.0.0.2:5000`, you would run:
|
|
||||||
|
|
||||||
docker --registry-mirror=https://10.0.0.2:5000 daemon
|
|
||||||
|
|
||||||
NOTE: Depending on your local host setup, you may be able to add the `--registry-mirror` option to the `DOCKER_OPTS` variable in `/etc/default/docker`.
|
|
||||||
190
vendor/github.com/docker/distribution/docs/recipes/nginx.md
generated
vendored
190
vendor/github.com/docker/distribution/docs/recipes/nginx.md
generated
vendored
@@ -1,190 +0,0 @@
|
|||||||
<!--[metadata]>
|
|
||||||
+++
|
|
||||||
title = "Authenticating proxy with nginx"
|
|
||||||
description = "Restricting access to your registry using a nginx proxy"
|
|
||||||
keywords = ["registry, on-prem, images, tags, repository, distribution, nginx, proxy, authentication, TLS, recipe, advanced"]
|
|
||||||
[menu.main]
|
|
||||||
parent="smn_recipes"
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Authenticating proxy with nginx
|
|
||||||
|
|
||||||
|
|
||||||
## Use-case
|
|
||||||
|
|
||||||
People already relying on a nginx proxy to authenticate their users to other services might want to leverage it and have Registry communications tunneled through the same pipeline.
|
|
||||||
|
|
||||||
Usually, that includes enterprise setups using LDAP/AD on the backend and a SSO mechanism fronting their internal http portal.
|
|
||||||
|
|
||||||
### Alternatives
|
|
||||||
|
|
||||||
If you just want authentication for your registry, and are happy maintaining users access separately, you should really consider sticking with the native [basic auth registry feature](../deploying.md#native-basic-auth).
|
|
||||||
|
|
||||||
### Solution
|
|
||||||
|
|
||||||
With the method presented here, you implement basic authentication for docker engines in a reverse proxy that sits in front of your registry.
|
|
||||||
|
|
||||||
While we use a simple htpasswd file as an example, any other nginx authentication backend should be fairly easy to implement once you are done with the example.
|
|
||||||
|
|
||||||
We also implement push restriction (to a limited user group) for the sake of the example. Again, you should modify this to fit your mileage.
|
|
||||||
|
|
||||||
### Gotchas
|
|
||||||
|
|
||||||
While this model gives you the ability to use whatever authentication backend you want through the secondary authentication mechanism implemented inside your proxy, it also requires that you move TLS termination from the Registry to the proxy itself.
|
|
||||||
|
|
||||||
Furthermore, introducing an extra http layer in your communication pipeline will make it more complex to deploy, maintain, and debug, and will possibly create issues. Make sure the extra complexity is required.
|
|
||||||
|
|
||||||
For instance, Amazon's Elastic Load Balancer (ELB) in HTTPS mode already sets the following client header:
|
|
||||||
|
|
||||||
```
|
|
||||||
X-Real-IP
|
|
||||||
X-Forwarded-For
|
|
||||||
X-Forwarded-Proto
|
|
||||||
```
|
|
||||||
|
|
||||||
So if you have an nginx sitting behind it, you should remove these lines from the example config below:
|
|
||||||
|
|
||||||
```
|
|
||||||
X-Real-IP $remote_addr; # pass on real client's IP
|
|
||||||
X-Forwarded-For $proxy_add_x_forwarded_for;
|
|
||||||
X-Forwarded-Proto $scheme;
|
|
||||||
```
|
|
||||||
|
|
||||||
Otherwise nginx will reset the ELB's values, and the requests will not be routed properly. For more information, see [#970](https://github.com/docker/distribution/issues/970).
|
|
||||||
|
|
||||||
## Setting things up
|
|
||||||
|
|
||||||
Read again [the requirements](index.md#requirements).
|
|
||||||
|
|
||||||
Ready?
|
|
||||||
|
|
||||||
--
|
|
||||||
|
|
||||||
Create the required directories
|
|
||||||
|
|
||||||
```
|
|
||||||
mkdir -p auth
|
|
||||||
mkdir -p data
|
|
||||||
```
|
|
||||||
|
|
||||||
Create the main nginx configuration you will use.
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
cat <<EOF > auth/nginx.conf
|
|
||||||
events {
|
|
||||||
worker_connections 1024;
|
|
||||||
}
|
|
||||||
|
|
||||||
http {
|
|
||||||
|
|
||||||
upstream docker-registry {
|
|
||||||
server registry:5000;
|
|
||||||
}
|
|
||||||
|
|
||||||
## Set a variable to help us decide if we need to add the
|
|
||||||
## 'Docker-Distribution-Api-Version' header.
|
|
||||||
## The registry always sets this header.
|
|
||||||
## In the case of nginx performing auth, the header will be unset
|
|
||||||
## since nginx is auth-ing before proxying.
|
|
||||||
map \$upstream_http_docker_distribution_api_version \$docker_distribution_api_version {
|
|
||||||
'registry/2.0' '';
|
|
||||||
default registry/2.0;
|
|
||||||
}
|
|
||||||
|
|
||||||
server {
|
|
||||||
listen 443 ssl;
|
|
||||||
server_name myregistrydomain.com;
|
|
||||||
|
|
||||||
# SSL
|
|
||||||
ssl_certificate /etc/nginx/conf.d/domain.crt;
|
|
||||||
ssl_certificate_key /etc/nginx/conf.d/domain.key;
|
|
||||||
|
|
||||||
# Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
|
|
||||||
ssl_protocols TLSv1.1 TLSv1.2;
|
|
||||||
ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH';
|
|
||||||
ssl_prefer_server_ciphers on;
|
|
||||||
ssl_session_cache shared:SSL:10m;
|
|
||||||
|
|
||||||
# disable any limits to avoid HTTP 413 for large image uploads
|
|
||||||
client_max_body_size 0;
|
|
||||||
|
|
||||||
# required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486)
|
|
||||||
chunked_transfer_encoding on;
|
|
||||||
|
|
||||||
location /v2/ {
|
|
||||||
# Do not allow connections from docker 1.5 and earlier
|
|
||||||
# docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents
|
|
||||||
if (\$http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*\$" ) {
|
|
||||||
return 404;
|
|
||||||
}
|
|
||||||
|
|
||||||
# To add basic authentication to v2 use auth_basic setting.
|
|
||||||
auth_basic "Registry realm";
|
|
||||||
auth_basic_user_file /etc/nginx/conf.d/nginx.htpasswd;
|
|
||||||
|
|
||||||
## If $docker_distribution_api_version is empty, the header will not be added.
|
|
||||||
## See the map directive above where this variable is defined.
|
|
||||||
add_header 'Docker-Distribution-Api-Version' \$docker_distribution_api_version always;
|
|
||||||
|
|
||||||
proxy_pass http://docker-registry;
|
|
||||||
proxy_set_header Host \$http_host; # required for docker client's sake
|
|
||||||
proxy_set_header X-Real-IP \$remote_addr; # pass on real client's IP
|
|
||||||
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
|
|
||||||
proxy_set_header X-Forwarded-Proto \$scheme;
|
|
||||||
proxy_read_timeout 900;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
EOF
|
|
||||||
```
|
|
||||||
|
|
||||||
Now create a password file for "testuser" and "testpassword"
|
|
||||||
|
|
||||||
```
|
|
||||||
docker run --rm --entrypoint htpasswd registry:2 -bn testuser testpassword > auth/nginx.htpasswd
|
|
||||||
```
|
|
||||||
|
|
||||||
Copy over your certificate files
|
|
||||||
|
|
||||||
```
|
|
||||||
cp domain.crt auth
|
|
||||||
cp domain.key auth
|
|
||||||
```
|
|
||||||
|
|
||||||
Now create your compose file
|
|
||||||
|
|
||||||
```
|
|
||||||
cat <<EOF > docker-compose.yml
|
|
||||||
nginx:
|
|
||||||
image: "nginx:1.9"
|
|
||||||
ports:
|
|
||||||
- 5043:443
|
|
||||||
links:
|
|
||||||
- registry:registry
|
|
||||||
volumes:
|
|
||||||
- ./auth:/etc/nginx/conf.d
|
|
||||||
- ./auth/nginx.conf:/etc/nginx/nginx.conf:ro
|
|
||||||
|
|
||||||
registry:
|
|
||||||
image: registry:2
|
|
||||||
ports:
|
|
||||||
- 127.0.0.1:5000:5000
|
|
||||||
volumes:
|
|
||||||
- ./data:/var/lib/registry
|
|
||||||
EOF
|
|
||||||
```
|
|
||||||
|
|
||||||
## Starting and stopping
|
|
||||||
|
|
||||||
Now, start your stack:
|
|
||||||
|
|
||||||
docker-compose up -d
|
|
||||||
|
|
||||||
Login with a "push" authorized user (using `testuser` and `testpassword`), then tag and push your first image:
|
|
||||||
|
|
||||||
docker login -u=testuser -p=testpassword -e=root@example.ch myregistrydomain.com:5043
|
|
||||||
docker tag ubuntu myregistrydomain.com:5043/test
|
|
||||||
docker push myregistrydomain.com:5043/test
|
|
||||||
docker pull myregistrydomain.com:5043/test
|
|
||||||
81
vendor/github.com/docker/distribution/docs/recipes/osx-setup-guide.md
generated
vendored
81
vendor/github.com/docker/distribution/docs/recipes/osx-setup-guide.md
generated
vendored
@@ -1,81 +0,0 @@
|
|||||||
<!--[metadata]>
|
|
||||||
+++
|
|
||||||
title = "Running on OS X"
|
|
||||||
description = "Explains how to run a registry on OS X"
|
|
||||||
keywords = ["registry, on-prem, images, tags, repository, distribution, OS X, recipe, advanced"]
|
|
||||||
[menu.main]
|
|
||||||
parent="smn_recipes"
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# OS X Setup Guide
|
|
||||||
|
|
||||||
## Use-case
|
|
||||||
|
|
||||||
This is useful if you intend to run a registry server natively on OS X.
|
|
||||||
|
|
||||||
### Alternatives
|
|
||||||
|
|
||||||
You can start a VM on OS X, and deploy your registry normally as a container using Docker inside that VM.
|
|
||||||
|
|
||||||
The simplest road to get there is traditionally to use the [docker Toolbox](https://www.docker.com/toolbox), or [docker-machine](/machine/index.md), which usually relies on the [boot2docker](http://boot2docker.io/) iso inside a VirtualBox VM.
|
|
||||||
|
|
||||||
### Solution
|
|
||||||
|
|
||||||
Using the method described here, you install and compile your own from the git repository and run it as an OS X agent.
|
|
||||||
|
|
||||||
### Gotchas
|
|
||||||
|
|
||||||
Production services operation on OS X is out of scope of this document. Be sure you understand well these aspects before considering going to production with this.
|
|
||||||
|
|
||||||
## Setup golang on your machine
|
|
||||||
|
|
||||||
If you know, safely skip to the next section.
|
|
||||||
|
|
||||||
If you don't, the TLDR is:
|
|
||||||
|
|
||||||
bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/master/binscripts/gvm-installer)
|
|
||||||
source ~/.gvm/scripts/gvm
|
|
||||||
gvm install go1.4.2
|
|
||||||
gvm use go1.4.2
|
|
||||||
|
|
||||||
If you want to understand, you should read [How to Write Go Code](https://golang.org/doc/code.html).
|
|
||||||
|
|
||||||
## Checkout the Docker Distribution source tree
|
|
||||||
|
|
||||||
mkdir -p $GOPATH/src/github.com/docker
|
|
||||||
git clone https://github.com/docker/distribution.git $GOPATH/src/github.com/docker/distribution
|
|
||||||
cd $GOPATH/src/github.com/docker/distribution
|
|
||||||
|
|
||||||
## Build the binary
|
|
||||||
|
|
||||||
GOPATH=$(PWD)/Godeps/_workspace:$GOPATH make binaries
|
|
||||||
sudo cp bin/registry /usr/local/libexec/registry
|
|
||||||
|
|
||||||
## Setup
|
|
||||||
|
|
||||||
Copy the registry configuration file in place:
|
|
||||||
|
|
||||||
mkdir /Users/Shared/Registry
|
|
||||||
cp docs/osx/config.yml /Users/Shared/Registry/config.yml
|
|
||||||
|
|
||||||
## Running the Docker Registry under launchd
|
|
||||||
|
|
||||||
Copy the Docker registry plist into place:
|
|
||||||
|
|
||||||
plutil -lint docs/osx/com.docker.registry.plist
|
|
||||||
cp docs/osx/com.docker.registry.plist ~/Library/LaunchAgents/
|
|
||||||
chmod 644 ~/Library/LaunchAgents/com.docker.registry.plist
|
|
||||||
|
|
||||||
Start the Docker registry:
|
|
||||||
|
|
||||||
launchctl load ~/Library/LaunchAgents/com.docker.registry.plist
|
|
||||||
|
|
||||||
### Restarting the docker registry service
|
|
||||||
|
|
||||||
launchctl stop com.docker.registry
|
|
||||||
launchctl start com.docker.registry
|
|
||||||
|
|
||||||
### Unloading the docker registry service
|
|
||||||
|
|
||||||
launchctl unload ~/Library/LaunchAgents/com.docker.registry.plist
|
|
||||||
42
vendor/github.com/docker/distribution/docs/recipes/osx/com.docker.registry.plist
generated
vendored
42
vendor/github.com/docker/distribution/docs/recipes/osx/com.docker.registry.plist
generated
vendored
@@ -1,42 +0,0 @@
|
|||||||
<?xml version="1.0" encoding="UTF-8"?>
|
|
||||||
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
|
|
||||||
<plist version="1.0">
|
|
||||||
<dict>
|
|
||||||
<key>Label</key>
|
|
||||||
<string>com.docker.registry</string>
|
|
||||||
<key>KeepAlive</key>
|
|
||||||
<true/>
|
|
||||||
<key>StandardErrorPath</key>
|
|
||||||
<string>/Users/Shared/Registry/registry.log</string>
|
|
||||||
<key>StandardOutPath</key>
|
|
||||||
<string>/Users/Shared/Registry/registry.log</string>
|
|
||||||
<key>Program</key>
|
|
||||||
<string>/usr/local/libexec/registry</string>
|
|
||||||
<key>ProgramArguments</key>
|
|
||||||
<array>
|
|
||||||
<string>/usr/local/libexec/registry</string>
|
|
||||||
<string>/Users/Shared/Registry/config.yml</string>
|
|
||||||
</array>
|
|
||||||
<key>Sockets</key>
|
|
||||||
<dict>
|
|
||||||
<key>http-listen-address</key>
|
|
||||||
<dict>
|
|
||||||
<key>SockServiceName</key>
|
|
||||||
<string>5000</string>
|
|
||||||
<key>SockType</key>
|
|
||||||
<string>dgram</string>
|
|
||||||
<key>SockFamily</key>
|
|
||||||
<string>IPv4</string>
|
|
||||||
</dict>
|
|
||||||
<key>http-debug-address</key>
|
|
||||||
<dict>
|
|
||||||
<key>SockServiceName</key>
|
|
||||||
<string>5001</string>
|
|
||||||
<key>SockType</key>
|
|
||||||
<string>dgram</string>
|
|
||||||
<key>SockFamily</key>
|
|
||||||
<string>IPv4</string>
|
|
||||||
</dict>
|
|
||||||
</dict>
|
|
||||||
</dict>
|
|
||||||
</plist>
|
|
||||||
16
vendor/github.com/docker/distribution/docs/recipes/osx/config.yml
generated
vendored
16
vendor/github.com/docker/distribution/docs/recipes/osx/config.yml
generated
vendored
@@ -1,16 +0,0 @@
|
|||||||
version: 0.1
|
|
||||||
log:
|
|
||||||
level: info
|
|
||||||
fields:
|
|
||||||
service: registry
|
|
||||||
environment: macbook-air
|
|
||||||
storage:
|
|
||||||
cache:
|
|
||||||
blobdescriptor: inmemory
|
|
||||||
filesystem:
|
|
||||||
rootdirectory: /Users/Shared/Registry
|
|
||||||
http:
|
|
||||||
addr: 0.0.0.0:5000
|
|
||||||
secret: mytokensecret
|
|
||||||
debug:
|
|
||||||
addr: localhost:5001
|
|
||||||
16
vendor/github.com/docker/distribution/docs/spec/api.md
generated
vendored
16
vendor/github.com/docker/distribution/docs/spec/api.md
generated
vendored
@@ -1,12 +1,8 @@
|
|||||||
<!--[metadata]>
|
---
|
||||||
+++
|
title: "HTTP API V2"
|
||||||
title = "HTTP API V2"
|
description: "Specification for the Registry API."
|
||||||
description = "Specification for the Registry API."
|
keywords: ["registry, on-prem, images, tags, repository, distribution, api, advanced"]
|
||||||
keywords = ["registry, on-prem, images, tags, repository, distribution, api, advanced"]
|
---
|
||||||
[menu.main]
|
|
||||||
parent="smn_registry_ref"
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Docker Registry HTTP API V2
|
# Docker Registry HTTP API V2
|
||||||
|
|
||||||
@@ -248,7 +244,7 @@ enforce this. The rules for a repository name are as follows:
|
|||||||
must match the regular expression `[a-z0-9]+(?:[._-][a-z0-9]+)*`.
|
must match the regular expression `[a-z0-9]+(?:[._-][a-z0-9]+)*`.
|
||||||
2. If a repository name has two or more path components, they must be
|
2. If a repository name has two or more path components, they must be
|
||||||
separated by a forward slash ("/").
|
separated by a forward slash ("/").
|
||||||
3. The total length of a repository name, including slashes, must be less the
|
3. The total length of a repository name, including slashes, must be less than
|
||||||
256 characters.
|
256 characters.
|
||||||
|
|
||||||
These name requirements _only_ apply to the registry API and should accept a
|
These name requirements _only_ apply to the registry API and should accept a
|
||||||
|
|||||||
16
vendor/github.com/docker/distribution/docs/spec/api.md.tmpl
generated
vendored
16
vendor/github.com/docker/distribution/docs/spec/api.md.tmpl
generated
vendored
@@ -1,12 +1,8 @@
|
|||||||
<!--[metadata]>
|
---
|
||||||
+++
|
title: "HTTP API V2"
|
||||||
title = "HTTP API V2"
|
description: "Specification for the Registry API."
|
||||||
description = "Specification for the Registry API."
|
keywords: ["registry, on-prem, images, tags, repository, distribution, api, advanced"]
|
||||||
keywords = ["registry, on-prem, images, tags, repository, distribution, api, advanced"]
|
---
|
||||||
[menu.main]
|
|
||||||
parent="smn_registry_ref"
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Docker Registry HTTP API V2
|
# Docker Registry HTTP API V2
|
||||||
|
|
||||||
@@ -248,7 +244,7 @@ enforce this. The rules for a repository name are as follows:
|
|||||||
must match the regular expression `[a-z0-9]+(?:[._-][a-z0-9]+)*`.
|
must match the regular expression `[a-z0-9]+(?:[._-][a-z0-9]+)*`.
|
||||||
2. If a repository name has two or more path components, they must be
|
2. If a repository name has two or more path components, they must be
|
||||||
separated by a forward slash ("/").
|
separated by a forward slash ("/").
|
||||||
3. The total length of a repository name, including slashes, must be less the
|
3. The total length of a repository name, including slashes, must be less than
|
||||||
256 characters.
|
256 characters.
|
||||||
|
|
||||||
These name requirements _only_ apply to the registry API and should accept a
|
These name requirements _only_ apply to the registry API and should accept a
|
||||||
|
|||||||
15
vendor/github.com/docker/distribution/docs/spec/auth/index.md
generated
vendored
15
vendor/github.com/docker/distribution/docs/spec/auth/index.md
generated
vendored
@@ -1,13 +1,8 @@
|
|||||||
<!--[metadata]>
|
---
|
||||||
+++
|
title: "Docker Registry Token Authentication"
|
||||||
title = "Docker Registry Token Authentication"
|
description: "Docker Registry v2 authentication schema"
|
||||||
description = "Docker Registry v2 authentication schema"
|
keywords: ["registry, on-prem, images, tags, repository, distribution, authentication, advanced"]
|
||||||
keywords = ["registry, on-prem, images, tags, repository, distribution, authentication, advanced"]
|
---
|
||||||
[menu.main]
|
|
||||||
parent="smn_registry_ref"
|
|
||||||
weight=100
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Docker Registry v2 authentication
|
# Docker Registry v2 authentication
|
||||||
|
|
||||||
|
|||||||
15
vendor/github.com/docker/distribution/docs/spec/auth/jwt.md
generated
vendored
15
vendor/github.com/docker/distribution/docs/spec/auth/jwt.md
generated
vendored
@@ -1,13 +1,8 @@
|
|||||||
<!--[metadata]>
|
---
|
||||||
+++
|
title: "Token Authentication Implementation"
|
||||||
title = "Token Authentication Implementation"
|
description: "Describe the reference implementation of the Docker Registry v2 authentication schema"
|
||||||
description = "Describe the reference implementation of the Docker Registry v2 authentication schema"
|
keywords: ["registry, on-prem, images, tags, repository, distribution, JWT authentication, advanced"]
|
||||||
keywords = ["registry, on-prem, images, tags, repository, distribution, JWT authentication, advanced"]
|
---
|
||||||
[menu.main]
|
|
||||||
parent="smn_registry_ref"
|
|
||||||
weight=101
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Docker Registry v2 Bearer token specification
|
# Docker Registry v2 Bearer token specification
|
||||||
|
|
||||||
|
|||||||
16
vendor/github.com/docker/distribution/docs/spec/auth/oauth.md
generated
vendored
16
vendor/github.com/docker/distribution/docs/spec/auth/oauth.md
generated
vendored
@@ -1,13 +1,8 @@
|
|||||||
<!--[metadata]>
|
---
|
||||||
+++
|
title: "Oauth2 Token Authentication"
|
||||||
title = "Oauth2 Token Authentication"
|
description: "Specifies the Docker Registry v2 authentication"
|
||||||
description = "Specifies the Docker Registry v2 authentication"
|
keywords: ["registry, on-prem, images, tags, repository, distribution, oauth2, advanced"]
|
||||||
keywords = ["registry, on-prem, images, tags, repository, distribution, oauth2, advanced"]
|
---
|
||||||
[menu.main]
|
|
||||||
parent="smn_registry_ref"
|
|
||||||
weight=102
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Docker Registry v2 authentication using OAuth2
|
# Docker Registry v2 authentication using OAuth2
|
||||||
|
|
||||||
@@ -193,4 +188,3 @@ Content-Type: application/json
|
|||||||
|
|
||||||
{"refresh_token":"kas9Da81Dfa8","access_token":"eyJhbGciOiJFUzI1NiIsInR5","expires_in":900,"scope":"repository:samalba/my-app:pull,repository:samalba/my-app:push"}
|
{"refresh_token":"kas9Da81Dfa8","access_token":"eyJhbGciOiJFUzI1NiIsInR5","expires_in":900,"scope":"repository:samalba/my-app:pull,repository:samalba/my-app:push"}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|||||||
31
vendor/github.com/docker/distribution/docs/spec/auth/scope.md
generated
vendored
31
vendor/github.com/docker/distribution/docs/spec/auth/scope.md
generated
vendored
@@ -1,13 +1,8 @@
|
|||||||
<!--[metadata]>
|
---
|
||||||
+++
|
title: "Token Scope Documentation"
|
||||||
title = "Token Scope Documentation"
|
description: "Describes the scope and access fields used for registry authorization tokens"
|
||||||
description = "Describes the scope and access fields used for registry authorization tokens"
|
keywords: ["registry, on-prem, images, tags, repository, distribution, advanced, access, scope"]
|
||||||
keywords = ["registry, on-prem, images, tags, repository, distribution, advanced, access, scope"]
|
---
|
||||||
[menu.main]
|
|
||||||
parent="smn_registry_ref"
|
|
||||||
weight=103
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Docker Registry Token Scope and Access
|
# Docker Registry Token Scope and Access
|
||||||
|
|
||||||
@@ -44,13 +39,23 @@ intended to represent. This type may be specific to a resource provider but must
|
|||||||
be understood by the authorization server in order to validate the subject
|
be understood by the authorization server in order to validate the subject
|
||||||
is authorized for a specific resource.
|
is authorized for a specific resource.
|
||||||
|
|
||||||
|
#### Resource Class
|
||||||
|
|
||||||
|
The resource type might have a resource class which further classifies the
|
||||||
|
the resource name within the resource type. A class is not required and
|
||||||
|
is specific to the resource type.
|
||||||
|
|
||||||
#### Example Resource Types
|
#### Example Resource Types
|
||||||
|
|
||||||
- `repository` - represents a single repository within a registry. A
|
- `repository` - represents a single repository within a registry. A
|
||||||
repository may represent many manifest or content blobs, but the resource type
|
repository may represent many manifest or content blobs, but the resource type
|
||||||
is considered the collections of those items. Actions which may be performed on
|
is considered the collections of those items. Actions which may be performed on
|
||||||
a `repository` are `pull` for accessing the collection and `push` for adding to
|
a `repository` are `pull` for accessing the collection and `push` for adding to
|
||||||
it.
|
it. By default the `repository` type has the class of `image`.
|
||||||
|
- `repository(plugin)` - represents a single repository of plugins within a
|
||||||
|
registry. A plugin repository has the same content and actions as a repository.
|
||||||
|
- `registry` - represents the entire registry. Used for administrative actions
|
||||||
|
or lookup operations that span an entire registry.
|
||||||
|
|
||||||
### Resource Name
|
### Resource Name
|
||||||
|
|
||||||
@@ -83,7 +88,8 @@ scopes.
|
|||||||
```
|
```
|
||||||
scope := resourcescope [ ' ' resourcescope ]*
|
scope := resourcescope [ ' ' resourcescope ]*
|
||||||
resourcescope := resourcetype ":" resourcename ":" action [ ',' action ]*
|
resourcescope := resourcetype ":" resourcename ":" action [ ',' action ]*
|
||||||
resourcetype := /[a-z]*/
|
resourcetype := resourcetypevalue [ '(' resourcetypevalue ')' ]
|
||||||
|
resourcetypevalue := /[a-z0-9]+/
|
||||||
resourcename := [ hostname '/' ] component [ '/' component ]*
|
resourcename := [ hostname '/' ] component [ '/' component ]*
|
||||||
hostname := hostcomponent ['.' hostcomponent]* [':' port-number]
|
hostname := hostcomponent ['.' hostcomponent]* [':' port-number]
|
||||||
hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
|
hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
|
||||||
@@ -140,4 +146,3 @@ done by fetching an access token using the refresh token. Since the refresh
|
|||||||
token is not scoped to specific resources for an audience, extra care should
|
token is not scoped to specific resources for an audience, extra care should
|
||||||
be taken to only use the refresh token to negotiate new access tokens directly
|
be taken to only use the refresh token to negotiate new access tokens directly
|
||||||
with the authorization server, and never with a resource provider.
|
with the authorization server, and never with a resource provider.
|
||||||
|
|
||||||
|
|||||||
19
vendor/github.com/docker/distribution/docs/spec/auth/token.md
generated
vendored
19
vendor/github.com/docker/distribution/docs/spec/auth/token.md
generated
vendored
@@ -1,13 +1,8 @@
|
|||||||
<!--[metadata]>
|
---
|
||||||
+++
|
title: "Token Authentication Specification"
|
||||||
title = "Token Authentication Specification"
|
description: "Specifies the Docker Registry v2 authentication"
|
||||||
description = "Specifies the Docker Registry v2 authentication"
|
keywords: ["registry, on-prem, images, tags, repository, distribution, Bearer authentication, advanced"]
|
||||||
keywords = ["registry, on-prem, images, tags, repository, distribution, Bearer authentication, advanced"]
|
---
|
||||||
[menu.main]
|
|
||||||
parent="smn_registry_ref"
|
|
||||||
weight=104
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Docker Registry v2 authentication via central service
|
# Docker Registry v2 authentication via central service
|
||||||
|
|
||||||
@@ -25,7 +20,7 @@ This document outlines the v2 Docker registry authentication scheme:
|
|||||||
5. The client retries the original request with the Bearer token embedded in
|
5. The client retries the original request with the Bearer token embedded in
|
||||||
the request's Authorization header.
|
the request's Authorization header.
|
||||||
6. The Registry authorizes the client by validating the Bearer token and the
|
6. The Registry authorizes the client by validating the Bearer token and the
|
||||||
claim set embedded within it and begins the push/pull session as usual.
|
claim set embedded within it and begins the push/pull session as usual.
|
||||||
|
|
||||||
## Requirements
|
## Requirements
|
||||||
|
|
||||||
@@ -161,7 +156,7 @@ Defines getting a bearer and refresh token using the token endpoint.
|
|||||||
<code>expires_in</code>
|
<code>expires_in</code>
|
||||||
</dt>
|
</dt>
|
||||||
<dd>
|
<dd>
|
||||||
(Optional) The duration in seconds since the token was issued that it
|
(Optional) The duration in seconds since the token was issued that it
|
||||||
will remain valid. When omitted, this defaults to 60 seconds. For
|
will remain valid. When omitted, this defaults to 60 seconds. For
|
||||||
compatibility with older clients, a token should never be returned with
|
compatibility with older clients, a token should never be returned with
|
||||||
less than 60 seconds to live.
|
less than 60 seconds to live.
|
||||||
|
|||||||
8
vendor/github.com/docker/distribution/docs/spec/implementations.md
generated
vendored
8
vendor/github.com/docker/distribution/docs/spec/implementations.md
generated
vendored
@@ -1,8 +1,6 @@
|
|||||||
<!--[metadata]>
|
---
|
||||||
+++
|
published: false
|
||||||
draft = true
|
---
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Distribution API Implementations
|
# Distribution API Implementations
|
||||||
|
|
||||||
|
|||||||
15
vendor/github.com/docker/distribution/docs/spec/index.md
generated
vendored
15
vendor/github.com/docker/distribution/docs/spec/index.md
generated
vendored
@@ -1,13 +1,8 @@
|
|||||||
<!--[metadata]>
|
---
|
||||||
+++
|
title: "Reference Overview"
|
||||||
title = "Reference Overview"
|
description: "Explains registry JSON objects"
|
||||||
description = "Explains registry JSON objects"
|
keywords: ["registry, service, images, repository, json"]
|
||||||
keywords = ["registry, service, images, repository, json"]
|
---
|
||||||
[menu.main]
|
|
||||||
parent="smn_registry_ref"
|
|
||||||
weight=-1
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Docker Registry Reference
|
# Docker Registry Reference
|
||||||
|
|
||||||
|
|||||||
16
vendor/github.com/docker/distribution/docs/spec/json.md
generated
vendored
16
vendor/github.com/docker/distribution/docs/spec/json.md
generated
vendored
@@ -1,13 +1,9 @@
|
|||||||
<!--[metadata]>
|
---
|
||||||
+++
|
published: false
|
||||||
draft=true
|
title: "Docker Distribution JSON Canonicalization"
|
||||||
title = "Docker Distribution JSON Canonicalization"
|
description: "Explains registry JSON objects"
|
||||||
description = "Explains registry JSON objects"
|
keywords: ["registry, service, images, repository, json"]
|
||||||
keywords = ["registry, service, images, repository, json"]
|
---
|
||||||
[menu.main]
|
|
||||||
parent="smn_registry_ref"
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
14
vendor/github.com/docker/distribution/docs/spec/manifest-v2-1.md
generated
vendored
14
vendor/github.com/docker/distribution/docs/spec/manifest-v2-1.md
generated
vendored
@@ -1,12 +1,8 @@
|
|||||||
<!--[metadata]>
|
---
|
||||||
+++
|
title: "Image Manifest V 2, Schema 1 "
|
||||||
title = "Image Manifest V 2, Schema 1 "
|
description: "image manifest for the Registry."
|
||||||
description = "image manifest for the Registry."
|
keywords: ["registry, on-prem, images, tags, repository, distribution, api, advanced, manifest"]
|
||||||
keywords = ["registry, on-prem, images, tags, repository, distribution, api, advanced, manifest"]
|
---
|
||||||
[menu.main]
|
|
||||||
parent="smn_registry_ref"
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Image Manifest Version 2, Schema 1
|
# Image Manifest Version 2, Schema 1
|
||||||
|
|
||||||
|
|||||||
15
vendor/github.com/docker/distribution/docs/spec/manifest-v2-2.md
generated
vendored
15
vendor/github.com/docker/distribution/docs/spec/manifest-v2-2.md
generated
vendored
@@ -1,12 +1,8 @@
|
|||||||
<!--[metadata]>
|
---
|
||||||
+++
|
title: "Image Manifest V 2, Schema 2 "
|
||||||
title = "Image Manifest V 2, Schema 2 "
|
description: "image manifest for the Registry."
|
||||||
description = "image manifest for the Registry."
|
keywords: ["registry, on-prem, images, tags, repository, distribution, api, advanced, manifest"]
|
||||||
keywords = ["registry, on-prem, images, tags, repository, distribution, api, advanced, manifest"]
|
---
|
||||||
[menu.main]
|
|
||||||
parent="smn_registry_ref"
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Image Manifest Version 2, Schema 2
|
# Image Manifest Version 2, Schema 2
|
||||||
|
|
||||||
@@ -34,6 +30,7 @@ the resources they reference:
|
|||||||
- `application/vnd.docker.container.image.v1+json`: Container config JSON
|
- `application/vnd.docker.container.image.v1+json`: Container config JSON
|
||||||
- `application/vnd.docker.image.rootfs.diff.tar.gzip`: "Layer", as a gzipped tar
|
- `application/vnd.docker.image.rootfs.diff.tar.gzip`: "Layer", as a gzipped tar
|
||||||
- `application/vnd.docker.image.rootfs.foreign.diff.tar.gzip`: "Layer", as a gzipped tar that should never be pushed
|
- `application/vnd.docker.image.rootfs.foreign.diff.tar.gzip`: "Layer", as a gzipped tar that should never be pushed
|
||||||
|
- `application/vnd.docker.plugin.v1+json`: Plugin config JSON
|
||||||
|
|
||||||
## Manifest List
|
## Manifest List
|
||||||
|
|
||||||
|
|||||||
20
vendor/github.com/docker/distribution/docs/spec/menu.md
generated
vendored
20
vendor/github.com/docker/distribution/docs/spec/menu.md
generated
vendored
@@ -1,13 +1,7 @@
|
|||||||
<!--[metadata]>
|
---
|
||||||
+++
|
title: "Reference"
|
||||||
title = "Reference"
|
description: "Explains registry JSON objects"
|
||||||
description = "Explains registry JSON objects"
|
keywords: ["registry, service, images, repository, json"]
|
||||||
keywords = ["registry, service, images, repository, json"]
|
type: "menu"
|
||||||
type = "menu"
|
identifier: "smn_registry_ref"
|
||||||
[menu.main]
|
---
|
||||||
identifier="smn_registry_ref"
|
|
||||||
parent="smn_registry"
|
|
||||||
weight=7
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
|
|||||||
78
vendor/github.com/docker/distribution/docs/storage-drivers/azure.md
generated
vendored
78
vendor/github.com/docker/distribution/docs/storage-drivers/azure.md
generated
vendored
@@ -1,78 +0,0 @@
|
|||||||
<!--[metadata]>
|
|
||||||
+++
|
|
||||||
title = "Microsoft Azure storage driver"
|
|
||||||
description = "Explains how to use the Azure storage drivers"
|
|
||||||
keywords = ["registry, service, driver, images, storage, azure"]
|
|
||||||
[menu.main]
|
|
||||||
parent = "smn_storagedrivers"
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
|
|
||||||
# Microsoft Azure storage driver
|
|
||||||
|
|
||||||
An implementation of the `storagedriver.StorageDriver` interface which uses [Microsoft Azure Blob Storage](http://azure.microsoft.com/en-us/services/storage/) for object storage.
|
|
||||||
|
|
||||||
## Parameters
|
|
||||||
|
|
||||||
<table>
|
|
||||||
<tr>
|
|
||||||
<th>Parameter</th>
|
|
||||||
<th>Required</th>
|
|
||||||
<th>Description</th>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>accountname</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
yes
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
Name of the Azure Storage Account.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>accountkey</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
yes
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
Primary or Secondary Key for the Storage Account.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>container</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
yes
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
Name of the Azure root storage container in which all registry data will be stored. Must comply the storage container name [requirements][create-container-api].
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>realm</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
Domain name suffix for the Storage Service API endpoint. For example realm for "Azure in China" would be `core.chinacloudapi.cn` and realm for "Azure Government" would be `core.usgovcloudapi.net`. By default, this
|
|
||||||
is <code>core.windows.net</code>.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
|
|
||||||
</table>
|
|
||||||
|
|
||||||
|
|
||||||
## Related Information
|
|
||||||
|
|
||||||
* To get information about
|
|
||||||
[azure-blob-storage](http://azure.microsoft.com/en-us/services/storage/) visit
|
|
||||||
the Microsoft website.
|
|
||||||
* You can use Microsoft's [Blob Service REST API](https://msdn.microsoft.com/en-us/library/azure/dd135733.aspx) to [create a container] (https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx).
|
|
||||||
24
vendor/github.com/docker/distribution/docs/storage-drivers/filesystem.md
generated
vendored
24
vendor/github.com/docker/distribution/docs/storage-drivers/filesystem.md
generated
vendored
@@ -1,24 +0,0 @@
|
|||||||
<!--[metadata]>
|
|
||||||
+++
|
|
||||||
title = "Filesystem storage driver"
|
|
||||||
description = "Explains how to use the filesystem storage drivers"
|
|
||||||
keywords = ["registry, service, driver, images, storage, filesystem"]
|
|
||||||
[menu.main]
|
|
||||||
parent="smn_storagedrivers"
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
|
|
||||||
# Filesystem storage driver
|
|
||||||
|
|
||||||
An implementation of the `storagedriver.StorageDriver` interface which uses the local filesystem.
|
|
||||||
|
|
||||||
## Parameters
|
|
||||||
|
|
||||||
`rootdirectory`: (optional) The absolute path to a root directory tree in which
|
|
||||||
to store all registry files. The registry stores all its data here so make sure
|
|
||||||
there is adequate space available. Defaults to `/var/lib/registry`.
|
|
||||||
`maxthreads`: (optional) The maximum number of simultaneous blocking filesystem
|
|
||||||
operations permitted within the registry. Each operation spawns a new thread and
|
|
||||||
may cause thread exhaustion issues if many are done in parallel. Defaults to
|
|
||||||
`100`, and can be no lower than `25`.
|
|
||||||
78
vendor/github.com/docker/distribution/docs/storage-drivers/gcs.md
generated
vendored
78
vendor/github.com/docker/distribution/docs/storage-drivers/gcs.md
generated
vendored
@@ -1,78 +0,0 @@
|
|||||||
<!--[metadata]>
|
|
||||||
+++
|
|
||||||
title = "GCS storage driver"
|
|
||||||
description = "Explains how to use the Google Cloud Storage drivers"
|
|
||||||
keywords = ["registry, service, driver, images, storage, gcs, google, cloud"]
|
|
||||||
[menu.main]
|
|
||||||
parent="smn_storagedrivers"
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
|
|
||||||
# Google Cloud Storage driver
|
|
||||||
|
|
||||||
An implementation of the `storagedriver.StorageDriver` interface which uses Google Cloud for object storage.
|
|
||||||
|
|
||||||
## Parameters
|
|
||||||
|
|
||||||
|
|
||||||
<table>
|
|
||||||
<tr>
|
|
||||||
<th>Parameter</th>
|
|
||||||
<th>Required</th>
|
|
||||||
<th>Description</th>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>bucket</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
yes
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
Storage bucket name.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>keyfile</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
A private service account key file in JSON format. Instead of a key file <a href="https://developers.google.com/identity/protocols/application-default-credentials">Google Application Default Credentials</a> can be used.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>rootdirectory</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
This is a prefix that will be applied to all Google Cloud Storage keys to allow you to segment data in your bucket if necessary.
|
|
||||||
</tr>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>chunksize</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no (default 5242880)
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
This is the chunk size used for uploading large blobs, must be a multiple of 256*1024.
|
|
||||||
</tr>
|
|
||||||
|
|
||||||
</table>
|
|
||||||
|
|
||||||
|
|
||||||
`bucket`: The name of your Google Cloud Storage bucket where you wish to store objects (needs to already be created prior to driver initialization).
|
|
||||||
|
|
||||||
`keyfile`: (optional) A private key file in JSON format, used for [Service Account Authentication](https://cloud.google.com/storage/docs/authentication#service_accounts).
|
|
||||||
|
|
||||||
**Note** Instead of a key file you can use [Google Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials).
|
|
||||||
|
|
||||||
`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root).
|
|
||||||
66
vendor/github.com/docker/distribution/docs/storage-drivers/index.md
generated
vendored
66
vendor/github.com/docker/distribution/docs/storage-drivers/index.md
generated
vendored
@@ -1,66 +0,0 @@
|
|||||||
<!--[metadata]>
|
|
||||||
+++
|
|
||||||
title = "Storage Driver overview"
|
|
||||||
description = "Explains how to use storage drivers"
|
|
||||||
keywords = ["registry, on-prem, images, tags, repository, distribution, storage drivers, advanced"]
|
|
||||||
aliases = ["/registry/storagedrivers/"]
|
|
||||||
[menu.main]
|
|
||||||
parent="smn_storagedrivers"
|
|
||||||
identifier="storage_index"
|
|
||||||
weight=-1
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
|
|
||||||
# Docker Registry Storage Driver
|
|
||||||
|
|
||||||
This document describes the registry storage driver model, implementation, and explains how to contribute new storage drivers.
|
|
||||||
|
|
||||||
## Provided Drivers
|
|
||||||
|
|
||||||
This storage driver package comes bundled with several drivers:
|
|
||||||
|
|
||||||
- [inmemory](inmemory.md): A temporary storage driver using a local inmemory map. This exists solely for reference and testing.
|
|
||||||
- [filesystem](filesystem.md): A local storage driver configured to use a directory tree in the local filesystem.
|
|
||||||
- [s3](s3.md): A driver storing objects in an Amazon Simple Storage Solution (S3) bucket.
|
|
||||||
- [azure](azure.md): A driver storing objects in [Microsoft Azure Blob Storage](http://azure.microsoft.com/en-us/services/storage/).
|
|
||||||
- [swift](swift.md): A driver storing objects in [Openstack Swift](http://docs.openstack.org/developer/swift/).
|
|
||||||
- [oss](oss.md): A driver storing objects in [Aliyun OSS](http://www.aliyun.com/product/oss).
|
|
||||||
- [gcs](gcs.md): A driver storing objects in a [Google Cloud Storage](https://cloud.google.com/storage/) bucket.
|
|
||||||
|
|
||||||
## Storage Driver API
|
|
||||||
|
|
||||||
The storage driver API is designed to model a filesystem-like key/value storage in a manner abstract enough to support a range of drivers from the local filesystem to Amazon S3 or other distributed object storage systems.
|
|
||||||
|
|
||||||
Storage drivers are required to implement the `storagedriver.StorageDriver` interface provided in `storagedriver.go`, which includes methods for reading, writing, and deleting content, as well as listing child objects of a specified prefix key.
|
|
||||||
|
|
||||||
Storage drivers are intended to be written in Go, providing compile-time
|
|
||||||
validation of the `storagedriver.StorageDriver` interface.
|
|
||||||
|
|
||||||
## Driver Selection and Configuration
|
|
||||||
|
|
||||||
The preferred method of selecting a storage driver is using the `StorageDriverFactory` interface in the `storagedriver/factory` package. These factories provide a common interface for constructing storage drivers with a parameters map. The factory model is based off of the [Register](http://golang.org/pkg/database/sql/#Register) and [Open](http://golang.org/pkg/database/sql/#Open) methods in the builtin [database/sql](http://golang.org/pkg/database/sql) package.
|
|
||||||
|
|
||||||
Storage driver factories may be registered by name using the
|
|
||||||
`factory.Register` method, and then later invoked by calling `factory.Create`
|
|
||||||
with a driver name and parameters map. If no such storage driver can be found,
|
|
||||||
`factory.Create` will return an `InvalidStorageDriverError`.
|
|
||||||
|
|
||||||
## Driver Contribution
|
|
||||||
|
|
||||||
### Writing new storage drivers
|
|
||||||
|
|
||||||
To create a valid storage driver, one must implement the
|
|
||||||
`storagedriver.StorageDriver` interface and make sure to expose this driver
|
|
||||||
via the factory system.
|
|
||||||
|
|
||||||
#### Registering
|
|
||||||
|
|
||||||
Storage drivers should call `factory.Register` with their driver name in an `init` method, allowing callers of `factory.New` to construct instances of this driver without requiring modification of imports throughout the codebase.
|
|
||||||
|
|
||||||
## Testing
|
|
||||||
|
|
||||||
Storage driver test suites are provided in
|
|
||||||
`storagedriver/testsuites/testsuites.go` and may be used for any storage
|
|
||||||
driver written in Go. Tests can be registered using the `RegisterSuite`
|
|
||||||
function, which run the same set of tests for any registered drivers.
|
|
||||||
23
vendor/github.com/docker/distribution/docs/storage-drivers/inmemory.md
generated
vendored
23
vendor/github.com/docker/distribution/docs/storage-drivers/inmemory.md
generated
vendored
@@ -1,23 +0,0 @@
|
|||||||
<!--[metadata]>
|
|
||||||
+++
|
|
||||||
title = "In-memory storage driver"
|
|
||||||
description = "Explains how to use the in-memory storage drivers"
|
|
||||||
keywords = ["registry, service, driver, images, storage, in-memory"]
|
|
||||||
[menu.main]
|
|
||||||
parent="smn_storagedrivers"
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
|
|
||||||
# In-memory storage driver (Testing Only)
|
|
||||||
|
|
||||||
For purely tests purposes, you can use the `inmemory` storage driver. This
|
|
||||||
driver is an implementation of the `storagedriver.StorageDriver` interface which
|
|
||||||
uses local memory for object storage. If you would like to run a registry from
|
|
||||||
volatile memory, use the [`filesystem` driver](filesystem.md) on a ramdisk.
|
|
||||||
|
|
||||||
**IMPORTANT**: This storage driver *does not* persist data across runs. This is why it is only suitable for testing. *Never* use this driver in production.
|
|
||||||
|
|
||||||
## Parameters
|
|
||||||
|
|
||||||
None
|
|
||||||
13
vendor/github.com/docker/distribution/docs/storage-drivers/menu.md
generated
vendored
13
vendor/github.com/docker/distribution/docs/storage-drivers/menu.md
generated
vendored
@@ -1,13 +0,0 @@
|
|||||||
<!--[metadata]>
|
|
||||||
+++
|
|
||||||
title = "Storage Drivers"
|
|
||||||
description = "Storage Drivers"
|
|
||||||
keywords = ["registry, on-prem, images, tags, repository, distribution"]
|
|
||||||
type = "menu"
|
|
||||||
[menu.main]
|
|
||||||
identifier="smn_storagedrivers"
|
|
||||||
parent="smn_registry"
|
|
||||||
weight=7
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
126
vendor/github.com/docker/distribution/docs/storage-drivers/oss.md
generated
vendored
126
vendor/github.com/docker/distribution/docs/storage-drivers/oss.md
generated
vendored
@@ -1,126 +0,0 @@
|
|||||||
<!--[metadata]>
|
|
||||||
+++
|
|
||||||
title = "Aliyun OSS storage driver"
|
|
||||||
description = "Explains how to use the Aliyun OSS storage driver"
|
|
||||||
keywords = ["registry, service, driver, images, storage, OSS, aliyun"]
|
|
||||||
[menu.main]
|
|
||||||
parent="smn_storagedrivers"
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
# Aliyun OSS storage driver
|
|
||||||
|
|
||||||
An implementation of the `storagedriver.StorageDriver` interface which uses [Aliyun OSS](http://www.aliyun.com/product/oss) for object storage.
|
|
||||||
|
|
||||||
## Parameters
|
|
||||||
|
|
||||||
<table>
|
|
||||||
<tr>
|
|
||||||
<th>Parameter</th>
|
|
||||||
<th>Required</th>
|
|
||||||
<th>Description</th>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>accesskeyid</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
yes
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
Your access key ID.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>accesskeysecret</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
yes
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
Your access key secret.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>region</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
yes
|
|
||||||
</td>
|
|
||||||
<td> The name of the OSS region in which you would like to store objects (for example `oss-cn-beijing`). For a list of regions, you can look at <http://docs.aliyun.com/#/oss/product-documentation/domain-region>
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>endpoint</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
An endpoint which defaults to `<bucket>.<region>.aliyuncs.com` or `<bucket>.<region>-internal.aliyuncs.com` (when `internal=true`). You can change the default endpoint by changing this value.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>internal</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td> An internal endpoint or the public endpoint for OSS access. The default is false. For a list of regions, you can look at <http://docs.aliyun.com/#/oss/product-documentation/domain-region>
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>bucket</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
yes
|
|
||||||
</td>
|
|
||||||
<td> The name of your OSS bucket where you wish to store objects (needs to already be created prior to driver initialization).
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>encrypt</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td> Specifies whether you would like your data encrypted on the server side. Defaults to false if not specified.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>secure</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td> Specifies whether to transfer data to the bucket over ssl or not. If you omit this value, `true` is used.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>chunksize</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td> The default part size for multipart uploads (performed by WriteStream) to OSS. The default is 10 MB. Keep in mind that the minimum part size for OSS is 5MB. You might experience better performance for larger chunk sizes depending on the speed of your connection to OSS.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>rootdirectory</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td> The root directory tree in which to store all registry files. Defaults to an empty string (bucket root).
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
</table>
|
|
||||||
320
vendor/github.com/docker/distribution/docs/storage-drivers/s3.md
generated
vendored
320
vendor/github.com/docker/distribution/docs/storage-drivers/s3.md
generated
vendored
@@ -1,320 +0,0 @@
|
|||||||
<!--[metadata]>
|
|
||||||
+++
|
|
||||||
title = "S3 storage driver"
|
|
||||||
description = "Explains how to use the S3 storage drivers"
|
|
||||||
keywords = ["registry, service, driver, images, storage, S3"]
|
|
||||||
[menu.main]
|
|
||||||
parent="smn_storagedrivers"
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
|
|
||||||
# S3 storage driver
|
|
||||||
|
|
||||||
An implementation of the `storagedriver.StorageDriver` interface which uses Amazon S3 or S3 compatible services for object storage.
|
|
||||||
|
|
||||||
## Parameters
|
|
||||||
|
|
||||||
<table>
|
|
||||||
<tr>
|
|
||||||
<th>Parameter</th>
|
|
||||||
<th>Required</th>
|
|
||||||
<th>Description</th>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>accesskey</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
yes
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
Your AWS Access Key.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>secretkey</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
yes
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
Your AWS Secret Key.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>region</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
yes
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
The AWS region in which your bucket exists. For the moment, the Go AWS
|
|
||||||
library in use does not use the newer DNS based bucket routing.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>regionendpoint</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
Endpoint for S3 compatible storage services (Minio, etc)
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>bucket</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
yes
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
The bucket name in which you want to store the registry's data.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>encrypt</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
Specifies whether the registry stores the image in encrypted format or
|
|
||||||
not. A boolean value. The default is false.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>keyid</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
Optional KMS key ID to use for encryption (encrypt must be true, or this
|
|
||||||
parameter will be ignored). The default is none.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>secure</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
Indicates whether to use HTTPS instead of HTTP. A boolean value. The
|
|
||||||
default is <code>true</code>.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>v4auth</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
Indicates whether the registry uses Version 4 of AWS's authentication.
|
|
||||||
Generally, you should set this to <code>true</code> unless you are using an
|
|
||||||
S3 compatible provider that does not support v4 signature signing.
|
|
||||||
If you set this to <code>false</code> then the storage driver will use v2 signature signing.
|
|
||||||
By default, this is <code>true</code>.
|
|
||||||
You can not use v2 signing if you are using AWS S3.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>chunksize</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
The S3 API requires multipart upload chunks to be at least 5MB. This value
|
|
||||||
should be a number that is larger than 5*1024*1024.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>multipartcopychunksize</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
Chunk size for all but the last Upload Part - Copy
|
|
||||||
operation of a copy that uses the multipart upload API.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>multipartcopymaxconcurrency</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
Maximum number of concurrent Upload Part - Copy operations for a
|
|
||||||
copy that uses the multipart upload API.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>multipartcopythresholdsize</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
Objects above this size will be copied using the multipart upload API.
|
|
||||||
PUT Object - Copy is used for objects at or below this size.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>rootdirectory</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
This is a prefix that will be applied to all S3 keys to allow you to segment data in your bucket if necessary.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>storageclass</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
The S3 storage class applied to each registry file. The default value is STANDARD.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>objectacl</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
The S3 Canned ACL for objects. The default value is "private".
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
</table>
|
|
||||||
|
|
||||||
|
|
||||||
`accesskey`: Your aws access key.
|
|
||||||
|
|
||||||
`secretkey`: Your aws secret key.
|
|
||||||
|
|
||||||
**Note** You can provide empty strings for your access and secret keys if you plan on running the driver on an ec2 instance and will handle authentication with the instance's credentials.
|
|
||||||
|
|
||||||
`region`: The name of the aws region in which you would like to store objects (for example `us-east-1`). For a list of regions, you can look at http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html
|
|
||||||
|
|
||||||
`regionendpoint`: (optional) Endpoint URL for S3 compatible APIs. This should not be provided when using Amazon S3.
|
|
||||||
|
|
||||||
`bucket`: The name of your S3 bucket where you wish to store objects. The bucket must exist prior to the driver initialization.
|
|
||||||
|
|
||||||
`encrypt`: (optional) Whether you would like your data encrypted on the server side (defaults to false if not specified).
|
|
||||||
|
|
||||||
`keyid`: (optional) Whether you would like your data encrypted with this KMS key ID (defaults to none if not specified, will be ignored if encrypt is not true).
|
|
||||||
|
|
||||||
`secure`: (optional) Whether you would like to transfer data to the bucket over ssl or not. Defaults to true (meaning transferring over ssl) if not specified. Note that while setting this to false will improve performance, it is not recommended due to security concerns.
|
|
||||||
|
|
||||||
`v4auth`: (optional) Whether you would like to use aws signature version 4 with your requests. This defaults to true if not specified (note that the eu-central-1 region does not work with version 2 signatures, so the driver will error out if initialized with this region and v4auth set to false)
|
|
||||||
|
|
||||||
`chunksize`: (optional) The default part size for multipart uploads (performed by WriteStream) to S3. The default is 10 MB. Keep in mind that the minimum part size for S3 is 5MB. Depending on the speed of your connection to S3, a larger chunk size may result in better performance; faster connections will benefit from larger chunk sizes.
|
|
||||||
|
|
||||||
`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root).
|
|
||||||
|
|
||||||
`storageclass`: (optional) The storage class applied to each registry file. Defaults to STANDARD. Valid options are NONE, STANDARD and REDUCED_REDUNDANCY. Use NONE if your S3 compatible provider does not support storage classes.
|
|
||||||
|
|
||||||
`objectacl`: (optional) The canned object ACL to be applied to each registry object. Defaults to `private`. If you are using a bucket owned by another AWS account, it is recommended that you set this to `bucket-owner-full-control` so that the bucket owner can access your objects. Other valid options are available in the [AWS S3 documentation](http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl).
|
|
||||||
|
|
||||||
## S3 permission scopes
|
|
||||||
|
|
||||||
The following IAM permissions are required by the registry for push and pull. See [the S3 policy documentation](http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) for more details.
|
|
||||||
|
|
||||||
```
|
|
||||||
"Statement": [
|
|
||||||
{
|
|
||||||
"Effect": "Allow",
|
|
||||||
"Action": [
|
|
||||||
"s3:ListBucket",
|
|
||||||
"s3:GetBucketLocation",
|
|
||||||
"s3:ListBucketMultipartUploads"
|
|
||||||
],
|
|
||||||
"Resource": "arn:aws:s3:::mybucket"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"Effect": "Allow",
|
|
||||||
"Action": [
|
|
||||||
"s3:PutObject",
|
|
||||||
"s3:GetObject",
|
|
||||||
"s3:DeleteObject",
|
|
||||||
"s3:ListMultipartUploadParts",
|
|
||||||
"s3:AbortMultipartUpload"
|
|
||||||
],
|
|
||||||
"Resource": "arn:aws:s3:::mybucket/*"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
```
|
|
||||||
|
|
||||||
# CloudFront as Middleware with S3 backend
|
|
||||||
|
|
||||||
## Use Case
|
|
||||||
|
|
||||||
Adding CloudFront as a middleware for your S3 backed registry can dramatically improve pull times. Your registry will have the ability to retrieve your images from edge servers, rather than the geographically limited location of your S3 bucket. The farther your registry is from your bucket, the more improvements you will see. See [Amazon CloudFront](https://aws.amazon.com/cloudfront/details/).
|
|
||||||
|
|
||||||
## Configuring CloudFront for Distribution
|
|
||||||
|
|
||||||
If you are unfamiliar with creating a CloudFront distribution, see [Getting Started with Cloudfront](http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/GettingStarted.html).
|
|
||||||
|
|
||||||
Defaults can be kept in most areas except:
|
|
||||||
|
|
||||||
### Origin:
|
|
||||||
|
|
||||||
The CloudFront distribution must be created such that the `Origin Path` is set to the directory level of the root "docker" key in S3. If your registry exists on the root of the bucket, this path should be left blank.
|
|
||||||
|
|
||||||
### Behaviors:
|
|
||||||
|
|
||||||
- Viewer Protocol Policy: HTTPS Only
|
|
||||||
- Allowed HTTP Methods: GET, HEAD, OPTIONS, PUT, POST, PATCH, DELETE
|
|
||||||
- Cached HTTP Methods: OPTIONS (checked)
|
|
||||||
- Restrict Viewer Access (Use Signed URLs or Signed Cookies): Yes
|
|
||||||
- Trusted Signers: Self (Can add other accounts as long as you have access to CloudFront Key Pairs for those additional accounts)
|
|
||||||
|
|
||||||
## Registry configuration
|
|
||||||
|
|
||||||
Here the `middleware` option is used. It is still important to keep the `storage` option as CloudFront will only handle `pull` actions; `push` actions are still directly written to S3.
|
|
||||||
|
|
||||||
The following example shows what you will need at minimum:
|
|
||||||
```
|
|
||||||
...
|
|
||||||
storage:
|
|
||||||
s3:
|
|
||||||
region: us-east-1
|
|
||||||
bucket: docker.myregistry.com
|
|
||||||
middleware:
|
|
||||||
storage:
|
|
||||||
- name: cloudfront
|
|
||||||
options:
|
|
||||||
baseurl: https://abcdefghijklmn.cloudfront.net/
|
|
||||||
privatekey: /etc/docker/cloudfront/pk-ABCEDFGHIJKLMNOPQRST.pem
|
|
||||||
keypairid: ABCEDFGHIJKLMNOPQRST
|
|
||||||
...
|
|
||||||
```
|
|
||||||
|
|
||||||
## CloudFront Key-Pair
|
|
||||||
|
|
||||||
A CloudFront key-pair is required for all AWS accounts needing access to your CloudFront distribution. For information, please see [Creating CloudFront Key Pairs](http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-trusted-signers.html#private-content-creating-cloudfront-key-pairs).
|
|
||||||
268
vendor/github.com/docker/distribution/docs/storage-drivers/swift.md
generated
vendored
268
vendor/github.com/docker/distribution/docs/storage-drivers/swift.md
generated
vendored
@@ -1,268 +0,0 @@
|
|||||||
<!--[metadata]>
|
|
||||||
+++
|
|
||||||
title = "Swift storage driver"
|
|
||||||
description = "Explains how to use the OpenStack swift storage driver"
|
|
||||||
keywords = ["registry, service, driver, images, storage, swift"]
|
|
||||||
[menu.main]
|
|
||||||
parent="smn_storagedrivers"
|
|
||||||
+++
|
|
||||||
<![end-metadata]-->
|
|
||||||
|
|
||||||
|
|
||||||
# OpenStack Swift storage driver
|
|
||||||
|
|
||||||
An implementation of the `storagedriver.StorageDriver` interface that uses [OpenStack Swift](http://docs.openstack.org/developer/swift/) for object storage.
|
|
||||||
|
|
||||||
## Parameters
|
|
||||||
|
|
||||||
|
|
||||||
<table>
|
|
||||||
<tr>
|
|
||||||
<th>Parameter</th>
|
|
||||||
<th>Required</th>
|
|
||||||
<th>Description</th>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>authurl</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
yes
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
URL for obtaining an auth token. https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>username</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
yes
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
Your Openstack user name.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>password</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
yes
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
Your Openstack password.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>region</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
The Openstack region in which your container exists.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>container</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
yes
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
The name of your Swift container where you wish to store the registry's data. The driver creates the named container during its initialization.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>tenant</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
Your Openstack tenant name. You can either use <code>tenant</code> or <code>tenantid</code>.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>tenantid</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
Your Openstack tenant id. You can either use <code>tenant</code> or <code>tenantid</code>.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>domain</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
Your user's Openstack domain name for Identity v3 API. You can either use <code>domain</code> or <code>domainid</code>.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>domainid</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
Your user's Openstack domain id for Identity v3 API. You can either use <code>domain</code> or <code>domainid</code>.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>tenantdomain</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
Your tenant's Openstack domain name for Identity v3 API. Only necessary if different from the <code>domain</code>. You can either use <code>tenantdomain</code> or <code>tenantdomainid</code>.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>tenantdomainid</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
Your tenant's Openstack domain id for Identity v3 API. Only necessary if different from the <code>domain</code>. You can either use <code>tenantdomain</code> or <code>tenantdomainid</code>.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>trustid</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
Your Openstack trust id for Identity v3 API.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>insecureskipverify</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
true to skip TLS verification, false by default.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>chunksize</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
Size of the data segments for the Swift Dynamic Large Objects. This value should be a number (defaults to 5M).
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>prefix</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
This is a prefix that will be applied to all Swift keys to allow you to segment data in your container if necessary. Defaults to the empty string which is the container's root.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>secretkey</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
The secret key used to generate temporary URLs.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>accesskey</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
The access key to generate temporary URLs. It is used by HP Cloud Object Storage in addition to the `secretkey` parameter.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>authversion</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
Specify the OpenStack Auth's version,for example <code>3</code>. By default the driver will autodetect the auth's version from the AuthURL.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>endpointtype</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
no
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
The endpoint type used when connecting to swift. Possible values are `public`, `internal` and `admin`. Default is `public`.
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
</table>
|
|
||||||
|
|
||||||
The features supported by the Swift server are queried by requesting the `/info` URL on the server. In case the administrator
|
|
||||||
disabled that feature, the configuration file can specify the following optional parameters :
|
|
||||||
|
|
||||||
<table>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>tempurlcontainerkey</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
<p>
|
|
||||||
Specify whether to use container secret key to generate temporary URL when set to true, or the account secret key otherwise.</p>
|
|
||||||
</p>
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
<code>tempurlmethods</code>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
<p>
|
|
||||||
Array of HTTP methods that are supported by the TempURL middleware of the Swift server. Example:</p>
|
|
||||||
<code>
|
|
||||||
- tempurlmethods:
|
|
||||||
- GET
|
|
||||||
- PUT
|
|
||||||
- HEAD
|
|
||||||
- POST
|
|
||||||
- DELETE
|
|
||||||
</code>
|
|
||||||
</p>
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
</table>
|
|
||||||
7
vendor/github.com/docker/distribution/manifest/schema1/config_builder.go
generated
vendored
7
vendor/github.com/docker/distribution/manifest/schema1/config_builder.go
generated
vendored
@@ -9,11 +9,10 @@ import (
|
|||||||
|
|
||||||
"github.com/docker/distribution"
|
"github.com/docker/distribution"
|
||||||
"github.com/docker/distribution/context"
|
"github.com/docker/distribution/context"
|
||||||
"github.com/docker/distribution/reference"
|
|
||||||
"github.com/docker/libtrust"
|
|
||||||
|
|
||||||
"github.com/docker/distribution/digest"
|
"github.com/docker/distribution/digest"
|
||||||
"github.com/docker/distribution/manifest"
|
"github.com/docker/distribution/manifest"
|
||||||
|
"github.com/docker/distribution/reference"
|
||||||
|
"github.com/docker/libtrust"
|
||||||
)
|
)
|
||||||
|
|
||||||
type diffID digest.Digest
|
type diffID digest.Digest
|
||||||
@@ -95,7 +94,7 @@ func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Mani
|
|||||||
}
|
}
|
||||||
|
|
||||||
if len(img.RootFS.DiffIDs) != len(mb.descriptors) {
|
if len(img.RootFS.DiffIDs) != len(mb.descriptors) {
|
||||||
return nil, errors.New("number of descriptors and number of layers in rootfs must match")
|
return nil, fmt.Errorf("number of descriptors and number of layers in rootfs must match: len(%v) != len(%v)", img.RootFS.DiffIDs, mb.descriptors)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Generate IDs for each layer
|
// Generate IDs for each layer
|
||||||
|
|||||||
4
vendor/github.com/docker/distribution/manifest/schema2/builder_test.go
generated
vendored
4
vendor/github.com/docker/distribution/manifest/schema2/builder_test.go
generated
vendored
@@ -203,8 +203,8 @@ func TestBuilder(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
references := manifest.References()
|
references := manifest.References()
|
||||||
|
expected := append([]distribution.Descriptor{manifest.Target()}, descriptors...)
|
||||||
if !reflect.DeepEqual(references, descriptors) {
|
if !reflect.DeepEqual(references, expected) {
|
||||||
t.Fatal("References() does not match the descriptors added")
|
t.Fatal("References() does not match the descriptors added")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
7
vendor/github.com/docker/distribution/manifest/schema2/manifest.go
generated
vendored
7
vendor/github.com/docker/distribution/manifest/schema2/manifest.go
generated
vendored
@@ -18,7 +18,7 @@ const (
|
|||||||
MediaTypeConfig = "application/vnd.docker.container.image.v1+json"
|
MediaTypeConfig = "application/vnd.docker.container.image.v1+json"
|
||||||
|
|
||||||
// MediaTypePluginConfig specifies the mediaType for plugin configuration.
|
// MediaTypePluginConfig specifies the mediaType for plugin configuration.
|
||||||
MediaTypePluginConfig = "application/vnd.docker.plugin.image.v0+json"
|
MediaTypePluginConfig = "application/vnd.docker.plugin.v1+json"
|
||||||
|
|
||||||
// MediaTypeLayer is the mediaType used for layers referenced by the
|
// MediaTypeLayer is the mediaType used for layers referenced by the
|
||||||
// manifest.
|
// manifest.
|
||||||
@@ -69,7 +69,10 @@ type Manifest struct {
|
|||||||
|
|
||||||
// References returnes the descriptors of this manifests references.
|
// References returnes the descriptors of this manifests references.
|
||||||
func (m Manifest) References() []distribution.Descriptor {
|
func (m Manifest) References() []distribution.Descriptor {
|
||||||
return m.Layers
|
references := make([]distribution.Descriptor, 0, 1+len(m.Layers))
|
||||||
|
references = append(references, m.Config)
|
||||||
|
references = append(references, m.Layers...)
|
||||||
|
return references
|
||||||
}
|
}
|
||||||
|
|
||||||
// Target returns the target of this signed manifest.
|
// Target returns the target of this signed manifest.
|
||||||
|
|||||||
14
vendor/github.com/docker/distribution/manifest/schema2/manifest_test.go
generated
vendored
14
vendor/github.com/docker/distribution/manifest/schema2/manifest_test.go
generated
vendored
@@ -90,16 +90,22 @@ func TestManifest(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
references := deserialized.References()
|
references := deserialized.References()
|
||||||
if len(references) != 1 {
|
if len(references) != 2 {
|
||||||
t.Fatalf("unexpected number of references: %d", len(references))
|
t.Fatalf("unexpected number of references: %d", len(references))
|
||||||
}
|
}
|
||||||
if references[0].Digest != "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b" {
|
|
||||||
|
if !reflect.DeepEqual(references[0], target) {
|
||||||
|
t.Fatalf("first reference should be target: %v != %v", references[0], target)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test the second reference
|
||||||
|
if references[1].Digest != "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b" {
|
||||||
t.Fatalf("unexpected digest in reference: %s", references[0].Digest.String())
|
t.Fatalf("unexpected digest in reference: %s", references[0].Digest.String())
|
||||||
}
|
}
|
||||||
if references[0].MediaType != MediaTypeLayer {
|
if references[1].MediaType != MediaTypeLayer {
|
||||||
t.Fatalf("unexpected media type in reference: %s", references[0].MediaType)
|
t.Fatalf("unexpected media type in reference: %s", references[0].MediaType)
|
||||||
}
|
}
|
||||||
if references[0].Size != 153263 {
|
if references[1].Size != 153263 {
|
||||||
t.Fatalf("unexpected size in reference: %d", references[0].Size)
|
t.Fatalf("unexpected size in reference: %d", references[0].Size)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
12
vendor/github.com/docker/distribution/manifests.go
generated
vendored
12
vendor/github.com/docker/distribution/manifests.go
generated
vendored
@@ -12,8 +12,13 @@ import (
|
|||||||
// references and an optional target
|
// references and an optional target
|
||||||
type Manifest interface {
|
type Manifest interface {
|
||||||
// References returns a list of objects which make up this manifest.
|
// References returns a list of objects which make up this manifest.
|
||||||
// The references are strictly ordered from base to head. A reference
|
// A reference is anything which can be represented by a
|
||||||
// is anything which can be represented by a distribution.Descriptor
|
// distribution.Descriptor. These can consist of layers, resources or other
|
||||||
|
// manifests.
|
||||||
|
//
|
||||||
|
// While no particular order is required, implementations should return
|
||||||
|
// them from highest to lowest priority. For example, one might want to
|
||||||
|
// return the base layer before the top layer.
|
||||||
References() []Descriptor
|
References() []Descriptor
|
||||||
|
|
||||||
// Payload provides the serialized format of the manifest, in addition to
|
// Payload provides the serialized format of the manifest, in addition to
|
||||||
@@ -36,6 +41,9 @@ type ManifestBuilder interface {
|
|||||||
// AppendReference includes the given object in the manifest after any
|
// AppendReference includes the given object in the manifest after any
|
||||||
// existing dependencies. If the add fails, such as when adding an
|
// existing dependencies. If the add fails, such as when adding an
|
||||||
// unsupported dependency, an error may be returned.
|
// unsupported dependency, an error may be returned.
|
||||||
|
//
|
||||||
|
// The destination of the reference is dependent on the manifest type and
|
||||||
|
// the dependency type.
|
||||||
AppendReference(dependency Describable) error
|
AppendReference(dependency Describable) error
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
32
vendor/github.com/docker/distribution/reference/reference.go
generated
vendored
32
vendor/github.com/docker/distribution/reference/reference.go
generated
vendored
@@ -24,6 +24,7 @@ package reference
|
|||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"path"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/docker/distribution/digest"
|
"github.com/docker/distribution/digest"
|
||||||
@@ -138,7 +139,7 @@ type Canonical interface {
|
|||||||
func SplitHostname(named Named) (string, string) {
|
func SplitHostname(named Named) (string, string) {
|
||||||
name := named.Name()
|
name := named.Name()
|
||||||
match := anchoredNameRegexp.FindStringSubmatch(name)
|
match := anchoredNameRegexp.FindStringSubmatch(name)
|
||||||
if match == nil || len(match) != 3 {
|
if len(match) != 3 {
|
||||||
return "", name
|
return "", name
|
||||||
}
|
}
|
||||||
return match[1], match[2]
|
return match[1], match[2]
|
||||||
@@ -218,6 +219,13 @@ func WithTag(name Named, tag string) (NamedTagged, error) {
|
|||||||
if !anchoredTagRegexp.MatchString(tag) {
|
if !anchoredTagRegexp.MatchString(tag) {
|
||||||
return nil, ErrTagInvalidFormat
|
return nil, ErrTagInvalidFormat
|
||||||
}
|
}
|
||||||
|
if canonical, ok := name.(Canonical); ok {
|
||||||
|
return reference{
|
||||||
|
name: name.Name(),
|
||||||
|
tag: tag,
|
||||||
|
digest: canonical.Digest(),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
return taggedReference{
|
return taggedReference{
|
||||||
name: name.Name(),
|
name: name.Name(),
|
||||||
tag: tag,
|
tag: tag,
|
||||||
@@ -230,12 +238,34 @@ func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
|
|||||||
if !anchoredDigestRegexp.MatchString(digest.String()) {
|
if !anchoredDigestRegexp.MatchString(digest.String()) {
|
||||||
return nil, ErrDigestInvalidFormat
|
return nil, ErrDigestInvalidFormat
|
||||||
}
|
}
|
||||||
|
if tagged, ok := name.(Tagged); ok {
|
||||||
|
return reference{
|
||||||
|
name: name.Name(),
|
||||||
|
tag: tagged.Tag(),
|
||||||
|
digest: digest,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
return canonicalReference{
|
return canonicalReference{
|
||||||
name: name.Name(),
|
name: name.Name(),
|
||||||
digest: digest,
|
digest: digest,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Match reports whether ref matches the specified pattern.
|
||||||
|
// See https://godoc.org/path#Match for supported patterns.
|
||||||
|
func Match(pattern string, ref Reference) (bool, error) {
|
||||||
|
matched, err := path.Match(pattern, ref.String())
|
||||||
|
if namedRef, isNamed := ref.(Named); isNamed && !matched {
|
||||||
|
matched, _ = path.Match(pattern, namedRef.Name())
|
||||||
|
}
|
||||||
|
return matched, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// TrimNamed removes any tag or digest from the named reference.
|
||||||
|
func TrimNamed(ref Named) Named {
|
||||||
|
return repository(ref.Name())
|
||||||
|
}
|
||||||
|
|
||||||
func getBestReferenceType(ref reference) Reference {
|
func getBestReferenceType(ref reference) Reference {
|
||||||
if ref.name == "" {
|
if ref.name == "" {
|
||||||
// Allow digest only references
|
// Allow digest only references
|
||||||
|
|||||||
107
vendor/github.com/docker/distribution/reference/reference_test.go
generated
vendored
107
vendor/github.com/docker/distribution/reference/reference_test.go
generated
vendored
@@ -467,6 +467,7 @@ func TestSerialization(t *testing.T) {
|
|||||||
func TestWithTag(t *testing.T) {
|
func TestWithTag(t *testing.T) {
|
||||||
testcases := []struct {
|
testcases := []struct {
|
||||||
name string
|
name string
|
||||||
|
digest digest.Digest
|
||||||
tag string
|
tag string
|
||||||
combined string
|
combined string
|
||||||
}{
|
}{
|
||||||
@@ -490,6 +491,12 @@ func TestWithTag(t *testing.T) {
|
|||||||
tag: "TAG5",
|
tag: "TAG5",
|
||||||
combined: "test.com:8000/foo:TAG5",
|
combined: "test.com:8000/foo:TAG5",
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
name: "test.com:8000/foo",
|
||||||
|
digest: "sha256:1234567890098765432112345667890098765",
|
||||||
|
tag: "TAG5",
|
||||||
|
combined: "test.com:8000/foo:TAG5@sha256:1234567890098765432112345667890098765",
|
||||||
|
},
|
||||||
}
|
}
|
||||||
for _, testcase := range testcases {
|
for _, testcase := range testcases {
|
||||||
failf := func(format string, v ...interface{}) {
|
failf := func(format string, v ...interface{}) {
|
||||||
@@ -501,6 +508,14 @@ func TestWithTag(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
failf("error parsing name: %s", err)
|
failf("error parsing name: %s", err)
|
||||||
}
|
}
|
||||||
|
if testcase.digest != "" {
|
||||||
|
canonical, err := WithDigest(named, testcase.digest)
|
||||||
|
if err != nil {
|
||||||
|
failf("error adding digest")
|
||||||
|
}
|
||||||
|
named = canonical
|
||||||
|
}
|
||||||
|
|
||||||
tagged, err := WithTag(named, testcase.tag)
|
tagged, err := WithTag(named, testcase.tag)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
failf("WithTag failed: %s", err)
|
failf("WithTag failed: %s", err)
|
||||||
@@ -515,6 +530,7 @@ func TestWithDigest(t *testing.T) {
|
|||||||
testcases := []struct {
|
testcases := []struct {
|
||||||
name string
|
name string
|
||||||
digest digest.Digest
|
digest digest.Digest
|
||||||
|
tag string
|
||||||
combined string
|
combined string
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
@@ -532,6 +548,12 @@ func TestWithDigest(t *testing.T) {
|
|||||||
digest: "sha256:1234567890098765432112345667890098765",
|
digest: "sha256:1234567890098765432112345667890098765",
|
||||||
combined: "test.com:8000/foo@sha256:1234567890098765432112345667890098765",
|
combined: "test.com:8000/foo@sha256:1234567890098765432112345667890098765",
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
name: "test.com:8000/foo",
|
||||||
|
digest: "sha256:1234567890098765432112345667890098765",
|
||||||
|
tag: "latest",
|
||||||
|
combined: "test.com:8000/foo:latest@sha256:1234567890098765432112345667890098765",
|
||||||
|
},
|
||||||
}
|
}
|
||||||
for _, testcase := range testcases {
|
for _, testcase := range testcases {
|
||||||
failf := func(format string, v ...interface{}) {
|
failf := func(format string, v ...interface{}) {
|
||||||
@@ -543,6 +565,13 @@ func TestWithDigest(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
failf("error parsing name: %s", err)
|
failf("error parsing name: %s", err)
|
||||||
}
|
}
|
||||||
|
if testcase.tag != "" {
|
||||||
|
tagged, err := WithTag(named, testcase.tag)
|
||||||
|
if err != nil {
|
||||||
|
failf("error adding tag")
|
||||||
|
}
|
||||||
|
named = tagged
|
||||||
|
}
|
||||||
digested, err := WithDigest(named, testcase.digest)
|
digested, err := WithDigest(named, testcase.digest)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
failf("WithDigest failed: %s", err)
|
failf("WithDigest failed: %s", err)
|
||||||
@@ -552,3 +581,81 @@ func TestWithDigest(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestMatchError(t *testing.T) {
|
||||||
|
named, err := Parse("foo")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
_, err = Match("[-x]", named)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("expected an error, got nothing")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMatch(t *testing.T) {
|
||||||
|
matchCases := []struct {
|
||||||
|
reference string
|
||||||
|
pattern string
|
||||||
|
expected bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
reference: "foo",
|
||||||
|
pattern: "foo/**/ba[rz]",
|
||||||
|
expected: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
reference: "foo/any/bat",
|
||||||
|
pattern: "foo/**/ba[rz]",
|
||||||
|
expected: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
reference: "foo/a/bar",
|
||||||
|
pattern: "foo/**/ba[rz]",
|
||||||
|
expected: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
reference: "foo/b/baz",
|
||||||
|
pattern: "foo/**/ba[rz]",
|
||||||
|
expected: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
reference: "foo/c/baz:tag",
|
||||||
|
pattern: "foo/**/ba[rz]",
|
||||||
|
expected: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
reference: "foo/c/baz:tag",
|
||||||
|
pattern: "foo/*/baz:tag",
|
||||||
|
expected: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
reference: "foo/c/baz:tag",
|
||||||
|
pattern: "foo/c/baz:tag",
|
||||||
|
expected: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
reference: "example.com/foo/c/baz:tag",
|
||||||
|
pattern: "*/foo/c/baz",
|
||||||
|
expected: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
reference: "example.com/foo/c/baz:tag",
|
||||||
|
pattern: "example.com/foo/c/baz",
|
||||||
|
expected: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, c := range matchCases {
|
||||||
|
named, err := Parse(c.reference)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
actual, err := Match(c.pattern, named)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if actual != c.expected {
|
||||||
|
t.Fatalf("expected %s match %s to be %v, was %v", c.reference, c.pattern, c.expected, actual)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
161
vendor/github.com/docker/distribution/registry/api/v2/headerparser.go
generated
vendored
Normal file
161
vendor/github.com/docker/distribution/registry/api/v2/headerparser.go
generated
vendored
Normal file
@@ -0,0 +1,161 @@
|
|||||||
|
package v2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
"unicode"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// according to rfc7230
|
||||||
|
reToken = regexp.MustCompile(`^[^"(),/:;<=>?@[\]{}[:space:][:cntrl:]]+`)
|
||||||
|
reQuotedValue = regexp.MustCompile(`^[^\\"]+`)
|
||||||
|
reEscapedCharacter = regexp.MustCompile(`^[[:blank:][:graph:]]`)
|
||||||
|
)
|
||||||
|
|
||||||
|
// parseForwardedHeader is a benevolent parser of Forwarded header defined in rfc7239. The header contains
|
||||||
|
// a comma-separated list of forwarding key-value pairs. Each list element is set by single proxy. The
|
||||||
|
// function parses only the first element of the list, which is set by the very first proxy. It returns a map
|
||||||
|
// of corresponding key-value pairs and an unparsed slice of the input string.
|
||||||
|
//
|
||||||
|
// Examples of Forwarded header values:
|
||||||
|
//
|
||||||
|
// 1. Forwarded: For=192.0.2.43; Proto=https,For="[2001:db8:cafe::17]",For=unknown
|
||||||
|
// 2. Forwarded: for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80"
|
||||||
|
//
|
||||||
|
// The first will be parsed into {"for": "192.0.2.43", "proto": "https"} while the second into
|
||||||
|
// {"for": "192.0.2.43:443", "host": "registry.example.org"}.
|
||||||
|
func parseForwardedHeader(forwarded string) (map[string]string, string, error) {
|
||||||
|
// Following are states of forwarded header parser. Any state could transition to a failure.
|
||||||
|
const (
|
||||||
|
// terminating state; can transition to Parameter
|
||||||
|
stateElement = iota
|
||||||
|
// terminating state; can transition to KeyValueDelimiter
|
||||||
|
stateParameter
|
||||||
|
// can transition to Value
|
||||||
|
stateKeyValueDelimiter
|
||||||
|
// can transition to one of { QuotedValue, PairEnd }
|
||||||
|
stateValue
|
||||||
|
// can transition to one of { EscapedCharacter, PairEnd }
|
||||||
|
stateQuotedValue
|
||||||
|
// can transition to one of { QuotedValue }
|
||||||
|
stateEscapedCharacter
|
||||||
|
// terminating state; can transition to one of { Parameter, Element }
|
||||||
|
statePairEnd
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
parameter string
|
||||||
|
value string
|
||||||
|
parse = forwarded[:]
|
||||||
|
res = map[string]string{}
|
||||||
|
state = stateElement
|
||||||
|
)
|
||||||
|
|
||||||
|
Loop:
|
||||||
|
for {
|
||||||
|
// skip spaces unless in quoted value
|
||||||
|
if state != stateQuotedValue && state != stateEscapedCharacter {
|
||||||
|
parse = strings.TrimLeftFunc(parse, unicode.IsSpace)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(parse) == 0 {
|
||||||
|
if state != stateElement && state != statePairEnd && state != stateParameter {
|
||||||
|
return nil, parse, fmt.Errorf("unexpected end of input")
|
||||||
|
}
|
||||||
|
// terminating
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
switch state {
|
||||||
|
// terminate at list element delimiter
|
||||||
|
case stateElement:
|
||||||
|
if parse[0] == ',' {
|
||||||
|
parse = parse[1:]
|
||||||
|
break Loop
|
||||||
|
}
|
||||||
|
state = stateParameter
|
||||||
|
|
||||||
|
// parse parameter (the key of key-value pair)
|
||||||
|
case stateParameter:
|
||||||
|
match := reToken.FindString(parse)
|
||||||
|
if len(match) == 0 {
|
||||||
|
return nil, parse, fmt.Errorf("failed to parse token at position %d", len(forwarded)-len(parse))
|
||||||
|
}
|
||||||
|
parameter = strings.ToLower(match)
|
||||||
|
parse = parse[len(match):]
|
||||||
|
state = stateKeyValueDelimiter
|
||||||
|
|
||||||
|
// parse '='
|
||||||
|
case stateKeyValueDelimiter:
|
||||||
|
if parse[0] != '=' {
|
||||||
|
return nil, parse, fmt.Errorf("expected '=', not '%c' at position %d", parse[0], len(forwarded)-len(parse))
|
||||||
|
}
|
||||||
|
parse = parse[1:]
|
||||||
|
state = stateValue
|
||||||
|
|
||||||
|
// parse value or quoted value
|
||||||
|
case stateValue:
|
||||||
|
if parse[0] == '"' {
|
||||||
|
parse = parse[1:]
|
||||||
|
state = stateQuotedValue
|
||||||
|
} else {
|
||||||
|
value = reToken.FindString(parse)
|
||||||
|
if len(value) == 0 {
|
||||||
|
return nil, parse, fmt.Errorf("failed to parse value at position %d", len(forwarded)-len(parse))
|
||||||
|
}
|
||||||
|
if _, exists := res[parameter]; exists {
|
||||||
|
return nil, parse, fmt.Errorf("duplicate parameter %q at position %d", parameter, len(forwarded)-len(parse))
|
||||||
|
}
|
||||||
|
res[parameter] = value
|
||||||
|
parse = parse[len(value):]
|
||||||
|
value = ""
|
||||||
|
state = statePairEnd
|
||||||
|
}
|
||||||
|
|
||||||
|
// parse a part of quoted value until the first backslash
|
||||||
|
case stateQuotedValue:
|
||||||
|
match := reQuotedValue.FindString(parse)
|
||||||
|
value += match
|
||||||
|
parse = parse[len(match):]
|
||||||
|
switch {
|
||||||
|
case len(parse) == 0:
|
||||||
|
return nil, parse, fmt.Errorf("unterminated quoted string")
|
||||||
|
case parse[0] == '"':
|
||||||
|
res[parameter] = value
|
||||||
|
value = ""
|
||||||
|
parse = parse[1:]
|
||||||
|
state = statePairEnd
|
||||||
|
case parse[0] == '\\':
|
||||||
|
parse = parse[1:]
|
||||||
|
state = stateEscapedCharacter
|
||||||
|
}
|
||||||
|
|
||||||
|
// parse escaped character in a quoted string, ignore the backslash
|
||||||
|
// transition back to QuotedValue state
|
||||||
|
case stateEscapedCharacter:
|
||||||
|
c := reEscapedCharacter.FindString(parse)
|
||||||
|
if len(c) == 0 {
|
||||||
|
return nil, parse, fmt.Errorf("invalid escape sequence at position %d", len(forwarded)-len(parse)-1)
|
||||||
|
}
|
||||||
|
value += c
|
||||||
|
parse = parse[1:]
|
||||||
|
state = stateQuotedValue
|
||||||
|
|
||||||
|
// expect either a new key-value pair, new list or end of input
|
||||||
|
case statePairEnd:
|
||||||
|
switch parse[0] {
|
||||||
|
case ';':
|
||||||
|
parse = parse[1:]
|
||||||
|
state = stateParameter
|
||||||
|
case ',':
|
||||||
|
state = stateElement
|
||||||
|
default:
|
||||||
|
return nil, parse, fmt.Errorf("expected ',' or ';', not %c at position %d", parse[0], len(forwarded)-len(parse))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return res, parse, nil
|
||||||
|
}
|
||||||
161
vendor/github.com/docker/distribution/registry/api/v2/headerparser_test.go
generated
vendored
Normal file
161
vendor/github.com/docker/distribution/registry/api/v2/headerparser_test.go
generated
vendored
Normal file
@@ -0,0 +1,161 @@
|
|||||||
|
package v2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestParseForwardedHeader(t *testing.T) {
|
||||||
|
for _, tc := range []struct {
|
||||||
|
name string
|
||||||
|
raw string
|
||||||
|
expected map[string]string
|
||||||
|
expectedRest string
|
||||||
|
expectedError bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "empty",
|
||||||
|
raw: "",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "one pair",
|
||||||
|
raw: " key = value ",
|
||||||
|
expected: map[string]string{"key": "value"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "two pairs",
|
||||||
|
raw: " key1 = value1; key2=value2",
|
||||||
|
expected: map[string]string{"key1": "value1", "key2": "value2"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "uppercase parameter",
|
||||||
|
raw: "KeY=VaL",
|
||||||
|
expected: map[string]string{"key": "VaL"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "missing key=value pair - be tolerant",
|
||||||
|
raw: "key=val;",
|
||||||
|
expected: map[string]string{"key": "val"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "quoted values",
|
||||||
|
raw: `key="val";param = "[[ $((1 + 1)) == 3 ]] && echo panic!;" ; p=" abcd "`,
|
||||||
|
expected: map[string]string{"key": "val", "param": "[[ $((1 + 1)) == 3 ]] && echo panic!;", "p": " abcd "},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "empty quoted value",
|
||||||
|
raw: `key=""`,
|
||||||
|
expected: map[string]string{"key": ""},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "quoted double quotes",
|
||||||
|
raw: `key="\"value\""`,
|
||||||
|
expected: map[string]string{"key": `"value"`},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "quoted backslash",
|
||||||
|
raw: `key="\"\\\""`,
|
||||||
|
expected: map[string]string{"key": `"\"`},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "ignore subsequent elements",
|
||||||
|
raw: "key=a, param= b",
|
||||||
|
expected: map[string]string{"key": "a"},
|
||||||
|
expectedRest: " param= b",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "empty element - be tolerant",
|
||||||
|
raw: " , key=val",
|
||||||
|
expectedRest: " key=val",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "obscure key",
|
||||||
|
raw: `ob₷C&r€ = value`,
|
||||||
|
expected: map[string]string{`ob₷c&r€`: "value"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "duplicate parameter",
|
||||||
|
raw: "key=a; p=b; key=c",
|
||||||
|
expectedError: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "empty parameter",
|
||||||
|
raw: "=value",
|
||||||
|
expectedError: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "empty value",
|
||||||
|
raw: "key= ",
|
||||||
|
expectedError: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "empty value before a new element ",
|
||||||
|
raw: "key=,",
|
||||||
|
expectedError: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "empty value before a new pair",
|
||||||
|
raw: "key=;",
|
||||||
|
expectedError: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "just parameter",
|
||||||
|
raw: "key",
|
||||||
|
expectedError: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "missing key-value",
|
||||||
|
raw: "a=b;;",
|
||||||
|
expectedError: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "unclosed quoted value",
|
||||||
|
raw: `key="value`,
|
||||||
|
expectedError: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "escaped terminating dquote",
|
||||||
|
raw: `key="value\"`,
|
||||||
|
expectedError: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "just a quoted value",
|
||||||
|
raw: `"key=val"`,
|
||||||
|
expectedError: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "quoted key",
|
||||||
|
raw: `"key"=val`,
|
||||||
|
expectedError: true,
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
parsed, rest, err := parseForwardedHeader(tc.raw)
|
||||||
|
if err != nil && !tc.expectedError {
|
||||||
|
t.Errorf("[%s] got unexpected error: %v", tc.name, err)
|
||||||
|
}
|
||||||
|
if err == nil && tc.expectedError {
|
||||||
|
t.Errorf("[%s] got unexpected non-error", tc.name)
|
||||||
|
}
|
||||||
|
if err != nil || tc.expectedError {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for key, value := range tc.expected {
|
||||||
|
v, exists := parsed[key]
|
||||||
|
if !exists {
|
||||||
|
t.Errorf("[%s] missing expected parameter %q", tc.name, key)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if v != value {
|
||||||
|
t.Errorf("[%s] got unexpected value for parameter %q: %q != %q", tc.name, key, v, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for key, value := range parsed {
|
||||||
|
if _, exists := tc.expected[key]; !exists {
|
||||||
|
t.Errorf("[%s] got unexpected key/value pair: %q=%q", tc.name, key, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if rest != tc.expectedRest {
|
||||||
|
t.Errorf("[%s] got unexpected unparsed string: %q != %q", tc.name, rest, tc.expectedRest)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
54
vendor/github.com/docker/distribution/registry/api/v2/urls.go
generated
vendored
54
vendor/github.com/docker/distribution/registry/api/v2/urls.go
generated
vendored
@@ -46,30 +46,42 @@ func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) {
|
|||||||
// NewURLBuilderFromRequest uses information from an *http.Request to
|
// NewURLBuilderFromRequest uses information from an *http.Request to
|
||||||
// construct the root url.
|
// construct the root url.
|
||||||
func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder {
|
func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder {
|
||||||
var scheme string
|
var (
|
||||||
|
|
||||||
forwardedProto := r.Header.Get("X-Forwarded-Proto")
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case len(forwardedProto) > 0:
|
|
||||||
scheme = forwardedProto
|
|
||||||
case r.TLS != nil:
|
|
||||||
scheme = "https"
|
|
||||||
case len(r.URL.Scheme) > 0:
|
|
||||||
scheme = r.URL.Scheme
|
|
||||||
default:
|
|
||||||
scheme = "http"
|
scheme = "http"
|
||||||
|
host = r.Host
|
||||||
|
)
|
||||||
|
|
||||||
|
if r.TLS != nil {
|
||||||
|
scheme = "https"
|
||||||
|
} else if len(r.URL.Scheme) > 0 {
|
||||||
|
scheme = r.URL.Scheme
|
||||||
}
|
}
|
||||||
|
|
||||||
host := r.Host
|
// Handle fowarded headers
|
||||||
forwardedHost := r.Header.Get("X-Forwarded-Host")
|
// Prefer "Forwarded" header as defined by rfc7239 if given
|
||||||
if len(forwardedHost) > 0 {
|
// see https://tools.ietf.org/html/rfc7239
|
||||||
// According to the Apache mod_proxy docs, X-Forwarded-Host can be a
|
if forwarded := r.Header.Get("Forwarded"); len(forwarded) > 0 {
|
||||||
// comma-separated list of hosts, to which each proxy appends the
|
forwardedHeader, _, err := parseForwardedHeader(forwarded)
|
||||||
// requested host. We want to grab the first from this comma-separated
|
if err == nil {
|
||||||
// list.
|
if fproto := forwardedHeader["proto"]; len(fproto) > 0 {
|
||||||
hosts := strings.SplitN(forwardedHost, ",", 2)
|
scheme = fproto
|
||||||
host = strings.TrimSpace(hosts[0])
|
}
|
||||||
|
if fhost := forwardedHeader["host"]; len(fhost) > 0 {
|
||||||
|
host = fhost
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if forwardedProto := r.Header.Get("X-Forwarded-Proto"); len(forwardedProto) > 0 {
|
||||||
|
scheme = forwardedProto
|
||||||
|
}
|
||||||
|
if forwardedHost := r.Header.Get("X-Forwarded-Host"); len(forwardedHost) > 0 {
|
||||||
|
// According to the Apache mod_proxy docs, X-Forwarded-Host can be a
|
||||||
|
// comma-separated list of hosts, to which each proxy appends the
|
||||||
|
// requested host. We want to grab the first from this comma-separated
|
||||||
|
// list.
|
||||||
|
hosts := strings.SplitN(forwardedHost, ",", 2)
|
||||||
|
host = strings.TrimSpace(hosts[0])
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
basePath := routeDescriptorsMap[RouteNameBase].Path
|
basePath := routeDescriptorsMap[RouteNameBase].Path
|
||||||
|
|||||||
228
vendor/github.com/docker/distribution/registry/api/v2/urls_test.go
generated
vendored
228
vendor/github.com/docker/distribution/registry/api/v2/urls_test.go
generated
vendored
@@ -165,50 +165,213 @@ func TestBuilderFromRequest(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
forwardedProtoHeader := make(http.Header, 1)
|
|
||||||
forwardedProtoHeader.Set("X-Forwarded-Proto", "https")
|
|
||||||
|
|
||||||
forwardedHostHeader1 := make(http.Header, 1)
|
|
||||||
forwardedHostHeader1.Set("X-Forwarded-Host", "first.example.com")
|
|
||||||
|
|
||||||
forwardedHostHeader2 := make(http.Header, 1)
|
|
||||||
forwardedHostHeader2.Set("X-Forwarded-Host", "first.example.com, proxy1.example.com")
|
|
||||||
|
|
||||||
testRequests := []struct {
|
testRequests := []struct {
|
||||||
|
name string
|
||||||
request *http.Request
|
request *http.Request
|
||||||
base string
|
base string
|
||||||
configHost url.URL
|
configHost url.URL
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
|
name: "no forwarded header",
|
||||||
request: &http.Request{URL: u, Host: u.Host},
|
request: &http.Request{URL: u, Host: u.Host},
|
||||||
base: "http://example.com",
|
base: "http://example.com",
|
||||||
},
|
},
|
||||||
|
|
||||||
{
|
{
|
||||||
request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader},
|
name: "https protocol forwarded with a non-standard header",
|
||||||
base: "http://example.com",
|
request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
|
||||||
|
"X-Custom-Forwarded-Proto": []string{"https"},
|
||||||
|
}},
|
||||||
|
base: "http://example.com",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader},
|
name: "forwarded protocol is the same",
|
||||||
base: "https://example.com",
|
request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
|
||||||
|
"X-Forwarded-Proto": []string{"https"},
|
||||||
|
}},
|
||||||
|
base: "https://example.com",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader1},
|
name: "forwarded host with a non-standard header",
|
||||||
base: "http://first.example.com",
|
request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
|
||||||
|
"X-Forwarded-Host": []string{"first.example.com"},
|
||||||
|
}},
|
||||||
|
base: "http://first.example.com",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader2},
|
name: "forwarded multiple hosts a with non-standard header",
|
||||||
base: "http://first.example.com",
|
request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
|
||||||
|
"X-Forwarded-Host": []string{"first.example.com, proxy1.example.com"},
|
||||||
|
}},
|
||||||
|
base: "http://first.example.com",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader2},
|
name: "host configured in config file takes priority",
|
||||||
base: "https://third.example.com:5000",
|
request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
|
||||||
|
"X-Forwarded-Host": []string{"first.example.com, proxy1.example.com"},
|
||||||
|
}},
|
||||||
|
base: "https://third.example.com:5000",
|
||||||
configHost: url.URL{
|
configHost: url.URL{
|
||||||
Scheme: "https",
|
Scheme: "https",
|
||||||
Host: "third.example.com:5000",
|
Host: "third.example.com:5000",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
name: "forwarded host and port with just one non-standard header",
|
||||||
|
request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
|
||||||
|
"X-Forwarded-Host": []string{"first.example.com:443"},
|
||||||
|
}},
|
||||||
|
base: "http://first.example.com:443",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "forwarded port with a non-standard header",
|
||||||
|
request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
|
||||||
|
"X-Forwarded-Host": []string{"example.com:5000"},
|
||||||
|
"X-Forwarded-Port": []string{"5000"},
|
||||||
|
}},
|
||||||
|
base: "http://example.com:5000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "forwarded multiple ports with a non-standard header",
|
||||||
|
request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
|
||||||
|
"X-Forwarded-Port": []string{"443 , 5001"},
|
||||||
|
}},
|
||||||
|
base: "http://example.com",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "forwarded standard port with non-standard headers",
|
||||||
|
request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
|
||||||
|
"X-Forwarded-Proto": []string{"https"},
|
||||||
|
"X-Forwarded-Host": []string{"example.com"},
|
||||||
|
"X-Forwarded-Port": []string{"443"},
|
||||||
|
}},
|
||||||
|
base: "https://example.com",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "forwarded standard port with non-standard headers and explicit port",
|
||||||
|
request: &http.Request{URL: u, Host: u.Host + ":443", Header: http.Header{
|
||||||
|
"X-Forwarded-Proto": []string{"https"},
|
||||||
|
"X-Forwarded-Host": []string{u.Host + ":443"},
|
||||||
|
"X-Forwarded-Port": []string{"443"},
|
||||||
|
}},
|
||||||
|
base: "https://example.com:443",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "several non-standard headers",
|
||||||
|
request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
|
||||||
|
"X-Forwarded-Proto": []string{"https"},
|
||||||
|
"X-Forwarded-Host": []string{" first.example.com:12345 "},
|
||||||
|
}},
|
||||||
|
base: "https://first.example.com:12345",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "forwarded host with port supplied takes priority",
|
||||||
|
request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
|
||||||
|
"X-Forwarded-Host": []string{"first.example.com:5000"},
|
||||||
|
"X-Forwarded-Port": []string{"80"},
|
||||||
|
}},
|
||||||
|
base: "http://first.example.com:5000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "malformed forwarded port",
|
||||||
|
request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
|
||||||
|
"X-Forwarded-Host": []string{"first.example.com"},
|
||||||
|
"X-Forwarded-Port": []string{"abcd"},
|
||||||
|
}},
|
||||||
|
base: "http://first.example.com",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "forwarded protocol and addr using standard header",
|
||||||
|
request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
|
||||||
|
"Forwarded": []string{`proto=https;host="192.168.22.30:80"`},
|
||||||
|
}},
|
||||||
|
base: "https://192.168.22.30:80",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "forwarded host takes priority over for",
|
||||||
|
request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
|
||||||
|
"Forwarded": []string{`host="reg.example.com:5000";for="192.168.22.30"`},
|
||||||
|
}},
|
||||||
|
base: "http://reg.example.com:5000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "forwarded host and protocol using standard header",
|
||||||
|
request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
|
||||||
|
"Forwarded": []string{`host=reg.example.com;proto=https`},
|
||||||
|
}},
|
||||||
|
base: "https://reg.example.com",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "process just the first standard forwarded header",
|
||||||
|
request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
|
||||||
|
"Forwarded": []string{`host="reg.example.com:88";proto=http`, `host=reg.example.com;proto=https`},
|
||||||
|
}},
|
||||||
|
base: "http://reg.example.com:88",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "process just the first list element of standard header",
|
||||||
|
request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
|
||||||
|
"Forwarded": []string{`host="reg.example.com:443";proto=https, host="reg.example.com:80";proto=http`},
|
||||||
|
}},
|
||||||
|
base: "https://reg.example.com:443",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "IPv6 address use host",
|
||||||
|
request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
|
||||||
|
"Forwarded": []string{`for="2607:f0d0:1002:51::4";host="[2607:f0d0:1002:51::4]:5001"`},
|
||||||
|
"X-Forwarded-Port": []string{"5002"},
|
||||||
|
}},
|
||||||
|
base: "http://[2607:f0d0:1002:51::4]:5001",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "IPv6 address with port",
|
||||||
|
request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
|
||||||
|
"Forwarded": []string{`host="[2607:f0d0:1002:51::4]:4000"`},
|
||||||
|
"X-Forwarded-Port": []string{"5001"},
|
||||||
|
}},
|
||||||
|
base: "http://[2607:f0d0:1002:51::4]:4000",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "non-standard and standard forward headers",
|
||||||
|
request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
|
||||||
|
"X-Forwarded-Proto": []string{`https`},
|
||||||
|
"X-Forwarded-Host": []string{`first.example.com`},
|
||||||
|
"X-Forwarded-Port": []string{``},
|
||||||
|
"Forwarded": []string{`host=first.example.com; proto=https`},
|
||||||
|
}},
|
||||||
|
base: "https://first.example.com",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "standard header takes precedence over non-standard headers",
|
||||||
|
request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
|
||||||
|
"X-Forwarded-Proto": []string{`http`},
|
||||||
|
"Forwarded": []string{`host=second.example.com; proto=https`},
|
||||||
|
"X-Forwarded-Host": []string{`first.example.com`},
|
||||||
|
"X-Forwarded-Port": []string{`4000`},
|
||||||
|
}},
|
||||||
|
base: "https://second.example.com",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "incomplete standard header uses default",
|
||||||
|
request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
|
||||||
|
"X-Forwarded-Proto": []string{`https`},
|
||||||
|
"Forwarded": []string{`for=127.0.0.1`},
|
||||||
|
"X-Forwarded-Host": []string{`first.example.com`},
|
||||||
|
"X-Forwarded-Port": []string{`4000`},
|
||||||
|
}},
|
||||||
|
base: "http://" + u.Host,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "standard with just proto",
|
||||||
|
request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
|
||||||
|
"X-Forwarded-Proto": []string{`https`},
|
||||||
|
"Forwarded": []string{`proto=https`},
|
||||||
|
"X-Forwarded-Host": []string{`first.example.com`},
|
||||||
|
"X-Forwarded-Port": []string{`4000`},
|
||||||
|
}},
|
||||||
|
base: "https://" + u.Host,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
doTest := func(relative bool) {
|
doTest := func(relative bool) {
|
||||||
for _, tr := range testRequests {
|
for _, tr := range testRequests {
|
||||||
var builder *URLBuilder
|
var builder *URLBuilder
|
||||||
@@ -221,34 +384,21 @@ func TestBuilderFromRequest(t *testing.T) {
|
|||||||
for _, testCase := range makeURLBuilderTestCases(builder) {
|
for _, testCase := range makeURLBuilderTestCases(builder) {
|
||||||
buildURL, err := testCase.build()
|
buildURL, err := testCase.build()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("%s: error building url: %v", testCase.description, err)
|
t.Fatalf("[relative=%t, request=%q, case=%q]: error building url: %v", relative, tr.name, testCase.description, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var expectedURL string
|
expectedURL := testCase.expectedPath
|
||||||
proto, ok := tr.request.Header["X-Forwarded-Proto"]
|
if !relative {
|
||||||
if !ok {
|
expectedURL = tr.base + expectedURL
|
||||||
expectedURL = testCase.expectedPath
|
|
||||||
if !relative {
|
|
||||||
expectedURL = tr.base + expectedURL
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
urlBase, err := url.Parse(tr.base)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
urlBase.Scheme = proto[0]
|
|
||||||
expectedURL = testCase.expectedPath
|
|
||||||
if !relative {
|
|
||||||
expectedURL = urlBase.String() + expectedURL
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if buildURL != expectedURL {
|
if buildURL != expectedURL {
|
||||||
t.Fatalf("%s: %q != %q", testCase.description, buildURL, expectedURL)
|
t.Errorf("[relative=%t, request=%q, case=%q]: %q != %q", relative, tr.name, testCase.description, buildURL, expectedURL)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
doTest(true)
|
doTest(true)
|
||||||
doTest(false)
|
doTest(false)
|
||||||
}
|
}
|
||||||
|
|||||||
38
vendor/github.com/docker/distribution/registry/auth/auth.go
generated
vendored
38
vendor/github.com/docker/distribution/registry/auth/auth.go
generated
vendored
@@ -66,8 +66,9 @@ type UserInfo struct {
|
|||||||
|
|
||||||
// Resource describes a resource by type and name.
|
// Resource describes a resource by type and name.
|
||||||
type Resource struct {
|
type Resource struct {
|
||||||
Type string
|
Type string
|
||||||
Name string
|
Class string
|
||||||
|
Name string
|
||||||
}
|
}
|
||||||
|
|
||||||
// Access describes a specific action that is
|
// Access describes a specific action that is
|
||||||
@@ -135,6 +136,39 @@ func (uic userInfoContext) Value(key interface{}) interface{} {
|
|||||||
return uic.Context.Value(key)
|
return uic.Context.Value(key)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithResources returns a context with the authorized resources.
|
||||||
|
func WithResources(ctx context.Context, resources []Resource) context.Context {
|
||||||
|
return resourceContext{
|
||||||
|
Context: ctx,
|
||||||
|
resources: resources,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type resourceContext struct {
|
||||||
|
context.Context
|
||||||
|
resources []Resource
|
||||||
|
}
|
||||||
|
|
||||||
|
type resourceKey struct{}
|
||||||
|
|
||||||
|
func (rc resourceContext) Value(key interface{}) interface{} {
|
||||||
|
if key == (resourceKey{}) {
|
||||||
|
return rc.resources
|
||||||
|
}
|
||||||
|
|
||||||
|
return rc.Context.Value(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AuthorizedResources returns the list of resources which have
|
||||||
|
// been authorized for this request.
|
||||||
|
func AuthorizedResources(ctx context.Context) []Resource {
|
||||||
|
if resources, ok := ctx.Value(resourceKey{}).([]Resource); ok {
|
||||||
|
return resources
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// InitFunc is the type of an AccessController factory function and is used
|
// InitFunc is the type of an AccessController factory function and is used
|
||||||
// to register the constructor for different AccesController backends.
|
// to register the constructor for different AccesController backends.
|
||||||
type InitFunc func(options map[string]interface{}) (AccessController, error)
|
type InitFunc func(options map[string]interface{}) (AccessController, error)
|
||||||
|
|||||||
2
vendor/github.com/docker/distribution/registry/auth/silly/access_test.go
generated
vendored
2
vendor/github.com/docker/distribution/registry/auth/silly/access_test.go
generated
vendored
@@ -16,7 +16,7 @@ func TestSillyAccessController(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := context.WithValue(nil, "http.request", r)
|
ctx := context.WithRequest(context.Background(), r)
|
||||||
authCtx, err := ac.Authorized(ctx)
|
authCtx, err := ac.Authorized(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
switch err := err.(type) {
|
switch err := err.(type) {
|
||||||
|
|||||||
2
vendor/github.com/docker/distribution/registry/auth/token/accesscontroller.go
generated
vendored
2
vendor/github.com/docker/distribution/registry/auth/token/accesscontroller.go
generated
vendored
@@ -261,6 +261,8 @@ func (ac *accessController) Authorized(ctx context.Context, accessItems ...auth.
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ctx = auth.WithResources(ctx, token.resources())
|
||||||
|
|
||||||
return auth.WithUser(ctx, auth.UserInfo{Name: token.Claims.Subject}), nil
|
return auth.WithUser(ctx, auth.UserInfo{Name: token.Claims.Subject}), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
24
vendor/github.com/docker/distribution/registry/auth/token/token.go
generated
vendored
24
vendor/github.com/docker/distribution/registry/auth/token/token.go
generated
vendored
@@ -34,6 +34,7 @@ var (
|
|||||||
// ResourceActions stores allowed actions on a named and typed resource.
|
// ResourceActions stores allowed actions on a named and typed resource.
|
||||||
type ResourceActions struct {
|
type ResourceActions struct {
|
||||||
Type string `json:"type"`
|
Type string `json:"type"`
|
||||||
|
Class string `json:"class,omitempty"`
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
Actions []string `json:"actions"`
|
Actions []string `json:"actions"`
|
||||||
}
|
}
|
||||||
@@ -349,6 +350,29 @@ func (t *Token) accessSet() accessSet {
|
|||||||
return accessSet
|
return accessSet
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (t *Token) resources() []auth.Resource {
|
||||||
|
if t.Claims == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
resourceSet := map[auth.Resource]struct{}{}
|
||||||
|
for _, resourceActions := range t.Claims.Access {
|
||||||
|
resource := auth.Resource{
|
||||||
|
Type: resourceActions.Type,
|
||||||
|
Class: resourceActions.Class,
|
||||||
|
Name: resourceActions.Name,
|
||||||
|
}
|
||||||
|
resourceSet[resource] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
resources := make([]auth.Resource, 0, len(resourceSet))
|
||||||
|
for resource := range resourceSet {
|
||||||
|
resources = append(resources, resource)
|
||||||
|
}
|
||||||
|
|
||||||
|
return resources
|
||||||
|
}
|
||||||
|
|
||||||
func (t *Token) compactRaw() string {
|
func (t *Token) compactRaw() string {
|
||||||
return fmt.Sprintf("%s.%s", t.Raw, joseBase64UrlEncode(t.Signature))
|
return fmt.Sprintf("%s.%s", t.Raw, joseBase64UrlEncode(t.Signature))
|
||||||
}
|
}
|
||||||
|
|||||||
2
vendor/github.com/docker/distribution/registry/auth/token/token_test.go
generated
vendored
2
vendor/github.com/docker/distribution/registry/auth/token/token_test.go
generated
vendored
@@ -354,7 +354,7 @@ func TestAccessController(t *testing.T) {
|
|||||||
Action: "baz",
|
Action: "baz",
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx := context.WithValue(nil, "http.request", req)
|
ctx := context.WithRequest(context.Background(), req)
|
||||||
authCtx, err := accessController.Authorized(ctx, testAccess)
|
authCtx, err := accessController.Authorized(ctx, testAccess)
|
||||||
challenge, ok := err.(auth.Challenge)
|
challenge, ok := err.(auth.Challenge)
|
||||||
if !ok {
|
if !ok {
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
package auth
|
package challenge
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"net/url"
|
"net/url"
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
package auth
|
package challenge
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
@@ -18,12 +18,12 @@ type Challenge struct {
|
|||||||
Parameters map[string]string
|
Parameters map[string]string
|
||||||
}
|
}
|
||||||
|
|
||||||
// ChallengeManager manages the challenges for endpoints.
|
// Manager manages the challenges for endpoints.
|
||||||
// The challenges are pulled out of HTTP responses. Only
|
// The challenges are pulled out of HTTP responses. Only
|
||||||
// responses which expect challenges should be added to
|
// responses which expect challenges should be added to
|
||||||
// the manager, since a non-unauthorized request will be
|
// the manager, since a non-unauthorized request will be
|
||||||
// viewed as not requiring challenges.
|
// viewed as not requiring challenges.
|
||||||
type ChallengeManager interface {
|
type Manager interface {
|
||||||
// GetChallenges returns the challenges for the given
|
// GetChallenges returns the challenges for the given
|
||||||
// endpoint URL.
|
// endpoint URL.
|
||||||
GetChallenges(endpoint url.URL) ([]Challenge, error)
|
GetChallenges(endpoint url.URL) ([]Challenge, error)
|
||||||
@@ -37,19 +37,19 @@ type ChallengeManager interface {
|
|||||||
AddResponse(resp *http.Response) error
|
AddResponse(resp *http.Response) error
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewSimpleChallengeManager returns an instance of
|
// NewSimpleManager returns an instance of
|
||||||
// ChallengeManger which only maps endpoints to challenges
|
// Manger which only maps endpoints to challenges
|
||||||
// based on the responses which have been added the
|
// based on the responses which have been added the
|
||||||
// manager. The simple manager will make no attempt to
|
// manager. The simple manager will make no attempt to
|
||||||
// perform requests on the endpoints or cache the responses
|
// perform requests on the endpoints or cache the responses
|
||||||
// to a backend.
|
// to a backend.
|
||||||
func NewSimpleChallengeManager() ChallengeManager {
|
func NewSimpleManager() Manager {
|
||||||
return &simpleChallengeManager{
|
return &simpleManager{
|
||||||
Challanges: make(map[string][]Challenge),
|
Challanges: make(map[string][]Challenge),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
type simpleChallengeManager struct {
|
type simpleManager struct {
|
||||||
sync.RWMutex
|
sync.RWMutex
|
||||||
Challanges map[string][]Challenge
|
Challanges map[string][]Challenge
|
||||||
}
|
}
|
||||||
@@ -59,7 +59,7 @@ func normalizeURL(endpoint *url.URL) {
|
|||||||
endpoint.Host = canonicalAddr(endpoint)
|
endpoint.Host = canonicalAddr(endpoint)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *simpleChallengeManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
|
func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
|
||||||
normalizeURL(&endpoint)
|
normalizeURL(&endpoint)
|
||||||
|
|
||||||
m.RLock()
|
m.RLock()
|
||||||
@@ -68,7 +68,7 @@ func (m *simpleChallengeManager) GetChallenges(endpoint url.URL) ([]Challenge, e
|
|||||||
return challenges, nil
|
return challenges, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *simpleChallengeManager) AddResponse(resp *http.Response) error {
|
func (m *simpleManager) AddResponse(resp *http.Response) error {
|
||||||
challenges := ResponseChallenges(resp)
|
challenges := ResponseChallenges(resp)
|
||||||
if resp.Request == nil {
|
if resp.Request == nil {
|
||||||
return fmt.Errorf("missing request reference")
|
return fmt.Errorf("missing request reference")
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
package auth
|
package challenge
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
@@ -50,7 +50,7 @@ func TestAuthChallengeNormalization(t *testing.T) {
|
|||||||
|
|
||||||
func testAuthChallengeNormalization(t *testing.T, host string) {
|
func testAuthChallengeNormalization(t *testing.T, host string) {
|
||||||
|
|
||||||
scm := NewSimpleChallengeManager()
|
scm := NewSimpleManager()
|
||||||
|
|
||||||
url, err := url.Parse(fmt.Sprintf("http://%s/v2/", host))
|
url, err := url.Parse(fmt.Sprintf("http://%s/v2/", host))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -86,7 +86,7 @@ func testAuthChallengeNormalization(t *testing.T, host string) {
|
|||||||
|
|
||||||
func testAuthChallengeConcurrent(t *testing.T, host string) {
|
func testAuthChallengeConcurrent(t *testing.T, host string) {
|
||||||
|
|
||||||
scm := NewSimpleChallengeManager()
|
scm := NewSimpleManager()
|
||||||
|
|
||||||
url, err := url.Parse(fmt.Sprintf("http://%s/v2/", host))
|
url, err := url.Parse(fmt.Sprintf("http://%s/v2/", host))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
18
vendor/github.com/docker/distribution/registry/client/auth/session.go
generated
vendored
18
vendor/github.com/docker/distribution/registry/client/auth/session.go
generated
vendored
@@ -12,6 +12,7 @@ import (
|
|||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
"github.com/Sirupsen/logrus"
|
||||||
"github.com/docker/distribution/registry/client"
|
"github.com/docker/distribution/registry/client"
|
||||||
|
"github.com/docker/distribution/registry/client/auth/challenge"
|
||||||
"github.com/docker/distribution/registry/client/transport"
|
"github.com/docker/distribution/registry/client/transport"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -58,7 +59,7 @@ type CredentialStore interface {
|
|||||||
// schemes. The handlers are tried in order, the higher priority authentication
|
// schemes. The handlers are tried in order, the higher priority authentication
|
||||||
// methods should be first. The challengeMap holds a list of challenges for
|
// methods should be first. The challengeMap holds a list of challenges for
|
||||||
// a given root API endpoint (for example "https://registry-1.docker.io/v2/").
|
// a given root API endpoint (for example "https://registry-1.docker.io/v2/").
|
||||||
func NewAuthorizer(manager ChallengeManager, handlers ...AuthenticationHandler) transport.RequestModifier {
|
func NewAuthorizer(manager challenge.Manager, handlers ...AuthenticationHandler) transport.RequestModifier {
|
||||||
return &endpointAuthorizer{
|
return &endpointAuthorizer{
|
||||||
challenges: manager,
|
challenges: manager,
|
||||||
handlers: handlers,
|
handlers: handlers,
|
||||||
@@ -66,7 +67,7 @@ func NewAuthorizer(manager ChallengeManager, handlers ...AuthenticationHandler)
|
|||||||
}
|
}
|
||||||
|
|
||||||
type endpointAuthorizer struct {
|
type endpointAuthorizer struct {
|
||||||
challenges ChallengeManager
|
challenges challenge.Manager
|
||||||
handlers []AuthenticationHandler
|
handlers []AuthenticationHandler
|
||||||
transport http.RoundTripper
|
transport http.RoundTripper
|
||||||
}
|
}
|
||||||
@@ -94,11 +95,11 @@ func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error {
|
|||||||
|
|
||||||
if len(challenges) > 0 {
|
if len(challenges) > 0 {
|
||||||
for _, handler := range ea.handlers {
|
for _, handler := range ea.handlers {
|
||||||
for _, challenge := range challenges {
|
for _, c := range challenges {
|
||||||
if challenge.Scheme != handler.Scheme() {
|
if c.Scheme != handler.Scheme() {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if err := handler.AuthorizeRequest(req, challenge.Parameters); err != nil {
|
if err := handler.AuthorizeRequest(req, c.Parameters); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -146,13 +147,18 @@ type Scope interface {
|
|||||||
// to a repository.
|
// to a repository.
|
||||||
type RepositoryScope struct {
|
type RepositoryScope struct {
|
||||||
Repository string
|
Repository string
|
||||||
|
Class string
|
||||||
Actions []string
|
Actions []string
|
||||||
}
|
}
|
||||||
|
|
||||||
// String returns the string representation of the repository
|
// String returns the string representation of the repository
|
||||||
// using the scope grammar
|
// using the scope grammar
|
||||||
func (rs RepositoryScope) String() string {
|
func (rs RepositoryScope) String() string {
|
||||||
return fmt.Sprintf("repository:%s:%s", rs.Repository, strings.Join(rs.Actions, ","))
|
repoType := "repository"
|
||||||
|
if rs.Class != "" {
|
||||||
|
repoType = fmt.Sprintf("%s(%s)", repoType, rs.Class)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%s:%s:%s", repoType, rs.Repository, strings.Join(rs.Actions, ","))
|
||||||
}
|
}
|
||||||
|
|
||||||
// RegistryScope represents a token scope for access
|
// RegistryScope represents a token scope for access
|
||||||
|
|||||||
23
vendor/github.com/docker/distribution/registry/client/auth/session_test.go
generated
vendored
23
vendor/github.com/docker/distribution/registry/client/auth/session_test.go
generated
vendored
@@ -9,6 +9,7 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/docker/distribution/registry/client/auth/challenge"
|
||||||
"github.com/docker/distribution/registry/client/transport"
|
"github.com/docker/distribution/registry/client/transport"
|
||||||
"github.com/docker/distribution/testutil"
|
"github.com/docker/distribution/testutil"
|
||||||
)
|
)
|
||||||
@@ -65,7 +66,7 @@ func testServerWithAuth(rrm testutil.RequestResponseMap, authenticate string, au
|
|||||||
|
|
||||||
// ping pings the provided endpoint to determine its required authorization challenges.
|
// ping pings the provided endpoint to determine its required authorization challenges.
|
||||||
// If a version header is provided, the versions will be returned.
|
// If a version header is provided, the versions will be returned.
|
||||||
func ping(manager ChallengeManager, endpoint, versionHeader string) ([]APIVersion, error) {
|
func ping(manager challenge.Manager, endpoint, versionHeader string) ([]APIVersion, error) {
|
||||||
resp, err := http.Get(endpoint)
|
resp, err := http.Get(endpoint)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -149,7 +150,7 @@ func TestEndpointAuthorizeToken(t *testing.T) {
|
|||||||
e, c := testServerWithAuth(m, authenicate, validCheck)
|
e, c := testServerWithAuth(m, authenicate, validCheck)
|
||||||
defer c()
|
defer c()
|
||||||
|
|
||||||
challengeManager1 := NewSimpleChallengeManager()
|
challengeManager1 := challenge.NewSimpleManager()
|
||||||
versions, err := ping(challengeManager1, e+"/v2/", "x-api-version")
|
versions, err := ping(challengeManager1, e+"/v2/", "x-api-version")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@@ -176,7 +177,7 @@ func TestEndpointAuthorizeToken(t *testing.T) {
|
|||||||
e2, c2 := testServerWithAuth(m, authenicate, validCheck)
|
e2, c2 := testServerWithAuth(m, authenicate, validCheck)
|
||||||
defer c2()
|
defer c2()
|
||||||
|
|
||||||
challengeManager2 := NewSimpleChallengeManager()
|
challengeManager2 := challenge.NewSimpleManager()
|
||||||
versions, err = ping(challengeManager2, e2+"/v2/", "x-multi-api-version")
|
versions, err = ping(challengeManager2, e2+"/v2/", "x-multi-api-version")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@@ -273,7 +274,7 @@ func TestEndpointAuthorizeRefreshToken(t *testing.T) {
|
|||||||
e, c := testServerWithAuth(m, authenicate, validCheck)
|
e, c := testServerWithAuth(m, authenicate, validCheck)
|
||||||
defer c()
|
defer c()
|
||||||
|
|
||||||
challengeManager1 := NewSimpleChallengeManager()
|
challengeManager1 := challenge.NewSimpleManager()
|
||||||
versions, err := ping(challengeManager1, e+"/v2/", "x-api-version")
|
versions, err := ping(challengeManager1, e+"/v2/", "x-api-version")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@@ -306,7 +307,7 @@ func TestEndpointAuthorizeRefreshToken(t *testing.T) {
|
|||||||
e2, c2 := testServerWithAuth(m, authenicate, validCheck)
|
e2, c2 := testServerWithAuth(m, authenicate, validCheck)
|
||||||
defer c2()
|
defer c2()
|
||||||
|
|
||||||
challengeManager2 := NewSimpleChallengeManager()
|
challengeManager2 := challenge.NewSimpleManager()
|
||||||
versions, err = ping(challengeManager2, e2+"/v2/", "x-api-version")
|
versions, err = ping(challengeManager2, e2+"/v2/", "x-api-version")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@@ -339,7 +340,7 @@ func TestEndpointAuthorizeRefreshToken(t *testing.T) {
|
|||||||
e3, c3 := testServerWithAuth(m, authenicate, validCheck)
|
e3, c3 := testServerWithAuth(m, authenicate, validCheck)
|
||||||
defer c3()
|
defer c3()
|
||||||
|
|
||||||
challengeManager3 := NewSimpleChallengeManager()
|
challengeManager3 := challenge.NewSimpleManager()
|
||||||
versions, err = ping(challengeManager3, e3+"/v2/", "x-api-version")
|
versions, err = ping(challengeManager3, e3+"/v2/", "x-api-version")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@@ -401,7 +402,7 @@ func TestEndpointAuthorizeV2RefreshToken(t *testing.T) {
|
|||||||
e, c := testServerWithAuth(m, authenicate, validCheck)
|
e, c := testServerWithAuth(m, authenicate, validCheck)
|
||||||
defer c()
|
defer c()
|
||||||
|
|
||||||
challengeManager1 := NewSimpleChallengeManager()
|
challengeManager1 := challenge.NewSimpleManager()
|
||||||
versions, err := ping(challengeManager1, e+"/v2/", "x-api-version")
|
versions, err := ping(challengeManager1, e+"/v2/", "x-api-version")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@@ -496,7 +497,7 @@ func TestEndpointAuthorizeTokenBasic(t *testing.T) {
|
|||||||
password: password,
|
password: password,
|
||||||
}
|
}
|
||||||
|
|
||||||
challengeManager := NewSimpleChallengeManager()
|
challengeManager := challenge.NewSimpleManager()
|
||||||
_, err := ping(challengeManager, e+"/v2/", "")
|
_, err := ping(challengeManager, e+"/v2/", "")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@@ -614,7 +615,7 @@ func TestEndpointAuthorizeTokenBasicWithExpiresIn(t *testing.T) {
|
|||||||
password: password,
|
password: password,
|
||||||
}
|
}
|
||||||
|
|
||||||
challengeManager := NewSimpleChallengeManager()
|
challengeManager := challenge.NewSimpleManager()
|
||||||
_, err := ping(challengeManager, e+"/v2/", "")
|
_, err := ping(challengeManager, e+"/v2/", "")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@@ -765,7 +766,7 @@ func TestEndpointAuthorizeTokenBasicWithExpiresInAndIssuedAt(t *testing.T) {
|
|||||||
password: password,
|
password: password,
|
||||||
}
|
}
|
||||||
|
|
||||||
challengeManager := NewSimpleChallengeManager()
|
challengeManager := challenge.NewSimpleManager()
|
||||||
_, err := ping(challengeManager, e+"/v2/", "")
|
_, err := ping(challengeManager, e+"/v2/", "")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@@ -845,7 +846,7 @@ func TestEndpointAuthorizeBasic(t *testing.T) {
|
|||||||
password: password,
|
password: password,
|
||||||
}
|
}
|
||||||
|
|
||||||
challengeManager := NewSimpleChallengeManager()
|
challengeManager := challenge.NewSimpleManager()
|
||||||
_, err := ping(challengeManager, e+"/v2/", "")
|
_, err := ping(challengeManager, e+"/v2/", "")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
|
|||||||
42
vendor/github.com/docker/distribution/registry/client/errors.go
generated
vendored
42
vendor/github.com/docker/distribution/registry/client/errors.go
generated
vendored
@@ -9,6 +9,7 @@ import (
|
|||||||
"net/http"
|
"net/http"
|
||||||
|
|
||||||
"github.com/docker/distribution/registry/api/errcode"
|
"github.com/docker/distribution/registry/api/errcode"
|
||||||
|
"github.com/docker/distribution/registry/client/auth/challenge"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty
|
// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty
|
||||||
@@ -82,21 +83,52 @@ func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
|
|||||||
return errors
|
return errors
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func makeErrorList(err error) []error {
|
||||||
|
if errL, ok := err.(errcode.Errors); ok {
|
||||||
|
return []error(errL)
|
||||||
|
}
|
||||||
|
return []error{err}
|
||||||
|
}
|
||||||
|
|
||||||
|
func mergeErrors(err1, err2 error) error {
|
||||||
|
return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...))
|
||||||
|
}
|
||||||
|
|
||||||
// HandleErrorResponse returns error parsed from HTTP response for an
|
// HandleErrorResponse returns error parsed from HTTP response for an
|
||||||
// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An
|
// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An
|
||||||
// UnexpectedHTTPStatusError returned for response code outside of expected
|
// UnexpectedHTTPStatusError returned for response code outside of expected
|
||||||
// range.
|
// range.
|
||||||
func HandleErrorResponse(resp *http.Response) error {
|
func HandleErrorResponse(resp *http.Response) error {
|
||||||
if resp.StatusCode == 401 {
|
if resp.StatusCode >= 400 && resp.StatusCode < 500 {
|
||||||
|
// Check for OAuth errors within the `WWW-Authenticate` header first
|
||||||
|
// See https://tools.ietf.org/html/rfc6750#section-3
|
||||||
|
for _, c := range challenge.ResponseChallenges(resp) {
|
||||||
|
if c.Scheme == "bearer" {
|
||||||
|
var err errcode.Error
|
||||||
|
// codes defined at https://tools.ietf.org/html/rfc6750#section-3.1
|
||||||
|
switch c.Parameters["error"] {
|
||||||
|
case "invalid_token":
|
||||||
|
err.Code = errcode.ErrorCodeUnauthorized
|
||||||
|
case "insufficient_scope":
|
||||||
|
err.Code = errcode.ErrorCodeDenied
|
||||||
|
default:
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if description := c.Parameters["error_description"]; description != "" {
|
||||||
|
err.Message = description
|
||||||
|
} else {
|
||||||
|
err.Message = err.Code.Message()
|
||||||
|
}
|
||||||
|
|
||||||
|
return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body))
|
||||||
|
}
|
||||||
|
}
|
||||||
err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
|
err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
|
||||||
if uErr, ok := err.(*UnexpectedHTTPResponseError); ok {
|
if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
|
||||||
return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
|
return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if resp.StatusCode >= 400 && resp.StatusCode < 500 {
|
|
||||||
return parseHTTPErrorResponse(resp.StatusCode, resp.Body)
|
|
||||||
}
|
|
||||||
return &UnexpectedHTTPStatusError{Status: resp.Status}
|
return &UnexpectedHTTPStatusError{Status: resp.Status}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
1
vendor/github.com/docker/distribution/registry/client/transport/http_reader.go
generated
vendored
1
vendor/github.com/docker/distribution/registry/client/transport/http_reader.go
generated
vendored
@@ -181,6 +181,7 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) {
|
|||||||
// context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range"))
|
// context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range"))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
req.Header.Add("Accept-Encoding", "identity")
|
||||||
resp, err := hrs.client.Do(req)
|
resp, err := hrs.client.Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
|||||||
40
vendor/github.com/docker/distribution/registry/handlers/app.go
generated
vendored
40
vendor/github.com/docker/distribution/registry/handlers/app.go
generated
vendored
@@ -341,7 +341,7 @@ func (app *App) RegisterHealthChecks(healthRegistries ...*health.Registry) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
storageDriverCheck := func() error {
|
storageDriverCheck := func() error {
|
||||||
_, err := app.driver.List(app, "/") // "/" should always exist
|
_, err := app.driver.Stat(app, "/") // "/" should always exist
|
||||||
return err // any error will be treated as failure
|
return err // any error will be treated as failure
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -461,6 +461,8 @@ func (app *App) configureEvents(configuration *configuration.Configuration) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type redisStartAtKey struct{}
|
||||||
|
|
||||||
func (app *App) configureRedis(configuration *configuration.Configuration) {
|
func (app *App) configureRedis(configuration *configuration.Configuration) {
|
||||||
if configuration.Redis.Addr == "" {
|
if configuration.Redis.Addr == "" {
|
||||||
ctxu.GetLogger(app).Infof("redis not configured")
|
ctxu.GetLogger(app).Infof("redis not configured")
|
||||||
@@ -470,11 +472,11 @@ func (app *App) configureRedis(configuration *configuration.Configuration) {
|
|||||||
pool := &redis.Pool{
|
pool := &redis.Pool{
|
||||||
Dial: func() (redis.Conn, error) {
|
Dial: func() (redis.Conn, error) {
|
||||||
// TODO(stevvooe): Yet another use case for contextual timing.
|
// TODO(stevvooe): Yet another use case for contextual timing.
|
||||||
ctx := context.WithValue(app, "redis.connect.startedat", time.Now())
|
ctx := context.WithValue(app, redisStartAtKey{}, time.Now())
|
||||||
|
|
||||||
done := func(err error) {
|
done := func(err error) {
|
||||||
logger := ctxu.GetLoggerWithField(ctx, "redis.connect.duration",
|
logger := ctxu.GetLoggerWithField(ctx, "redis.connect.duration",
|
||||||
ctxu.Since(ctx, "redis.connect.startedat"))
|
ctxu.Since(ctx, redisStartAtKey{}))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Errorf("redis: error connecting: %v", err)
|
logger.Errorf("redis: error connecting: %v", err)
|
||||||
} else {
|
} else {
|
||||||
@@ -707,6 +709,18 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type errCodeKey struct{}
|
||||||
|
|
||||||
|
func (errCodeKey) String() string { return "err.code" }
|
||||||
|
|
||||||
|
type errMessageKey struct{}
|
||||||
|
|
||||||
|
func (errMessageKey) String() string { return "err.message" }
|
||||||
|
|
||||||
|
type errDetailKey struct{}
|
||||||
|
|
||||||
|
func (errDetailKey) String() string { return "err.detail" }
|
||||||
|
|
||||||
func (app *App) logError(context context.Context, errors errcode.Errors) {
|
func (app *App) logError(context context.Context, errors errcode.Errors) {
|
||||||
for _, e1 := range errors {
|
for _, e1 := range errors {
|
||||||
var c ctxu.Context
|
var c ctxu.Context
|
||||||
@@ -714,23 +728,23 @@ func (app *App) logError(context context.Context, errors errcode.Errors) {
|
|||||||
switch e1.(type) {
|
switch e1.(type) {
|
||||||
case errcode.Error:
|
case errcode.Error:
|
||||||
e, _ := e1.(errcode.Error)
|
e, _ := e1.(errcode.Error)
|
||||||
c = ctxu.WithValue(context, "err.code", e.Code)
|
c = ctxu.WithValue(context, errCodeKey{}, e.Code)
|
||||||
c = ctxu.WithValue(c, "err.message", e.Code.Message())
|
c = ctxu.WithValue(c, errMessageKey{}, e.Code.Message())
|
||||||
c = ctxu.WithValue(c, "err.detail", e.Detail)
|
c = ctxu.WithValue(c, errDetailKey{}, e.Detail)
|
||||||
case errcode.ErrorCode:
|
case errcode.ErrorCode:
|
||||||
e, _ := e1.(errcode.ErrorCode)
|
e, _ := e1.(errcode.ErrorCode)
|
||||||
c = ctxu.WithValue(context, "err.code", e)
|
c = ctxu.WithValue(context, errCodeKey{}, e)
|
||||||
c = ctxu.WithValue(c, "err.message", e.Message())
|
c = ctxu.WithValue(c, errMessageKey{}, e.Message())
|
||||||
default:
|
default:
|
||||||
// just normal go 'error'
|
// just normal go 'error'
|
||||||
c = ctxu.WithValue(context, "err.code", errcode.ErrorCodeUnknown)
|
c = ctxu.WithValue(context, errCodeKey{}, errcode.ErrorCodeUnknown)
|
||||||
c = ctxu.WithValue(c, "err.message", e1.Error())
|
c = ctxu.WithValue(c, errMessageKey{}, e1.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
c = ctxu.WithLogger(c, ctxu.GetLogger(c,
|
c = ctxu.WithLogger(c, ctxu.GetLogger(c,
|
||||||
"err.code",
|
errCodeKey{},
|
||||||
"err.message",
|
errMessageKey{},
|
||||||
"err.detail"))
|
errDetailKey{}))
|
||||||
ctxu.GetResponseLogger(c).Errorf("response completed with error")
|
ctxu.GetResponseLogger(c).Errorf("response completed with error")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
76
vendor/github.com/docker/distribution/registry/handlers/images.go
generated
vendored
76
vendor/github.com/docker/distribution/registry/handlers/images.go
generated
vendored
@@ -15,6 +15,7 @@ import (
|
|||||||
"github.com/docker/distribution/reference"
|
"github.com/docker/distribution/reference"
|
||||||
"github.com/docker/distribution/registry/api/errcode"
|
"github.com/docker/distribution/registry/api/errcode"
|
||||||
"github.com/docker/distribution/registry/api/v2"
|
"github.com/docker/distribution/registry/api/v2"
|
||||||
|
"github.com/docker/distribution/registry/auth"
|
||||||
"github.com/gorilla/handlers"
|
"github.com/gorilla/handlers"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -205,7 +206,7 @@ func (imh *imageManifestHandler) convertSchema2Manifest(schema2Manifest *schema2
|
|||||||
}
|
}
|
||||||
|
|
||||||
builder := schema1.NewConfigManifestBuilder(imh.Repository.Blobs(imh), imh.Context.App.trustKey, ref, configJSON)
|
builder := schema1.NewConfigManifestBuilder(imh.Repository.Blobs(imh), imh.Context.App.trustKey, ref, configJSON)
|
||||||
for _, d := range schema2Manifest.References() {
|
for _, d := range schema2Manifest.Layers {
|
||||||
if err := builder.AppendReference(d); err != nil {
|
if err := builder.AppendReference(d); err != nil {
|
||||||
imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err))
|
imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err))
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -269,6 +270,12 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http
|
|||||||
if imh.Tag != "" {
|
if imh.Tag != "" {
|
||||||
options = append(options, distribution.WithTag(imh.Tag))
|
options = append(options, distribution.WithTag(imh.Tag))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if err := imh.applyResourcePolicy(manifest); err != nil {
|
||||||
|
imh.Errors = append(imh.Errors, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
_, err = manifests.Put(imh, manifest, options...)
|
_, err = manifests.Put(imh, manifest, options...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// TODO(stevvooe): These error handling switches really need to be
|
// TODO(stevvooe): These error handling switches really need to be
|
||||||
@@ -339,6 +346,73 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http
|
|||||||
w.WriteHeader(http.StatusCreated)
|
w.WriteHeader(http.StatusCreated)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// applyResourcePolicy checks whether the resource class matches what has
|
||||||
|
// been authorized and allowed by the policy configuration.
|
||||||
|
func (imh *imageManifestHandler) applyResourcePolicy(manifest distribution.Manifest) error {
|
||||||
|
allowedClasses := imh.App.Config.Policy.Repository.Classes
|
||||||
|
if len(allowedClasses) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var class string
|
||||||
|
switch m := manifest.(type) {
|
||||||
|
case *schema1.SignedManifest:
|
||||||
|
class = "image"
|
||||||
|
case *schema2.DeserializedManifest:
|
||||||
|
switch m.Config.MediaType {
|
||||||
|
case schema2.MediaTypeConfig:
|
||||||
|
class = "image"
|
||||||
|
case schema2.MediaTypePluginConfig:
|
||||||
|
class = "plugin"
|
||||||
|
default:
|
||||||
|
message := fmt.Sprintf("unknown manifest class for %s", m.Config.MediaType)
|
||||||
|
return errcode.ErrorCodeDenied.WithMessage(message)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if class == "" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check to see if class is allowed in registry
|
||||||
|
var allowedClass bool
|
||||||
|
for _, c := range allowedClasses {
|
||||||
|
if class == c {
|
||||||
|
allowedClass = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !allowedClass {
|
||||||
|
message := fmt.Sprintf("registry does not allow %s manifest", class)
|
||||||
|
return errcode.ErrorCodeDenied.WithMessage(message)
|
||||||
|
}
|
||||||
|
|
||||||
|
resources := auth.AuthorizedResources(imh)
|
||||||
|
n := imh.Repository.Named().Name()
|
||||||
|
|
||||||
|
var foundResource bool
|
||||||
|
for _, r := range resources {
|
||||||
|
if r.Name == n {
|
||||||
|
if r.Class == "" {
|
||||||
|
r.Class = "image"
|
||||||
|
}
|
||||||
|
if r.Class == class {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
foundResource = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// resource was found but no matching class was found
|
||||||
|
if foundResource {
|
||||||
|
message := fmt.Sprintf("repository not authorized for %s manifest", class)
|
||||||
|
return errcode.ErrorCodeDenied.WithMessage(message)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
// DeleteImageManifest removes the manifest with the given digest from the registry.
|
// DeleteImageManifest removes the manifest with the given digest from the registry.
|
||||||
func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *http.Request) {
|
func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *http.Request) {
|
||||||
ctxu.GetLogger(imh).Debug("DeleteImageManifest")
|
ctxu.GetLogger(imh).Debug("DeleteImageManifest")
|
||||||
|
|||||||
41
vendor/github.com/docker/distribution/registry/proxy/proxyauth.go
generated
vendored
41
vendor/github.com/docker/distribution/registry/proxy/proxyauth.go
generated
vendored
@@ -3,11 +3,13 @@ package proxy
|
|||||||
import (
|
import (
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/docker/distribution/context"
|
||||||
"github.com/docker/distribution/registry/client/auth"
|
"github.com/docker/distribution/registry/client/auth"
|
||||||
|
"github.com/docker/distribution/registry/client/auth/challenge"
|
||||||
)
|
)
|
||||||
|
|
||||||
const tokenURL = "https://auth.docker.io/token"
|
|
||||||
const challengeHeader = "Docker-Distribution-Api-Version"
|
const challengeHeader = "Docker-Distribution-Api-Version"
|
||||||
|
|
||||||
type userpass struct {
|
type userpass struct {
|
||||||
@@ -33,17 +35,44 @@ func (c credentials) SetRefreshToken(u *url.URL, service, token string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// configureAuth stores credentials for challenge responses
|
// configureAuth stores credentials for challenge responses
|
||||||
func configureAuth(username, password string) (auth.CredentialStore, error) {
|
func configureAuth(username, password, remoteURL string) (auth.CredentialStore, error) {
|
||||||
creds := map[string]userpass{
|
creds := map[string]userpass{}
|
||||||
tokenURL: {
|
|
||||||
|
authURLs, err := getAuthURLs(remoteURL)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, url := range authURLs {
|
||||||
|
context.GetLogger(context.Background()).Infof("Discovered token authentication URL: %s", url)
|
||||||
|
creds[url] = userpass{
|
||||||
username: username,
|
username: username,
|
||||||
password: password,
|
password: password,
|
||||||
},
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return credentials{creds: creds}, nil
|
return credentials{creds: creds}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func ping(manager auth.ChallengeManager, endpoint, versionHeader string) error {
|
func getAuthURLs(remoteURL string) ([]string, error) {
|
||||||
|
authURLs := []string{}
|
||||||
|
|
||||||
|
resp, err := http.Get(remoteURL + "/v2/")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
for _, c := range challenge.ResponseChallenges(resp) {
|
||||||
|
if strings.EqualFold(c.Scheme, "bearer") {
|
||||||
|
authURLs = append(authURLs, c.Parameters["realm"])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return authURLs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func ping(manager challenge.Manager, endpoint, versionHeader string) error {
|
||||||
resp, err := http.Get(endpoint)
|
resp, err := http.Get(endpoint)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
|||||||
7
vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore_test.go
generated
vendored
7
vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore_test.go
generated
vendored
@@ -12,6 +12,7 @@ import (
|
|||||||
"github.com/docker/distribution/manifest/schema1"
|
"github.com/docker/distribution/manifest/schema1"
|
||||||
"github.com/docker/distribution/reference"
|
"github.com/docker/distribution/reference"
|
||||||
"github.com/docker/distribution/registry/client/auth"
|
"github.com/docker/distribution/registry/client/auth"
|
||||||
|
"github.com/docker/distribution/registry/client/auth/challenge"
|
||||||
"github.com/docker/distribution/registry/proxy/scheduler"
|
"github.com/docker/distribution/registry/proxy/scheduler"
|
||||||
"github.com/docker/distribution/registry/storage"
|
"github.com/docker/distribution/registry/storage"
|
||||||
"github.com/docker/distribution/registry/storage/cache/memory"
|
"github.com/docker/distribution/registry/storage/cache/memory"
|
||||||
@@ -77,7 +78,7 @@ func (m *mockChallenger) credentialStore() auth.CredentialStore {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *mockChallenger) challengeManager() auth.ChallengeManager {
|
func (m *mockChallenger) challengeManager() challenge.Manager {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -111,7 +112,7 @@ func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestE
|
|||||||
stats: make(map[string]int),
|
stats: make(map[string]int),
|
||||||
}
|
}
|
||||||
|
|
||||||
manifestDigest, err := populateRepo(t, ctx, truthRepo, name, tag)
|
manifestDigest, err := populateRepo(ctx, t, truthRepo, name, tag)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf(err.Error())
|
t.Fatalf(err.Error())
|
||||||
}
|
}
|
||||||
@@ -148,7 +149,7 @@ func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestE
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func populateRepo(t *testing.T, ctx context.Context, repository distribution.Repository, name, tag string) (digest.Digest, error) {
|
func populateRepo(ctx context.Context, t *testing.T, repository distribution.Repository, name, tag string) (digest.Digest, error) {
|
||||||
m := schema1.Manifest{
|
m := schema1.Manifest{
|
||||||
Versioned: manifest.Versioned{
|
Versioned: manifest.Versioned{
|
||||||
SchemaVersion: 1,
|
SchemaVersion: 1,
|
||||||
|
|||||||
11
vendor/github.com/docker/distribution/registry/proxy/proxyregistry.go
generated
vendored
11
vendor/github.com/docker/distribution/registry/proxy/proxyregistry.go
generated
vendored
@@ -12,6 +12,7 @@ import (
|
|||||||
"github.com/docker/distribution/reference"
|
"github.com/docker/distribution/reference"
|
||||||
"github.com/docker/distribution/registry/client"
|
"github.com/docker/distribution/registry/client"
|
||||||
"github.com/docker/distribution/registry/client/auth"
|
"github.com/docker/distribution/registry/client/auth"
|
||||||
|
"github.com/docker/distribution/registry/client/auth/challenge"
|
||||||
"github.com/docker/distribution/registry/client/transport"
|
"github.com/docker/distribution/registry/client/transport"
|
||||||
"github.com/docker/distribution/registry/proxy/scheduler"
|
"github.com/docker/distribution/registry/proxy/scheduler"
|
||||||
"github.com/docker/distribution/registry/storage"
|
"github.com/docker/distribution/registry/storage"
|
||||||
@@ -91,7 +92,7 @@ func NewRegistryPullThroughCache(ctx context.Context, registry distribution.Name
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
cs, err := configureAuth(config.Username, config.Password)
|
cs, err := configureAuth(config.Username, config.Password, config.RemoteURL)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -102,7 +103,7 @@ func NewRegistryPullThroughCache(ctx context.Context, registry distribution.Name
|
|||||||
remoteURL: *remoteURL,
|
remoteURL: *remoteURL,
|
||||||
authChallenger: &remoteAuthChallenger{
|
authChallenger: &remoteAuthChallenger{
|
||||||
remoteURL: *remoteURL,
|
remoteURL: *remoteURL,
|
||||||
cm: auth.NewSimpleChallengeManager(),
|
cm: challenge.NewSimpleManager(),
|
||||||
cs: cs,
|
cs: cs,
|
||||||
},
|
},
|
||||||
}, nil
|
}, nil
|
||||||
@@ -177,14 +178,14 @@ func (pr *proxyingRegistry) BlobStatter() distribution.BlobStatter {
|
|||||||
// authChallenger encapsulates a request to the upstream to establish credential challenges
|
// authChallenger encapsulates a request to the upstream to establish credential challenges
|
||||||
type authChallenger interface {
|
type authChallenger interface {
|
||||||
tryEstablishChallenges(context.Context) error
|
tryEstablishChallenges(context.Context) error
|
||||||
challengeManager() auth.ChallengeManager
|
challengeManager() challenge.Manager
|
||||||
credentialStore() auth.CredentialStore
|
credentialStore() auth.CredentialStore
|
||||||
}
|
}
|
||||||
|
|
||||||
type remoteAuthChallenger struct {
|
type remoteAuthChallenger struct {
|
||||||
remoteURL url.URL
|
remoteURL url.URL
|
||||||
sync.Mutex
|
sync.Mutex
|
||||||
cm auth.ChallengeManager
|
cm challenge.Manager
|
||||||
cs auth.CredentialStore
|
cs auth.CredentialStore
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -192,7 +193,7 @@ func (r *remoteAuthChallenger) credentialStore() auth.CredentialStore {
|
|||||||
return r.cs
|
return r.cs
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *remoteAuthChallenger) challengeManager() auth.ChallengeManager {
|
func (r *remoteAuthChallenger) challengeManager() challenge.Manager {
|
||||||
return r.cm
|
return r.cm
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
12
vendor/github.com/docker/distribution/registry/storage/cache/cachecheck/suite.go
generated
vendored
12
vendor/github.com/docker/distribution/registry/storage/cache/cachecheck/suite.go
generated
vendored
@@ -16,12 +16,12 @@ import (
|
|||||||
func CheckBlobDescriptorCache(t *testing.T, provider cache.BlobDescriptorCacheProvider) {
|
func CheckBlobDescriptorCache(t *testing.T, provider cache.BlobDescriptorCacheProvider) {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
|
||||||
checkBlobDescriptorCacheEmptyRepository(t, ctx, provider)
|
checkBlobDescriptorCacheEmptyRepository(ctx, t, provider)
|
||||||
checkBlobDescriptorCacheSetAndRead(t, ctx, provider)
|
checkBlobDescriptorCacheSetAndRead(ctx, t, provider)
|
||||||
checkBlobDescriptorCacheClear(t, ctx, provider)
|
checkBlobDescriptorCacheClear(ctx, t, provider)
|
||||||
}
|
}
|
||||||
|
|
||||||
func checkBlobDescriptorCacheEmptyRepository(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) {
|
func checkBlobDescriptorCacheEmptyRepository(ctx context.Context, t *testing.T, provider cache.BlobDescriptorCacheProvider) {
|
||||||
if _, err := provider.Stat(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); err != distribution.ErrBlobUnknown {
|
if _, err := provider.Stat(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); err != distribution.ErrBlobUnknown {
|
||||||
t.Fatalf("expected unknown blob error with empty store: %v", err)
|
t.Fatalf("expected unknown blob error with empty store: %v", err)
|
||||||
}
|
}
|
||||||
@@ -59,7 +59,7 @@ func checkBlobDescriptorCacheEmptyRepository(t *testing.T, ctx context.Context,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) {
|
func checkBlobDescriptorCacheSetAndRead(ctx context.Context, t *testing.T, provider cache.BlobDescriptorCacheProvider) {
|
||||||
localDigest := digest.Digest("sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")
|
localDigest := digest.Digest("sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")
|
||||||
expected := distribution.Descriptor{
|
expected := distribution.Descriptor{
|
||||||
Digest: "sha256:abc1111111111111111111111111111111111111111111111111111111111111",
|
Digest: "sha256:abc1111111111111111111111111111111111111111111111111111111111111",
|
||||||
@@ -143,7 +143,7 @@ func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provi
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func checkBlobDescriptorCacheClear(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) {
|
func checkBlobDescriptorCacheClear(ctx context.Context, t *testing.T, provider cache.BlobDescriptorCacheProvider) {
|
||||||
localDigest := digest.Digest("sha384:def111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")
|
localDigest := digest.Digest("sha384:def111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")
|
||||||
expected := distribution.Descriptor{
|
expected := distribution.Descriptor{
|
||||||
Digest: "sha256:def1111111111111111111111111111111111111111111111111111111111111",
|
Digest: "sha256:def1111111111111111111111111111111111111111111111111111111111111",
|
||||||
|
|||||||
4
vendor/github.com/docker/distribution/registry/storage/catalog_test.go
generated
vendored
4
vendor/github.com/docker/distribution/registry/storage/catalog_test.go
generated
vendored
@@ -44,7 +44,7 @@ func setupFS(t *testing.T) *setupEnv {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, repo := range repos {
|
for _, repo := range repos {
|
||||||
makeRepo(t, ctx, repo, registry)
|
makeRepo(ctx, t, repo, registry)
|
||||||
}
|
}
|
||||||
|
|
||||||
expected := []string{
|
expected := []string{
|
||||||
@@ -67,7 +67,7 @@ func setupFS(t *testing.T) *setupEnv {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func makeRepo(t *testing.T, ctx context.Context, name string, reg distribution.Namespace) {
|
func makeRepo(ctx context.Context, t *testing.T, name string, reg distribution.Namespace) {
|
||||||
named, err := reference.ParseNamed(name)
|
named, err := reference.ParseNamed(name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
|
|||||||
17
vendor/github.com/docker/distribution/registry/storage/driver/azure/azure.go
generated
vendored
17
vendor/github.com/docker/distribution/registry/storage/driver/azure/azure.go
generated
vendored
@@ -111,12 +111,13 @@ func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
defer blob.Close()
|
||||||
return ioutil.ReadAll(blob)
|
return ioutil.ReadAll(blob)
|
||||||
}
|
}
|
||||||
|
|
||||||
// PutContent stores the []byte content at a location designated by "path".
|
// PutContent stores the []byte content at a location designated by "path".
|
||||||
func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error {
|
func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error {
|
||||||
if _, err := d.client.DeleteBlobIfExists(d.container, path); err != nil {
|
if _, err := d.client.DeleteBlobIfExists(d.container, path, nil); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
writer, err := d.Writer(ctx, path, false)
|
writer, err := d.Writer(ctx, path, false)
|
||||||
@@ -151,7 +152,7 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read
|
|||||||
}
|
}
|
||||||
|
|
||||||
bytesRange := fmt.Sprintf("%v-", offset)
|
bytesRange := fmt.Sprintf("%v-", offset)
|
||||||
resp, err := d.client.GetBlobRange(d.container, path, bytesRange)
|
resp, err := d.client.GetBlobRange(d.container, path, bytesRange, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -174,7 +175,7 @@ func (d *driver) Writer(ctx context.Context, path string, append bool) (storaged
|
|||||||
}
|
}
|
||||||
size = blobProperties.ContentLength
|
size = blobProperties.ContentLength
|
||||||
} else {
|
} else {
|
||||||
err := d.client.DeleteBlob(d.container, path)
|
err := d.client.DeleteBlob(d.container, path, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -272,12 +273,12 @@ func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) e
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return d.client.DeleteBlob(d.container, sourcePath)
|
return d.client.DeleteBlob(d.container, sourcePath, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Delete recursively deletes all objects stored at "path" and its subpaths.
|
// Delete recursively deletes all objects stored at "path" and its subpaths.
|
||||||
func (d *driver) Delete(ctx context.Context, path string) error {
|
func (d *driver) Delete(ctx context.Context, path string) error {
|
||||||
ok, err := d.client.DeleteBlobIfExists(d.container, path)
|
ok, err := d.client.DeleteBlobIfExists(d.container, path, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -292,7 +293,7 @@ func (d *driver) Delete(ctx context.Context, path string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, b := range blobs {
|
for _, b := range blobs {
|
||||||
if err = d.client.DeleteBlob(d.container, b); err != nil {
|
if err = d.client.DeleteBlob(d.container, b, nil); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -442,7 +443,7 @@ func (w *writer) Cancel() error {
|
|||||||
return fmt.Errorf("already committed")
|
return fmt.Errorf("already committed")
|
||||||
}
|
}
|
||||||
w.cancelled = true
|
w.cancelled = true
|
||||||
return w.driver.client.DeleteBlob(w.driver.container, w.path)
|
return w.driver.client.DeleteBlob(w.driver.container, w.path, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (w *writer) Commit() error {
|
func (w *writer) Commit() error {
|
||||||
@@ -470,7 +471,7 @@ func (bw *blockWriter) Write(p []byte) (int, error) {
|
|||||||
if offset+chunkSize > len(p) {
|
if offset+chunkSize > len(p) {
|
||||||
chunkSize = len(p) - offset
|
chunkSize = len(p) - offset
|
||||||
}
|
}
|
||||||
err := bw.client.AppendBlock(bw.container, bw.path, p[offset:offset+chunkSize])
|
err := bw.client.AppendBlock(bw.container, bw.path, p[offset:offset+chunkSize], nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return n, err
|
return n, err
|
||||||
}
|
}
|
||||||
|
|||||||
2
vendor/github.com/docker/distribution/registry/storage/driver/base/base.go
generated
vendored
2
vendor/github.com/docker/distribution/registry/storage/driver/base/base.go
generated
vendored
@@ -137,7 +137,7 @@ func (base *Base) Stat(ctx context.Context, path string) (storagedriver.FileInfo
|
|||||||
ctx, done := context.WithTrace(ctx)
|
ctx, done := context.WithTrace(ctx)
|
||||||
defer done("%s.Stat(%q)", base.Name(), path)
|
defer done("%s.Stat(%q)", base.Name(), path)
|
||||||
|
|
||||||
if !storagedriver.PathRegexp.MatchString(path) {
|
if !storagedriver.PathRegexp.MatchString(path) && path != "/" {
|
||||||
return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
|
return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
23
vendor/github.com/docker/distribution/registry/storage/driver/oss/oss.go
generated
vendored
23
vendor/github.com/docker/distribution/registry/storage/driver/oss/oss.go
generated
vendored
@@ -389,15 +389,17 @@ func (d *driver) List(ctx context.Context, opath string) ([]string, error) {
|
|||||||
return append(files, directories...), nil
|
return append(files, directories...), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const maxConcurrency = 10
|
||||||
|
|
||||||
// Move moves an object stored at sourcePath to destPath, removing the original
|
// Move moves an object stored at sourcePath to destPath, removing the original
|
||||||
// object.
|
// object.
|
||||||
func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error {
|
func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error {
|
||||||
logrus.Infof("Move from %s to %s", d.ossPath(sourcePath), d.ossPath(destPath))
|
logrus.Infof("Move from %s to %s", d.ossPath(sourcePath), d.ossPath(destPath))
|
||||||
|
err := d.Bucket.CopyLargeFileInParallel(d.ossPath(sourcePath), d.ossPath(destPath),
|
||||||
err := d.Bucket.CopyLargeFile(d.ossPath(sourcePath), d.ossPath(destPath),
|
|
||||||
d.getContentType(),
|
d.getContentType(),
|
||||||
getPermissions(),
|
getPermissions(),
|
||||||
oss.Options{})
|
oss.Options{},
|
||||||
|
maxConcurrency)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logrus.Errorf("Failed for move from %s to %s: %v", d.ossPath(sourcePath), d.ossPath(destPath), err)
|
logrus.Errorf("Failed for move from %s to %s: %v", d.ossPath(sourcePath), d.ossPath(destPath), err)
|
||||||
return parseError(sourcePath, err)
|
return parseError(sourcePath, err)
|
||||||
@@ -408,7 +410,8 @@ func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) e
|
|||||||
|
|
||||||
// Delete recursively deletes all objects stored at "path" and its subpaths.
|
// Delete recursively deletes all objects stored at "path" and its subpaths.
|
||||||
func (d *driver) Delete(ctx context.Context, path string) error {
|
func (d *driver) Delete(ctx context.Context, path string) error {
|
||||||
listResponse, err := d.Bucket.List(d.ossPath(path), "", "", listMax)
|
ossPath := d.ossPath(path)
|
||||||
|
listResponse, err := d.Bucket.List(ossPath, "", "", listMax)
|
||||||
if err != nil || len(listResponse.Contents) == 0 {
|
if err != nil || len(listResponse.Contents) == 0 {
|
||||||
return storagedriver.PathNotFoundError{Path: path}
|
return storagedriver.PathNotFoundError{Path: path}
|
||||||
}
|
}
|
||||||
@@ -416,15 +419,25 @@ func (d *driver) Delete(ctx context.Context, path string) error {
|
|||||||
ossObjects := make([]oss.Object, listMax)
|
ossObjects := make([]oss.Object, listMax)
|
||||||
|
|
||||||
for len(listResponse.Contents) > 0 {
|
for len(listResponse.Contents) > 0 {
|
||||||
|
numOssObjects := len(listResponse.Contents)
|
||||||
for index, key := range listResponse.Contents {
|
for index, key := range listResponse.Contents {
|
||||||
|
// Stop if we encounter a key that is not a subpath (so that deleting "/a" does not delete "/ab").
|
||||||
|
if len(key.Key) > len(ossPath) && (key.Key)[len(ossPath)] != '/' {
|
||||||
|
numOssObjects = index
|
||||||
|
break
|
||||||
|
}
|
||||||
ossObjects[index].Key = key.Key
|
ossObjects[index].Key = key.Key
|
||||||
}
|
}
|
||||||
|
|
||||||
err := d.Bucket.DelMulti(oss.Delete{Quiet: false, Objects: ossObjects[0:len(listResponse.Contents)]})
|
err := d.Bucket.DelMulti(oss.Delete{Quiet: false, Objects: ossObjects[0:numOssObjects]})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if numOssObjects < len(listResponse.Contents) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
listResponse, err = d.Bucket.List(d.ossPath(path), "", "", listMax)
|
listResponse, err = d.Bucket.List(d.ossPath(path), "", "", listMax)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
|||||||
77
vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3.go
generated
vendored
77
vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3.go
generated
vendored
@@ -76,8 +76,8 @@ const noStorageClass = "NONE"
|
|||||||
// validRegions maps known s3 region identifiers to region descriptors
|
// validRegions maps known s3 region identifiers to region descriptors
|
||||||
var validRegions = map[string]struct{}{}
|
var validRegions = map[string]struct{}{}
|
||||||
|
|
||||||
// validObjectAcls contains known s3 object Acls
|
// validObjectACLs contains known s3 object Acls
|
||||||
var validObjectAcls = map[string]struct{}{}
|
var validObjectACLs = map[string]struct{}{}
|
||||||
|
|
||||||
//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set
|
//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set
|
||||||
type DriverParameters struct {
|
type DriverParameters struct {
|
||||||
@@ -97,12 +97,13 @@ type DriverParameters struct {
|
|||||||
RootDirectory string
|
RootDirectory string
|
||||||
StorageClass string
|
StorageClass string
|
||||||
UserAgent string
|
UserAgent string
|
||||||
ObjectAcl string
|
ObjectACL string
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
for _, region := range []string{
|
for _, region := range []string{
|
||||||
"us-east-1",
|
"us-east-1",
|
||||||
|
"us-east-2",
|
||||||
"us-west-1",
|
"us-west-1",
|
||||||
"us-west-2",
|
"us-west-2",
|
||||||
"eu-west-1",
|
"eu-west-1",
|
||||||
@@ -118,7 +119,7 @@ func init() {
|
|||||||
validRegions[region] = struct{}{}
|
validRegions[region] = struct{}{}
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, objectAcl := range []string{
|
for _, objectACL := range []string{
|
||||||
s3.ObjectCannedACLPrivate,
|
s3.ObjectCannedACLPrivate,
|
||||||
s3.ObjectCannedACLPublicRead,
|
s3.ObjectCannedACLPublicRead,
|
||||||
s3.ObjectCannedACLPublicReadWrite,
|
s3.ObjectCannedACLPublicReadWrite,
|
||||||
@@ -127,7 +128,7 @@ func init() {
|
|||||||
s3.ObjectCannedACLBucketOwnerRead,
|
s3.ObjectCannedACLBucketOwnerRead,
|
||||||
s3.ObjectCannedACLBucketOwnerFullControl,
|
s3.ObjectCannedACLBucketOwnerFullControl,
|
||||||
} {
|
} {
|
||||||
validObjectAcls[objectAcl] = struct{}{}
|
validObjectACLs[objectACL] = struct{}{}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Register this as the default s3 driver in addition to s3aws
|
// Register this as the default s3 driver in addition to s3aws
|
||||||
@@ -153,7 +154,7 @@ type driver struct {
|
|||||||
MultipartCopyThresholdSize int64
|
MultipartCopyThresholdSize int64
|
||||||
RootDirectory string
|
RootDirectory string
|
||||||
StorageClass string
|
StorageClass string
|
||||||
ObjectAcl string
|
ObjectACL string
|
||||||
}
|
}
|
||||||
|
|
||||||
type baseEmbed struct {
|
type baseEmbed struct {
|
||||||
@@ -313,18 +314,18 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
|
|||||||
userAgent = ""
|
userAgent = ""
|
||||||
}
|
}
|
||||||
|
|
||||||
objectAcl := s3.ObjectCannedACLPrivate
|
objectACL := s3.ObjectCannedACLPrivate
|
||||||
objectAclParam := parameters["objectacl"]
|
objectACLParam := parameters["objectacl"]
|
||||||
if objectAclParam != nil {
|
if objectACLParam != nil {
|
||||||
objectAclString, ok := objectAclParam.(string)
|
objectACLString, ok := objectACLParam.(string)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, fmt.Errorf("Invalid value for objectacl parameter: %v", objectAclParam)
|
return nil, fmt.Errorf("Invalid value for objectacl parameter: %v", objectACLParam)
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, ok = validObjectAcls[objectAclString]; !ok {
|
if _, ok = validObjectACLs[objectACLString]; !ok {
|
||||||
return nil, fmt.Errorf("Invalid value for objectacl parameter: %v", objectAclParam)
|
return nil, fmt.Errorf("Invalid value for objectacl parameter: %v", objectACLParam)
|
||||||
}
|
}
|
||||||
objectAcl = objectAclString
|
objectACL = objectACLString
|
||||||
}
|
}
|
||||||
|
|
||||||
params := DriverParameters{
|
params := DriverParameters{
|
||||||
@@ -344,7 +345,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
|
|||||||
fmt.Sprint(rootDirectory),
|
fmt.Sprint(rootDirectory),
|
||||||
storageClass,
|
storageClass,
|
||||||
fmt.Sprint(userAgent),
|
fmt.Sprint(userAgent),
|
||||||
objectAcl,
|
objectACL,
|
||||||
}
|
}
|
||||||
|
|
||||||
return New(params)
|
return New(params)
|
||||||
@@ -389,29 +390,19 @@ func New(params DriverParameters) (*Driver, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
awsConfig := aws.NewConfig()
|
awsConfig := aws.NewConfig()
|
||||||
var creds *credentials.Credentials
|
creds := credentials.NewChainCredentials([]credentials.Provider{
|
||||||
if params.RegionEndpoint == "" {
|
&credentials.StaticProvider{
|
||||||
creds = credentials.NewChainCredentials([]credentials.Provider{
|
Value: credentials.Value{
|
||||||
&credentials.StaticProvider{
|
AccessKeyID: params.AccessKey,
|
||||||
Value: credentials.Value{
|
SecretAccessKey: params.SecretKey,
|
||||||
AccessKeyID: params.AccessKey,
|
|
||||||
SecretAccessKey: params.SecretKey,
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
&credentials.EnvProvider{},
|
},
|
||||||
&credentials.SharedCredentialsProvider{},
|
&credentials.EnvProvider{},
|
||||||
&ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())},
|
&credentials.SharedCredentialsProvider{},
|
||||||
})
|
&ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())},
|
||||||
} else {
|
})
|
||||||
creds = credentials.NewChainCredentials([]credentials.Provider{
|
|
||||||
&credentials.StaticProvider{
|
if params.RegionEndpoint != "" {
|
||||||
Value: credentials.Value{
|
|
||||||
AccessKeyID: params.AccessKey,
|
|
||||||
SecretAccessKey: params.SecretKey,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
&credentials.EnvProvider{},
|
|
||||||
})
|
|
||||||
awsConfig.WithS3ForcePathStyle(true)
|
awsConfig.WithS3ForcePathStyle(true)
|
||||||
awsConfig.WithEndpoint(params.RegionEndpoint)
|
awsConfig.WithEndpoint(params.RegionEndpoint)
|
||||||
}
|
}
|
||||||
@@ -459,7 +450,7 @@ func New(params DriverParameters) (*Driver, error) {
|
|||||||
MultipartCopyThresholdSize: params.MultipartCopyThresholdSize,
|
MultipartCopyThresholdSize: params.MultipartCopyThresholdSize,
|
||||||
RootDirectory: params.RootDirectory,
|
RootDirectory: params.RootDirectory,
|
||||||
StorageClass: params.StorageClass,
|
StorageClass: params.StorageClass,
|
||||||
ObjectAcl: params.ObjectAcl,
|
ObjectACL: params.ObjectACL,
|
||||||
}
|
}
|
||||||
|
|
||||||
return &Driver{
|
return &Driver{
|
||||||
@@ -784,10 +775,12 @@ func min(a, b int) int {
|
|||||||
// We must be careful since S3 does not guarantee read after delete consistency
|
// We must be careful since S3 does not guarantee read after delete consistency
|
||||||
func (d *driver) Delete(ctx context.Context, path string) error {
|
func (d *driver) Delete(ctx context.Context, path string) error {
|
||||||
s3Objects := make([]*s3.ObjectIdentifier, 0, listMax)
|
s3Objects := make([]*s3.ObjectIdentifier, 0, listMax)
|
||||||
|
s3Path := d.s3Path(path)
|
||||||
listObjectsInput := &s3.ListObjectsInput{
|
listObjectsInput := &s3.ListObjectsInput{
|
||||||
Bucket: aws.String(d.Bucket),
|
Bucket: aws.String(d.Bucket),
|
||||||
Prefix: aws.String(d.s3Path(path)),
|
Prefix: aws.String(s3Path),
|
||||||
}
|
}
|
||||||
|
ListLoop:
|
||||||
for {
|
for {
|
||||||
// list all the objects
|
// list all the objects
|
||||||
resp, err := d.S3.ListObjects(listObjectsInput)
|
resp, err := d.S3.ListObjects(listObjectsInput)
|
||||||
@@ -800,6 +793,10 @@ func (d *driver) Delete(ctx context.Context, path string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, key := range resp.Contents {
|
for _, key := range resp.Contents {
|
||||||
|
// Stop if we encounter a key that is not a subpath (so that deleting "/a" does not delete "/ab").
|
||||||
|
if len(*key.Key) > len(s3Path) && (*key.Key)[len(s3Path)] != '/' {
|
||||||
|
break ListLoop
|
||||||
|
}
|
||||||
s3Objects = append(s3Objects, &s3.ObjectIdentifier{
|
s3Objects = append(s3Objects, &s3.ObjectIdentifier{
|
||||||
Key: key.Key,
|
Key: key.Key,
|
||||||
})
|
})
|
||||||
@@ -912,7 +909,7 @@ func (d *driver) getContentType() *string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (d *driver) getACL() *string {
|
func (d *driver) getACL() *string {
|
||||||
return aws.String(d.ObjectAcl)
|
return aws.String(d.ObjectACL)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *driver) getStorageClass() *string {
|
func (d *driver) getStorageClass() *string {
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user